Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/CODEOWNERS
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,7 @@
/packages/oracle_weblogic @elastic/obs-infraobs-integrations
/packages/osquery @elastic/sec-windows-platform
/packages/osquery_manager @elastic/security-defend-workflows
/packages/otel_ecs_converter @elastic/obs-ds-hosted-services
/packages/pad @elastic/ml-ui @elastic/sec-applied-ml
/packages/panw @elastic/integration-experience
/packages/panw_cortex_xdr @elastic/security-service-integrations
Expand Down
5 changes: 5 additions & 0 deletions packages/aws/changelog.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
# newer versions go on top
- version: "4.4.0"
changes:
- description: Add VPC flow log compatibility for OTel shim pipeline.
type: enhancement
link: https://github.com/elastic/integrations/pull/15570
- version: "4.3.0"
changes:
- description: Improve documentation to align with new guidelines.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ processors:
- rename:
field: message
target_field: event.original
ignore_missing: true
if: ctx.event?.original == null
description: 'Renames the original `message` field to `event.original` to store a copy of the original message. The `event.original` field is not touched if the document already has one; it may happen when Logstash sends the document.'
- remove:
Expand All @@ -18,6 +19,7 @@ processors:
- json:
field: event.original
target_field: json
if: ctx.pre_extracted != true
- pipeline:
if: ctx.json?.preview != null
name: '{{ IngestPipeline "third-party" }}'
Expand All @@ -28,11 +30,13 @@ processors:
field: json.eventTime
target_field: "@timestamp"
ignore_failure: true
if: ctx.pre_extracted != true
formats:
- ISO8601
- script:
description: Drops null/empty values recursively
lang: painless
if: ctx.pre_extracted != true
source: |
boolean drop(Object o) {
if (o == null || o == "") {
Expand Down Expand Up @@ -1787,6 +1791,7 @@ processors:
field:
- json
- _conf
- pre_extracted
ignore_missing: true
- script:
tag: remove_nulls
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,15 @@ processors:
field: message
target_field: event.original
ignore_missing: true
if: 'ctx.event?.original == null'
if: ctx.event?.original == null
description: 'Renames the original `message` field to `event.original` to store a copy of the original message. The `event.original` field is not touched if the document already has one; it may happen when Logstash sends the document.'
- remove:
field: message
ignore_missing: true
if: 'ctx.event?.original != null'
if: ctx.event?.original != null
description: 'The `message` field is no longer required if the document has an `event.original` field.'
- grok:
if: ctx.pre_extracted != true
field: event.original
# Classic ELB patterns documented in https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html
# ELB v2 Application load balancers https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html
Expand Down Expand Up @@ -182,6 +183,7 @@ processors:
separator: ' '
ignore_missing: true
- date:
if: ctx.pre_extracted != true
field: _tmp.timestamp
formats:
- ISO8601
Expand Down Expand Up @@ -264,6 +266,10 @@ processors:
field: ["aws.elb.leaf_client_cert_not_after_str", "aws.elb.leaf_client_cert_not_before_str"]
ignore_missing: true

- remove:
field:
- pre_extracted
ignore_missing: true
on_failure:
- set:
field: event.kind
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,50 +25,51 @@ processors:
field: event.category
value: [network]
- drop:
    # Drop VPC flow-log header rows. `instanceof String` both null-checks and
    # type-checks event.original: `ctx.event?.original?.startsWith(...)` could
    # resolve to null, which is not a valid boolean for an ingest conditional.
    if: >-
      ctx.pre_extracted != true &&
      ctx.event?.original instanceof String &&
      (ctx.event.original.startsWith("version") || ctx.event.original.startsWith("instance-id"))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is not safe (nor was the original). Both of ctx.event?.original?.startsWith("version") and ctx.event?.original?.startsWith("instance-id") can resolve to null which is not a boolean type. Either ctx.event.original is always present in which case the ?. operators are not necessary, or they are not in which case presence must be checked for (probably with ctx.event?.original instanceof String) prior to the method calls.

- dissect:
field: event.original
pattern: '{"message":"%{event.original}"}'
if: ctx.pre_extracted != true
ignore_failure: true
- script:
lang: painless
if: ctx.event?.original != null
if: ctx.event?.original != null && ctx.pre_extracted != true
source: >-
ctx._temp_ = new HashMap();
ctx._temp_.message_token_count = ctx.event?.original.splitOnToken(" ").length;
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status}'
    # `ctx` is never null in ingest conditionals, so `ctx._temp_?.` suffices.
    if: ctx._temp_?.message_token_count == 14 && ctx.pre_extracted != true
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
if: ctx?._temp_?.message_token_count == 14 && ctx.pre_extracted != true
if: ctx._temp_?.message_token_count == 14 && ctx.pre_extracted != true

(similar below)

# One dissect per known VPC flow-log layout, selected by whitespace token count.
# `ctx` is never null in ingest conditionals, so `ctx._temp_?.` suffices.
# `pre_extracted` is set by the OTel shim pipeline; extraction is skipped then.
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.instance_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr}'
    if: ctx._temp_?.message_token_count == 6 && ctx.pre_extracted != true
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.interface_id} %{aws.vpcflow.account_id} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{aws.vpcflow.action} %{aws.vpcflow.log_status}'
    if: ctx._temp_?.message_token_count == 17 && ctx.pre_extracted != true
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.account_id} %{aws.vpcflow.type} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{aws.vpcflow.protocol} %{aws.vpcflow.bytes} %{aws.vpcflow.packets} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.log_status}'
    if: ctx._temp_?.message_token_count == 21 && ctx.pre_extracted != true
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path}'
    if: ctx._temp_?.message_token_count == 29 && ctx.pre_extracted != true
- dissect:
    field: event.original
    description: default format for transit gateway vpc flow logs, covering versions v2 through v6.
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.resource_type} %{aws.vpcflow.account_id} %{aws.vpcflow.tgw_id} %{aws.vpcflow.tgw_attachment_id} %{aws.vpcflow.tgw_src_vpc_account_id} %{aws.vpcflow.tgw_dst_vpc_account_id} %{aws.vpcflow.tgw_src_vpc_id} %{aws.vpcflow.tgw_dst_vpc_id} %{aws.vpcflow.tgw_src_subnet_id} %{aws.vpcflow.tgw_dst_subnet_id} %{aws.vpcflow.tgw_src_eni} %{aws.vpcflow.tgw_dst_eni} %{aws.vpcflow.tgw_src_az_id} %{aws.vpcflow.tgw_dst_az_id} %{aws.vpcflow.tgw_pair_attachment_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.log_status} %{aws.vpcflow.type} %{aws.vpcflow.packets_lost_no_route} %{aws.vpcflow.packets_lost_blackhole} %{aws.vpcflow.packets_lost_mtu_exceeded} %{aws.vpcflow.packets_lost_ttl_expired} %{aws.vpcflow.tcp_flags} %{cloud.region} %{network.direction} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service}'
    if: ctx._temp_?.message_token_count == 36 && ctx.pre_extracted != true
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path} %{aws.vpcflow.ecs_cluster_arn} %{aws.vpcflow.ecs_cluster_name} %{aws.vpcflow.ecs_container_instance_arn} %{aws.vpcflow.ecs_container_instance_id} %{aws.vpcflow.ecs_container_id} %{aws.vpcflow.ecs_second_container_id} %{aws.vpcflow.ecs_service_name} %{aws.vpcflow.ecs_task_definition_arn} %{aws.vpcflow.ecs_task_arn} %{aws.vpcflow.ecs_task_id}'
    if: ctx._temp_?.message_token_count == 39 && ctx.pre_extracted != true
- dissect:
    field: event.original
    pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path} %{aws.vpcflow.ecs_cluster_arn} %{aws.vpcflow.ecs_cluster_name} %{aws.vpcflow.ecs_container_instance_arn} %{aws.vpcflow.ecs_container_instance_id} %{aws.vpcflow.ecs_container_id} %{aws.vpcflow.ecs_second_container_id} %{aws.vpcflow.ecs_service_name} %{aws.vpcflow.ecs_task_definition_arn} %{aws.vpcflow.ecs_task_arn} %{aws.vpcflow.ecs_task_id} %{aws.vpcflow.reject_reason}'
    if: ctx._temp_?.message_token_count == 40 && ctx.pre_extracted != true

# Convert Unix epoch to timestamp
- date:
Expand All @@ -77,18 +78,21 @@ processors:
ignore_failure: true
formats:
- UNIX
if: ctx.pre_extracted != true
- date:
field: aws.vpcflow.start
target_field: event.start
ignore_failure: true
formats:
- UNIX
if: ctx.pre_extracted != true
- date:
field: aws.vpcflow.end
target_field: event.end
ignore_failure: true
formats:
- UNIX
if: ctx.pre_extracted != true
- remove:
field:
- aws.vpcflow.start
Expand Down Expand Up @@ -391,6 +395,7 @@ processors:
- aws.vpcflow.ecs_container_instance_id
- aws.vpcflow.ecs_service_name
- aws.vpcflow.reject_reason
- pre_extracted
ignore_missing: true
on_failure:
- set:
Expand Down
2 changes: 1 addition & 1 deletion packages/aws/manifest.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
format_version: 3.3.2
name: aws
title: AWS
version: "4.3.0"
# Quote version strings: matches the file's own convention (previous value was
# "4.3.0") and avoids YAML implicit-typing surprises on version-like scalars.
version: "4.4.0"
description: Collect logs and metrics from Amazon Web Services (AWS) with Elastic Agent.
type: integration
categories:
Expand Down
4 changes: 4 additions & 0 deletions packages/otel_ecs_converter/_dev/build/build.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
dependencies:
ecs:
reference: [email protected]
import_mappings: true
40 changes: 40 additions & 0 deletions packages/otel_ecs_converter/_dev/build/docs/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# OTel ECS converter

The OTel ECS converter integration provides pipelines that convert OTel-native fields to ECS fields in documents.
The table below lists each supported data source, its source OTel data set, and its destination ECS data set:

| Data source | OTel data set (source) | ECS data set (destination) |
|-----------------|------------------------|----------------------------|
| AWS ELB logs | aws.elbaccess.otel | aws.elb_logs |
| VPC flow logs | aws.vpcflow.otel | aws.vpcflow |
| CloudTrail logs | aws.cloudtrail.otel | aws.cloudtrail |

## Instructions

This integration works on OTel documents, so you should ingest OTel-native documents from the sources stated above.
The destination data sets are installed with the **AWS integration**, so you must install that integration as well.

1. Install the **OTel ECS converter** integration

Search the integration from the catalog,

![otel_converter.png](../img/otel_converter.png)

Then install assets through settings section,

![otel_converter_install.png](../img/otel_converter_install.png)

This installs the required pipelines, which parse events and re-route them to the destination data sets.

2. Install the **AWS integration** assets

You only need to install the integration's assets. To install them,
find the **AWS** integration by searching or browsing the catalog.

![aws.png](../img/aws.png)

Navigate to the **Settings** tab and click **Install AWS**. Confirm by clicking **Install AWS** in the popup.

![aws_assets.png](../img/aws_assets.png)

3. Ingest OTel native data into the data sets
6 changes: 6 additions & 0 deletions packages/otel_ecs_converter/changelog.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# newer versions go on top
- version: "0.1.0"
changes:
- description: Introduce OTel converter pipelines to map OTel attributes to ECS fields.
type: enhancement
link: https://github.com/elastic/integrations/pull/15570
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
---
description: Pipeline for processing sample logs
processors:
- dot_expander:
field: "*"
path: attributes
- dot_expander:
field: "*"
path: resource.attributes
- set:
field: aws.cloudtrail.event_version
copy_from: attributes.aws.cloudtrail.event_version
- set:
field: aws.cloudtrail.user_identity.type
copy_from: attributes.aws.principal.type
- set:
field: user.name
copy_from: attributes.user.name
ignore_empty_value: true
- set:
field: user.id
copy_from: attributes.aws.principal.id
ignore_empty_value: true
- set:
field: aws.cloudtrail.user_identity.arn
copy_from: attributes.aws.principal.arn
ignore_empty_value: true
- set:
field: aws.cloudtrail.user_identity.access_key_id
copy_from: attributes.aws.access_key.id
ignore_empty_value: true
- set:
field: event.provider
copy_from: attributes.rpc.service
- set:
field: event.action
copy_from: attributes.rpc.method
- set:
field: aws.cloudtrail.event_type
copy_from: attributes.rpc.system
- set:
field: aws.cloudtrail.event_category
copy_from: attributes.aws.event.category
ignore_empty_value: true
- set:
field: cloud.region
copy_from: resource.attributes.cloud.region
- set:
field: json.sourceIPAddress
copy_from: attributes.source.address
- user_agent:
field: attributes.user_agent.original
target_field: user_agent
on_failure:
- rename:
field: attributes.user_agent.original
target_field: user_agent.original
ignore_failure: true
- set:
field: aws.cloudtrail.error_code
copy_from: attributes.aws.error.code
ignore_empty_value: true
- set:
field: aws.cloudtrail.error_message
copy_from: attributes.aws.error.message
ignore_empty_value: true
- set:
field: aws.cloudtrail.request_id
copy_from: attributes.aws.request_id
- set:
field: event.id
copy_from: attributes.aws.cloudtrail.event_id
- set:
field: aws.cloudtrail.management_event
copy_from: attributes.aws.event.management
- set:
field: aws.cloudtrail.read_only
copy_from: attributes.aws.event.read_only
- set:
field: aws.cloudtrail.resources
copy_from: attributes.aws.resources
- set:
field: cloud.account.id
copy_from: resource.attributes.cloud.account.id
- set:
field: aws.cloudtrail.recipient_account_id
copy_from: resource.attributes.cloud.account.id
- set:
field: aws.cloudtrail.shared_event_id
copy_from: attributes.aws.shared_event_id
- append:
    field: related.user
    # Triple mustache: {{ }} HTML-escapes the value; {{{ }}} inserts it verbatim.
    value: '{{{attributes.aws.request.parameters.userName}}}'
    # Null-safe `attributes?.` matches the style used elsewhere in this pipeline.
    if: ctx.attributes?.aws?.request?.parameters?.userName != null
- append:
    field: related.user
    # Triple mustache: {{ }} HTML-escapes the value; {{{ }}} inserts it verbatim.
    value: '{{{attributes.aws.request.parameters.newUserName}}}'
    # Null-safe `attributes?.` matches the style used elsewhere in this pipeline.
    if: ctx.attributes?.aws?.request?.parameters?.newUserName != null
# todo: need additionalEventData populated and extracted for mfa, mobile & needs digest file support
# will be addressed once OTel encoding extension support added through
# https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/43403
# Null-safe `attributes?.` throughout: `ctx.attributes.aws?...` would throw a
# null pointer error if `attributes` itself is absent from the document.
- set:
    field: user.changes.name
    copy_from: attributes.aws.request.parameters.newUserName
    if: ctx.attributes?.aws?.request?.parameters?.newUserName != null
- set:
    field: group.name
    copy_from: attributes.aws.request.parameters.groupName
    if: ctx.attributes?.aws?.request?.parameters?.groupName != null
- set:
    field: user.target.name
    copy_from: attributes.aws.request.parameters.userName
    if: ctx.attributes?.aws?.request?.parameters?.userName != null
- set:
field: tls.version
copy_from: attributes.tls.protocol.version
if: ctx.attributes?.tls?.protocol?.version != null
- set:
    field: tls.cipher
    copy_from: attributes.tls.cipher
    # Space before `!=` (was `cipher!= null`) for readable, consistent Painless.
    if: ctx.attributes?.tls?.cipher != null
- set:
    field: tls.client.server_name
    copy_from: attributes.server.address
    if: ctx.attributes?.server?.address != null
- remove:
field:
- attributes
- resource.attributes
ignore_missing: true
- set:
field: pre_extracted
value: true
# Standard integrations failure handler: record which processor failed (type
# and, when present, tag), flag the event as a pipeline error, and preserve the
# original event for reprocessing.
on_failure:
  - append:
      field: error.message
      value: >-
        Processor '{{{ _ingest.on_failure_processor_type }}}'
        {{{#_ingest.on_failure_processor_tag}}}with tag '{{{ _ingest.on_failure_processor_tag }}}'
        {{{/_ingest.on_failure_processor_tag}}}failed with message '{{{ _ingest.on_failure_message }}}'
  - set:
      field: event.kind
      tag: set_pipeline_error_to_event_kind
      value: pipeline_error
  - append:
      field: tags
      value: preserve_original_event
      allow_duplicates: false
Comment on lines +134 to +137
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
on_failure:
- set:
field: error.message
value: '{{ _ingest.on_failure_message }}'
- set:
field: event.kind
value: pipeline_error
tag: set_pipeline_error_into_event_kind
if: ctx.error?.message != null
- append:
field: tags
value: preserve_original_event
allow_duplicates: false
if: ctx.error?.message != null
on_failure:
- append:
field: error.message
value: >-
Processor '{{{ _ingest.on_failure_processor_type }}}'
{{{#_ingest.on_failure_processor_tag}}}with tag '{{{ _ingest.on_failure_processor_tag }}}'
{{{/_ingest.on_failure_processor_tag}}}failed with message '{{{ _ingest.on_failure_message }}}'
- set:
field: event.kind
tag: set_pipeline_error_to_event_kind
value: pipeline_error
- append:
field: tags
value: preserve_original_event
allow_duplicates: false

Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
- name: data_stream.type
type: constant_keyword
description: Data stream type.
- name: data_stream.dataset
type: constant_keyword
description: Data stream dataset.
- name: data_stream.namespace
type: constant_keyword
description: Data stream namespace.
- name: '@timestamp'
type: date
description: Event timestamp.
Loading