diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7361bdde9fa..12491a28bf5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -350,6 +350,7 @@ /packages/oracle_weblogic @elastic/obs-infraobs-integrations /packages/osquery @elastic/sec-windows-platform /packages/osquery_manager @elastic/security-defend-workflows +/packages/otel_ecs_converter @elastic/obs-ds-hosted-services /packages/pad @elastic/ml-ui @elastic/sec-applied-ml /packages/panw @elastic/integration-experience /packages/panw_cortex_xdr @elastic/security-service-integrations diff --git a/packages/aws/changelog.yml b/packages/aws/changelog.yml index 75529878a9f..ccf10fb9691 100644 --- a/packages/aws/changelog.yml +++ b/packages/aws/changelog.yml @@ -1,4 +1,9 @@ # newer versions go on top +- version: "4.4.0" + changes: + - description: Add VPC flow log compatibility for OTel shim pipeline. + type: enhancement + link: https://github.com/elastic/integrations/pull/15570 - version: "4.3.0" changes: - description: Improve documentation to align with new guidelines. diff --git a/packages/aws/data_stream/cloudtrail/elasticsearch/ingest_pipeline/default.yml b/packages/aws/data_stream/cloudtrail/elasticsearch/ingest_pipeline/default.yml index 8ca248a405b..7b2574fec65 100644 --- a/packages/aws/data_stream/cloudtrail/elasticsearch/ingest_pipeline/default.yml +++ b/packages/aws/data_stream/cloudtrail/elasticsearch/ingest_pipeline/default.yml @@ -4,6 +4,7 @@ processors: - rename: field: message target_field: event.original + ignore_missing: true if: ctx.event?.original == null description: 'Renames the original `message` field to `event.original` to store a copy of the original message. The `event.original` field is not touched if the document already has one; it may happen when Logstash sends the document.' 
- remove: @@ -18,6 +19,7 @@ processors: - json: field: event.original target_field: json + if: ctx.pre_extracted != true - pipeline: if: ctx.json?.preview != null name: '{{ IngestPipeline "third-party" }}' @@ -28,11 +30,13 @@ processors: field: json.eventTime target_field: "@timestamp" ignore_failure: true + if: ctx.pre_extracted != true formats: - ISO8601 - script: description: Drops null/empty values recursively lang: painless + if: ctx.pre_extracted != true source: | boolean drop(Object o) { if (o == null || o == "") { @@ -1787,6 +1791,7 @@ processors: field: - json - _conf + - pre_extracted ignore_missing: true - script: tag: remove_nulls diff --git a/packages/aws/data_stream/elb_logs/elasticsearch/ingest_pipeline/default.yml b/packages/aws/data_stream/elb_logs/elasticsearch/ingest_pipeline/default.yml index 521161e2e57..0ab79b7ef69 100644 --- a/packages/aws/data_stream/elb_logs/elasticsearch/ingest_pipeline/default.yml +++ b/packages/aws/data_stream/elb_logs/elasticsearch/ingest_pipeline/default.yml @@ -9,14 +9,15 @@ processors: field: message target_field: event.original ignore_missing: true - if: 'ctx.event?.original == null' + if: ctx.event?.original == null description: 'Renames the original `message` field to `event.original` to store a copy of the original message. The `event.original` field is not touched if the document already has one; it may happen when Logstash sends the document.' - remove: field: message ignore_missing: true - if: 'ctx.event?.original != null' + if: ctx.event?.original != null description: 'The `message` field is no longer required if the document has an `event.original` field.' 
- grok: + if: ctx.pre_extracted != true field: event.original # Classic ELB patterns documented in https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html # ELB v2 Application load balancers https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html @@ -182,6 +183,7 @@ processors: separator: ' ' ignore_missing: true - date: + if: ctx.pre_extracted != true field: _tmp.timestamp formats: - ISO8601 @@ -264,6 +266,10 @@ processors: field: ["aws.elb.leaf_client_cert_not_after_str", "aws.elb.leaf_client_cert_not_before_str"] ignore_missing: true + - remove: + field: + - pre_extracted + ignore_missing: true on_failure: - set: field: event.kind diff --git a/packages/aws/data_stream/vpcflow/elasticsearch/ingest_pipeline/default.yml b/packages/aws/data_stream/vpcflow/elasticsearch/ingest_pipeline/default.yml index 8764003dd59..984b8ed2cb4 100644 --- a/packages/aws/data_stream/vpcflow/elasticsearch/ingest_pipeline/default.yml +++ b/packages/aws/data_stream/vpcflow/elasticsearch/ingest_pipeline/default.yml @@ -25,50 +25,51 @@ processors: field: event.category value: [network] - drop: - if: 'ctx.event?.original.startsWith("version") || ctx.event?.original.startsWith("instance-id")' + if: ctx.pre_extracted != true && (ctx.event?.original.startsWith("version") || ctx.event?.original.startsWith("instance-id")) - dissect: field: event.original pattern: '{"message":"%{event.original}"}' + if: ctx.pre_extracted != true ignore_failure: true - script: lang: painless - if: ctx.event?.original != null + if: ctx.event?.original != null && ctx.pre_extracted != true source: >- ctx._temp_ = new HashMap(); ctx._temp_.message_token_count = ctx.event?.original.splitOnToken(" ").length; - dissect: field: event.original pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} 
%{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status}' - if: ctx?._temp_?.message_token_count == 14 + if: ctx._temp_?.message_token_count == 14 && ctx.pre_extracted != true - dissect: field: event.original pattern: '%{aws.vpcflow.instance_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr}' - if: ctx?._temp_?.message_token_count == 6 + if: ctx?._temp_?.message_token_count == 6 && ctx.pre_extracted != true - dissect: field: event.original pattern: '%{aws.vpcflow.version} %{aws.vpcflow.interface_id} %{aws.vpcflow.account_id} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{aws.vpcflow.action} %{aws.vpcflow.log_status}' - if: ctx?._temp_?.message_token_count == 17 + if: ctx?._temp_?.message_token_count == 17 && ctx.pre_extracted != true - dissect: field: event.original pattern: '%{aws.vpcflow.version} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.account_id} %{aws.vpcflow.type} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{aws.vpcflow.protocol} %{aws.vpcflow.bytes} %{aws.vpcflow.packets} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.log_status}' - if: ctx?._temp_?.message_token_count == 21 + if: ctx?._temp_?.message_token_count == 21 && ctx.pre_extracted != true - dissect: field: event.original pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} 
%{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path}' - if: ctx?._temp_?.message_token_count == 29 + if: ctx?._temp_?.message_token_count == 29 && ctx.pre_extracted != true - dissect: field: event.original description: default format for transit gateway vpc flow logs, covering versions v2 through v6. pattern: '%{aws.vpcflow.version} %{aws.vpcflow.resource_type} %{aws.vpcflow.account_id} %{aws.vpcflow.tgw_id} %{aws.vpcflow.tgw_attachment_id} %{aws.vpcflow.tgw_src_vpc_account_id} %{aws.vpcflow.tgw_dst_vpc_account_id} %{aws.vpcflow.tgw_src_vpc_id} %{aws.vpcflow.tgw_dst_vpc_id} %{aws.vpcflow.tgw_src_subnet_id} %{aws.vpcflow.tgw_dst_subnet_id} %{aws.vpcflow.tgw_src_eni} %{aws.vpcflow.tgw_dst_eni} %{aws.vpcflow.tgw_src_az_id} %{aws.vpcflow.tgw_dst_az_id} %{aws.vpcflow.tgw_pair_attachment_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.log_status} %{aws.vpcflow.type} %{aws.vpcflow.packets_lost_no_route} %{aws.vpcflow.packets_lost_blackhole} %{aws.vpcflow.packets_lost_mtu_exceeded} %{aws.vpcflow.packets_lost_ttl_expired} %{aws.vpcflow.tcp_flags} %{cloud.region} %{network.direction} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service}' - if: ctx?._temp_?.message_token_count == 36 + if: ctx?._temp_?.message_token_count == 36 && ctx.pre_extracted != true - dissect: field: event.original pattern: 
'%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path} %{aws.vpcflow.ecs_cluster_arn} %{aws.vpcflow.ecs_cluster_name} %{aws.vpcflow.ecs_container_instance_arn} %{aws.vpcflow.ecs_container_instance_id} %{aws.vpcflow.ecs_container_id} %{aws.vpcflow.ecs_second_container_id} %{aws.vpcflow.ecs_service_name} %{aws.vpcflow.ecs_task_definition_arn} %{aws.vpcflow.ecs_task_arn} %{aws.vpcflow.ecs_task_id}' - if: ctx?._temp_?.message_token_count == 39 + if: ctx?._temp_?.message_token_count == 39 && ctx.pre_extracted != true - dissect: field: event.original pattern: '%{aws.vpcflow.version} %{aws.vpcflow.account_id} %{aws.vpcflow.interface_id} %{aws.vpcflow.srcaddr} %{aws.vpcflow.dstaddr} %{aws.vpcflow.srcport} %{aws.vpcflow.dstport} %{aws.vpcflow.protocol} %{aws.vpcflow.packets} %{aws.vpcflow.bytes} %{aws.vpcflow.start} %{aws.vpcflow.end} %{aws.vpcflow.action} %{aws.vpcflow.log_status} %{aws.vpcflow.vpc_id} %{aws.vpcflow.subnet_id} %{aws.vpcflow.instance_id} %{aws.vpcflow.tcp_flags} %{aws.vpcflow.type} %{aws.vpcflow.pkt_srcaddr} %{aws.vpcflow.pkt_dstaddr} %{cloud.region} %{cloud.availability_zone} %{aws.vpcflow.sublocation.type} %{aws.vpcflow.sublocation.id} %{aws.vpcflow.pkt_src_service} %{aws.vpcflow.pkt_dst_service} %{network.direction} %{aws.vpcflow.traffic_path} %{aws.vpcflow.ecs_cluster_arn} %{aws.vpcflow.ecs_cluster_name} 
%{aws.vpcflow.ecs_container_instance_arn} %{aws.vpcflow.ecs_container_instance_id} %{aws.vpcflow.ecs_container_id} %{aws.vpcflow.ecs_second_container_id} %{aws.vpcflow.ecs_service_name} %{aws.vpcflow.ecs_task_definition_arn} %{aws.vpcflow.ecs_task_arn} %{aws.vpcflow.ecs_task_id} %{aws.vpcflow.reject_reason}' - if: ctx?._temp_?.message_token_count == 40 + if: ctx?._temp_?.message_token_count == 40 && ctx.pre_extracted != true # Convert Unix epoch to timestamp - date: @@ -77,18 +78,21 @@ processors: ignore_failure: true formats: - UNIX + if: ctx.pre_extracted != true - date: field: aws.vpcflow.start target_field: event.start ignore_failure: true formats: - UNIX + if: ctx.pre_extracted != true - date: field: aws.vpcflow.end target_field: event.end ignore_failure: true formats: - UNIX + if: ctx.pre_extracted != true - remove: field: - aws.vpcflow.start @@ -391,6 +395,7 @@ processors: - aws.vpcflow.ecs_container_instance_id - aws.vpcflow.ecs_service_name - aws.vpcflow.reject_reason + - pre_extracted ignore_missing: true on_failure: - set: diff --git a/packages/aws/manifest.yml b/packages/aws/manifest.yml index d204519f21c..0ff01b28a1f 100644 --- a/packages/aws/manifest.yml +++ b/packages/aws/manifest.yml @@ -1,7 +1,7 @@ format_version: 3.3.2 name: aws title: AWS -version: "4.3.0" +version: "4.4.0" description: Collect logs and metrics from Amazon Web Services (AWS) with Elastic Agent.
type: integration categories: diff --git a/packages/otel_ecs_converter/_dev/build/build.yml b/packages/otel_ecs_converter/_dev/build/build.yml new file mode 100644 index 00000000000..dd0bfe313fc --- /dev/null +++ b/packages/otel_ecs_converter/_dev/build/build.yml @@ -0,0 +1,4 @@ +dependencies: + ecs: + reference: git@v8.17.0 + import_mappings: true diff --git a/packages/otel_ecs_converter/_dev/build/docs/README.md b/packages/otel_ecs_converter/_dev/build/docs/README.md new file mode 100644 index 00000000000..08c5b8dfb3d --- /dev/null +++ b/packages/otel_ecs_converter/_dev/build/docs/README.md @@ -0,0 +1,40 @@ +# OTel ECS converter + +The OTel ECS converter integration provides pipelines to convert OTel native fields to ECS fields in documents. +The table below lists the supported data sources, source OTel data sets and destination ECS data sets: + +| Data source | OTel data set (source) | ECS data set (destination) | +|-----------------|------------------------|----------------------------| +| AWS ELB logs | aws.elbaccess.otel | aws.elb_logs | +| VPC flow logs | aws.vpcflow.otel | aws.vpcflow | +| CloudTrail logs | aws.cloudtrail.otel | aws.cloudtrail | + +## Instructions + +This integration works on OTel documents. Hence, you should ingest OTel native documents from sources stated above. +The destination data sets are installed with the **AWS integration**. Hence, you must install the integration. + +1. Install the **OTel ECS converter** integration + + Search the integration from the catalog, + + ![otel_converter.png](../img/otel_converter.png) + + Then install assets through settings section, + + ![otel_converter_install.png](../img/otel_converter_install.png) + + This installs the required pipelines which parse and re-route to destination data set + +2. Install the **AWS integration** assets + + You only need to install the assets of the integration. To install them, + find the **AWS** integration by searching or browsing the catalog.
+ + ![aws.png](../img/aws.png) + + Navigate to the **Settings** tab and click **Install AWS**. Confirm by clicking **Install AWS** in the popup. + + ![aws_assets.png](../img/aws_assets.png) + +3. Ingest OTel native data into the data sets \ No newline at end of file diff --git a/packages/otel_ecs_converter/changelog.yml b/packages/otel_ecs_converter/changelog.yml new file mode 100644 index 00000000000..5678881b404 --- /dev/null +++ b/packages/otel_ecs_converter/changelog.yml @@ -0,0 +1,6 @@ +# newer versions go on top +- version: "0.1.0" + changes: + - description: Introduce OTel converter pipelines to map OTel attributes to ECS fields. + type: enhancement + link: https://github.com/elastic/integrations/pull/15570 diff --git a/packages/otel_ecs_converter/data_stream/aws_cloudtrail/elasticsearch/ingest_pipeline/default.yml b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/elasticsearch/ingest_pipeline/default.yml new file mode 100644 index 00000000000..35ae2c9649e --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/elasticsearch/ingest_pipeline/default.yml @@ -0,0 +1,144 @@ +--- +description: Pipeline for processing sample logs +processors: + - dot_expander: + field: "*" + path: attributes + - dot_expander: + field: "*" + path: resource.attributes + - set: + field: aws.cloudtrail.event_version + copy_from: attributes.aws.cloudtrail.event_version + - set: + field: aws.cloudtrail.user_identity.type + copy_from: attributes.aws.principal.type + - set: + field: user.name + copy_from: attributes.user.name + ignore_empty_value: true + - set: + field: user.id + copy_from: attributes.aws.principal.id + ignore_empty_value: true + - set: + field: aws.cloudtrail.user_identity.arn + copy_from: attributes.aws.principal.arn + ignore_empty_value: true + - set: + field: aws.cloudtrail.user_identity.access_key_id + copy_from: attributes.aws.access_key.id + ignore_empty_value: true + - set: + field: event.provider + copy_from: attributes.rpc.service + - 
set: + field: event.action + copy_from: attributes.rpc.method + - set: + field: aws.cloudtrail.event_type + copy_from: attributes.rpc.system + - set: + field: aws.cloudtrail.event_category + copy_from: attributes.aws.event.category + ignore_empty_value: true + - set: + field: cloud.region + copy_from: resource.attributes.cloud.region + - set: + field: json.sourceIPAddress + copy_from: attributes.source.address + - user_agent: + field: attributes.user_agent.original + target_field: user_agent + on_failure: + - rename: + field: attributes.user_agent.original + target_field: user_agent.original + ignore_failure: true + - set: + field: aws.cloudtrail.error_code + copy_from: attributes.aws.error.code + ignore_empty_value: true + - set: + field: aws.cloudtrail.error_message + copy_from: attributes.aws.error.message + ignore_empty_value: true + - set: + field: aws.cloudtrail.request_id + copy_from: attributes.aws.request_id + - set: + field: event.id + copy_from: attributes.aws.cloudtrail.event_id + - set: + field: aws.cloudtrail.management_event + copy_from: attributes.aws.event.management + - set: + field: aws.cloudtrail.read_only + copy_from: attributes.aws.event.read_only + - set: + field: aws.cloudtrail.resources + copy_from: attributes.aws.resources + - set: + field: cloud.account.id + copy_from: resource.attributes.cloud.account.id + - set: + field: aws.cloudtrail.recipient_account_id + copy_from: resource.attributes.cloud.account.id + - set: + field: aws.cloudtrail.shared_event_id + copy_from: attributes.aws.shared_event_id + - append: + field: related.user + value: '{{{attributes.aws.request.parameters.userName}}}' + if: ctx.attributes.aws?.request?.parameters?.userName != null + - append: + field: related.user + value: '{{{attributes.aws.request.parameters.newUserName}}}' + if: ctx.attributes.aws?.request?.parameters?.newUserName != null + # todo: need additionalEventData populated and extracted for mfa, mobile & needs digest file support + # will be addressed 
once OTel encoding extension support added through + # https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/43403 + - set: + field: user.changes.name + copy_from: attributes.aws.request.parameters.newUserName + if: ctx.attributes.aws?.request?.parameters?.newUserName != null + - set: + field: group.name + copy_from: attributes.aws.request.parameters.groupName + if: ctx.attributes.aws?.request?.parameters?.groupName != null + - set: + field: user.target.name + copy_from: attributes.aws.request.parameters.userName + if: ctx.attributes.aws?.request?.parameters?.userName != null + - set: + field: tls.version + copy_from: attributes.tls.protocol.version + if: ctx.attributes?.tls?.protocol?.version != null + - set: + field: tls.cipher + copy_from: attributes.tls.cipher + if: ctx.attributes?.tls?.cipher!= null + - set: + field: tls.client.server_name + copy_from: attributes.server.address + if: ctx.attributes?.server?.address!= null + - remove: + field: + - attributes + - resource.attributes + ignore_missing: true + - set: + field: pre_extracted + value: true +on_failure: + - set: + field: event.kind + value: pipeline_error + tag: set_pipeline_error_into_event_kind + - append: + field: error.message + value: >- + Processor '{{{ _ingest.on_failure_processor_type }}}' + {{{#_ingest.on_failure_processor_tag}}}with tag '{{{ _ingest.on_failure_processor_tag }}}' + {{{/_ingest.on_failure_processor_tag}}}failed with message '{{{ _ingest.on_failure_message }}}' \ No newline at end of file diff --git a/packages/otel_ecs_converter/data_stream/aws_cloudtrail/fields/base-fields.yml b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/fields/base-fields.yml new file mode 100644 index 00000000000..7c798f4534c --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/fields/base-fields.yml @@ -0,0 +1,12 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. 
+- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. diff --git a/packages/otel_ecs_converter/data_stream/aws_cloudtrail/manifest.yml b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/manifest.yml new file mode 100644 index 00000000000..ecc63e29d20 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/manifest.yml @@ -0,0 +1,6 @@ +title: "Route OTel AWS CloudTrail logs to ECS" +type: logs +dataset: aws.cloudtrail.otel +elasticsearch: + dynamic_dataset: true + dynamic_namespace: true diff --git a/packages/otel_ecs_converter/data_stream/aws_cloudtrail/routing_rules.yml b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/routing_rules.yml new file mode 100644 index 00000000000..1d9dacb4e51 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_cloudtrail/routing_rules.yml @@ -0,0 +1,8 @@ +- source_dataset: aws.cloudtrail.otel + rules: + # Route to aws.cloudtrail dataset if event parsing is successful + - target_dataset: aws.cloudtrail + if: ctx.pre_extracted == true + namespace: + - "{{data_stream.namespace}}" + - default diff --git a/packages/otel_ecs_converter/data_stream/aws_elblogs/elasticsearch/ingest_pipeline/default.yml b/packages/otel_ecs_converter/data_stream/aws_elblogs/elasticsearch/ingest_pipeline/default.yml new file mode 100644 index 00000000000..7bfa1ca87b5 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_elblogs/elasticsearch/ingest_pipeline/default.yml @@ -0,0 +1,84 @@ +--- +description: "Pipeline for processing sample logs" + +processors: + - dot_expander: + field: "*" + path: attributes + - dot_expander: + field: "*" + path: resource.attributes + # Common fields + - set: + field: aws.elb.type + copy_from: attributes.network.protocol.name + - set: + field: aws.elb.name + copy_from: 
resource.attributes.cloud.resource_id + - set: + field: source.address + copy_from: attributes.client.address + - set: + field: source.port + copy_from: attributes.client.port + - set: + field: http.request.body.bytes + copy_from: attributes.http.request.size + - set: + field: http.response.body.bytes + copy_from: attributes.http.response.size + - set: + field: source.bytes + copy_from: attributes.http.request.size + - set: + field: destination.bytes + copy_from: attributes.http.response.size + # Fields that may be missing between ALB, NLB & CLB + - set: + if: 'ctx?.attributes?.http?.request?.method != null' + field: http.request.method + copy_from: attributes.http.request.method + - set: + if: 'ctx?.attributes?.url?.full != null' + field: _tmp.uri_orig + copy_from: attributes.url.full + - set: + if: 'ctx?.attributes?.network?.protocol?.version != null' + field: http.version + copy_from: attributes.network.protocol.version + - set: + if: 'ctx?.attributes?.tls?.cipher != null' + field: ssl_cipher + copy_from: attributes.tls.cipher + - set: + if: 'ctx?.attributes?.tls?.protocol?.version != null' + field: ssl_protocol + copy_from: attributes.tls.protocol.version + - set: + if: 'ctx?.attributes?.aws?.elb?.tls?.listener?.resource_id != null' + field: aws.elb.listener + copy_from: attributes.aws.elb.tls.listener.resource_id + - set: + if: 'ctx?.attributes?.aws?.elb?.status?.code!= null' + field: http.response.status_code + copy_from: attributes.aws.elb.status.code + # Drop OTel attributes + - remove: + field: + - attributes + - resource.attributes + ignore_missing: true + - set: + field: pre_extracted + value: true +on_failure: + - set: + field: event.kind + value: pipeline_error + tag: set_pipeline_error_into_event_kind + - append: + field: error.message + value: >- + Processor '{{{ _ingest.on_failure_processor_type }}}' + {{{#_ingest.on_failure_processor_tag}}}with tag '{{{ _ingest.on_failure_processor_tag }}}' + {{{/_ingest.on_failure_processor_tag}}}failed with 
message '{{{ _ingest.on_failure_message }}}' \ No newline at end of file diff --git a/packages/otel_ecs_converter/data_stream/aws_elblogs/fields/base-fields.yml b/packages/otel_ecs_converter/data_stream/aws_elblogs/fields/base-fields.yml new file mode 100644 index 00000000000..7c798f4534c --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_elblogs/fields/base-fields.yml @@ -0,0 +1,12 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. diff --git a/packages/otel_ecs_converter/data_stream/aws_elblogs/manifest.yml b/packages/otel_ecs_converter/data_stream/aws_elblogs/manifest.yml new file mode 100644 index 00000000000..0da03e64290 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_elblogs/manifest.yml @@ -0,0 +1,6 @@ +title: "Route OTel AWS ELB logs to ECS" +type: logs +dataset: aws.elbaccess.otel +elasticsearch: + dynamic_dataset: true + dynamic_namespace: true diff --git a/packages/otel_ecs_converter/data_stream/aws_elblogs/routing_rules.yml b/packages/otel_ecs_converter/data_stream/aws_elblogs/routing_rules.yml new file mode 100644 index 00000000000..1464415d771 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_elblogs/routing_rules.yml @@ -0,0 +1,8 @@ +- source_dataset: aws.elbaccess.otel + rules: + # Route to aws.elb_logs dataset if event parsing is successful + - target_dataset: aws.elb_logs + if: ctx.pre_extracted == true + namespace: + - "{{data_stream.namespace}}" + - default diff --git a/packages/otel_ecs_converter/data_stream/aws_vpcflow/elasticsearch/ingest_pipeline/default.yml b/packages/otel_ecs_converter/data_stream/aws_vpcflow/elasticsearch/ingest_pipeline/default.yml new file mode 100644 index 00000000000..9763b1b21d6 --- /dev/null 
+++ b/packages/otel_ecs_converter/data_stream/aws_vpcflow/elasticsearch/ingest_pipeline/default.yml @@ -0,0 +1,78 @@ +--- +description: Pipeline for processing sample logs +processors: + - dot_expander: + field: "*" + path: attributes + - dot_expander: + field: "*" + path: resource.attributes + - set: + field: aws.vpcflow.version + copy_from: attributes.aws.vpc.flow.log.version + - set: + field: aws.vpcflow.account_id + copy_from: resource.attributes.cloud.account.id + - set: + field: aws.vpcflow.interface_id + copy_from: attributes.network.interface.name + - set: + field: aws.vpcflow.srcaddr + copy_from: attributes.source.address + - set: + field: aws.vpcflow.dstaddr + copy_from: attributes.destination.address + - set: + field: aws.vpcflow.srcport + copy_from: attributes.source.port + - set: + field: aws.vpcflow.dstport + copy_from: attributes.destination.port + - set: + field: aws.vpcflow.protocol + copy_from: attributes.network.protocol.name + - set: + field: aws.vpcflow.packets + copy_from: attributes.aws.vpc.flow.packets + - set: + field: aws.vpcflow.bytes + copy_from: attributes.aws.vpc.flow.bytes + - script: + lang: painless + source: | + ctx.tmpStart = Instant.ofEpochMilli(ctx.attributes.aws.vpc.flow.start).toEpochMilli(); + - date: + field: tmpStart + target_field: event.start + ignore_failure: true + formats: + - UNIX_MS + - set: + field: event.end + copy_from: '@timestamp' + - set: + field: aws.vpcflow.action + copy_from: attributes.aws.vpc.flow.action + - set: + field: aws.vpcflow.log_status + copy_from: attributes.aws.vpc.flow.status + - remove: + field: + - attributes + - resource.attributes + - tmpStart + ignore_missing: true + - set: + field: pre_extracted + value: true +on_failure: + - set: + field: event.kind + value: pipeline_error + tag: set_pipeline_error_into_event_kind + - append: + field: error.message + value: >- + Processor '{{{ _ingest.on_failure_processor_type }}}' + {{{#_ingest.on_failure_processor_tag}}}with tag '{{{ _ingest.on_failure_processor_tag }}}' +
{{{/_ingest.on_failure_processor_tag}}}failed with message '{{{ _ingest.on_failure_message }}}' \ No newline at end of file diff --git a/packages/otel_ecs_converter/data_stream/aws_vpcflow/fields/base-fields.yml b/packages/otel_ecs_converter/data_stream/aws_vpcflow/fields/base-fields.yml new file mode 100644 index 00000000000..7c798f4534c --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_vpcflow/fields/base-fields.yml @@ -0,0 +1,12 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. diff --git a/packages/otel_ecs_converter/data_stream/aws_vpcflow/manifest.yml b/packages/otel_ecs_converter/data_stream/aws_vpcflow/manifest.yml new file mode 100644 index 00000000000..e2730a22336 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_vpcflow/manifest.yml @@ -0,0 +1,6 @@ +title: "Route OTel AWS VPC Flow Logs to ECS" +type: logs +dataset: aws.vpcflow.otel +elasticsearch: + dynamic_dataset: true + dynamic_namespace: true diff --git a/packages/otel_ecs_converter/data_stream/aws_vpcflow/routing_rules.yml b/packages/otel_ecs_converter/data_stream/aws_vpcflow/routing_rules.yml new file mode 100644 index 00000000000..81347609991 --- /dev/null +++ b/packages/otel_ecs_converter/data_stream/aws_vpcflow/routing_rules.yml @@ -0,0 +1,8 @@ +- source_dataset: aws.vpcflow.otel + rules: + # Route to aws.vpcflow dataset if event parsing is successful + - target_dataset: aws.vpcflow + if: ctx.pre_extracted == true + namespace: + - "{{data_stream.namespace}}" + - default diff --git a/packages/otel_ecs_converter/docs/README.md b/packages/otel_ecs_converter/docs/README.md new file mode 100644 index 00000000000..08c5b8dfb3d --- /dev/null +++ b/packages/otel_ecs_converter/docs/README.md @@ 
-0,0 +1,40 @@ +# OTel ECS converter + +The OTel ECS converter integration provides pipelines to convert OTel native fields to ECS fields in documents. +The table below lists the supported data sources, source OTel data sets and destination ECS data sets: + +| Data source | OTel data set (source) | ECS data set (destination) | +|-----------------|------------------------|----------------------------| +| AWS ELB logs | aws.elbaccess.otel | aws.elb_logs | +| VPC flow logs | aws.vpcflow.otel | aws.vpcflow | +| CloudTrail logs | aws.cloudtrail.otel | aws.cloudtrail | + +## Instructions + +This integration works on OTel documents. Hence, you should ingest OTel native documents from sources stated above. +The destination data sets are installed with the **AWS integration**. Hence, you must install the integration. + +1. Install the **OTel ECS converter** integration + + Search the integration from the catalog, + + ![otel_converter.png](../img/otel_converter.png) + + Then install assets through settings section, + + ![otel_converter_install.png](../img/otel_converter_install.png) + + This installs the required pipelines which parse and re-route to destination data set + +2. Install the **AWS integration** assets + + You only need to install the assets of the integration. To install them, + find the **AWS** integration by searching or browsing the catalog. + + ![aws.png](../img/aws.png) + + Navigate to the **Settings** tab and click **Install AWS**. Confirm by clicking **Install AWS** in the popup. + + ![aws_assets.png](../img/aws_assets.png) + +3.
Ingest OTel native data into the data sets \ No newline at end of file diff --git a/packages/otel_ecs_converter/img/aws.png b/packages/otel_ecs_converter/img/aws.png new file mode 100644 index 00000000000..176a6866a4b Binary files /dev/null and b/packages/otel_ecs_converter/img/aws.png differ diff --git a/packages/otel_ecs_converter/img/aws_assets.png b/packages/otel_ecs_converter/img/aws_assets.png new file mode 100644 index 00000000000..42eb59b516d Binary files /dev/null and b/packages/otel_ecs_converter/img/aws_assets.png differ diff --git a/packages/otel_ecs_converter/img/otel_converter.png b/packages/otel_ecs_converter/img/otel_converter.png new file mode 100644 index 00000000000..5e5b345b204 Binary files /dev/null and b/packages/otel_ecs_converter/img/otel_converter.png differ diff --git a/packages/otel_ecs_converter/img/otel_converter_install.png b/packages/otel_ecs_converter/img/otel_converter_install.png new file mode 100644 index 00000000000..1196347c65f Binary files /dev/null and b/packages/otel_ecs_converter/img/otel_converter_install.png differ diff --git a/packages/otel_ecs_converter/img/otel_ecs.svg b/packages/otel_ecs_converter/img/otel_ecs.svg new file mode 100644 index 00000000000..3754fbe8a7b --- /dev/null +++ b/packages/otel_ecs_converter/img/otel_ecs.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/packages/otel_ecs_converter/manifest.yml b/packages/otel_ecs_converter/manifest.yml new file mode 100644 index 00000000000..b6ad8869ecc --- /dev/null +++ b/packages/otel_ecs_converter/manifest.yml @@ -0,0 +1,21 @@ +format_version: 3.4.1 +name: otel_ecs_converter +title: "OTel ECS converter" +version: 0.1.0 +description: "This integration contain converter pipelines to map OTel fields to ECS fields, allowing OTel data sources to work with existing ECS assets" +type: integration +categories: + - cloud + - aws + - observability +conditions: + kibana: + version: "^9.1.0" +owner: + github: elastic/obs-ds-hosted-services + type: 
elastic +icons: + - src: /img/otel_ecs.svg + title: logo OTel ecs converter + size: 64x64 + type: image/svg+xml