diff --git a/.github/workflows/deb-build.yaml b/.github/workflows/deb-build.yaml index 8c42ec04..4a00fcf8 100644 --- a/.github/workflows/deb-build.yaml +++ b/.github/workflows/deb-build.yaml @@ -16,7 +16,7 @@ jobs: arch: - amd64 target: - - "debian:stretch" + - "debian:bookworm" - "debian:buster" - "debian:bullseye" - "ubuntu:bionic" @@ -25,21 +25,102 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install prereq packages run: ./debpkg-setup.sh - name: Build packages run: ./debpkg-build.sh - - name: Store packages + - name: Set environment variables for upload + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Copy packages to uploadable location run: | - export DIRNAME=`echo ${{ matrix.target }} | tr ':' '_'` mkdir -p packages/${DIRNAME} cp ../*.deb packages/${DIRNAME}/ - - name: Publish packages to cloudsmith - uses: wanduow/action-cloudsmith-upload-packages@v1 + - name: Store packages + uses: actions/upload-artifact@v3 with: - path: packages/ - repo: ${{ secrets.CLOUDSMITH_OWNER }}/openli - username: salcock - api_key: ${{ secrets.CLOUDSMITH_API_KEY }} + name: packages-${{ env.DIRNAME }} + path: packages/${{ env.DIRNAME }}/*.deb + retention-days: 1 + test: + runs-on: ubuntu-latest + container: + image: ${{ matrix.target }} + strategy: + fail-fast: false + matrix: + arch: + - amd64 + target: + - "debian:buster" + - "debian:bullseye" + - "ubuntu:bionic" + - "ubuntu:focal" + - "ubuntu:jammy" + - "debian:bookworm" + needs: build + steps: + - name: Set environment variables for download + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: packages-${{ env.DIRNAME }} + - name: Add repositories + run: | + apt update -y + apt install -y apt-transport-https curl + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libwandio/cfg/setup/bash.deb.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libwandder/cfg/setup/bash.deb.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libtrace/cfg/setup/bash.deb.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/openli/cfg/setup/bash.deb.sh' | bash + - name: Test package install + env: + DEBIAN_FRONTEND: noninteractive + run: | + apt update -y + find . -name "*.deb" | xargs apt install -y + - name: Test package removal + env: + DEBIAN_FRONTEND: noninteractive + run: | + apt remove -y --purge openli-provisioner + apt remove -y --purge openli-collector + apt remove -y --purge openli-mediator + + publish: + runs-on: ubuntu-latest + container: + image: ${{ matrix.target }} + strategy: + fail-fast: false + matrix: + arch: + - amd64 + target: + - "debian:buster" + - "debian:bullseye" + - "debian:bookworm" + - "ubuntu:bionic" + - "ubuntu:focal" + - "ubuntu:jammy" + needs: test + steps: + - name: Set environment variables for download + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: packages-${{ env.DIRNAME }} + - name: Copy packages + run: | + mkdir -p packages/${DIRNAME} + find . 
-name "*.deb" | xargs cp -t packages/${DIRNAME}/ + - name: Publish package to cloudsmith + uses: wanduow/action-cloudsmith-upload-packages@v1 + with: + path: packages/ + repo: ${{ secrets.CLOUDSMITH_OWNER }}/openli + username: salcock + api_key: ${{ secrets.CLOUDSMITH_API_KEY }} diff --git a/.github/workflows/rpm-build.yaml b/.github/workflows/rpm-build.yaml index 67962868..2e98a690 100644 --- a/.github/workflows/rpm-build.yaml +++ b/.github/workflows/rpm-build.yaml @@ -1,4 +1,4 @@ -name: Packaging for Centos and Fedora +name: Packaging for RPM on: push: @@ -7,40 +7,130 @@ on: jobs: build: - runs-on: ubuntu-latest - container: - image: ${{ matrix.target }} - strategy: - fail-fast: false - matrix: - arch: - - amd64 - target: - - "centos:7" - - "fedora:35" - - "fedora:36" - - "rockylinux:8.5" - - "almalinux:8.5" - - - steps: + runs-on: ubuntu-latest + container: + image: ${{ matrix.target }} + strategy: + fail-fast: false + matrix: + arch: + - amd64 + target: + - "rockylinux:8" + - "rockylinux:9" + - "fedora:37" + - "fedora:38" + - "centos:7" + + steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install prereq packages run: ./rpmpkg-setup.sh ${{ matrix.target }} - name: Build packages - run: ./rpmpkg-build.sh ${{ matrix.target }} - - name: Store packages + run: ./rpmpkg-build.sh + - name: Set environment variables for upload + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Copy packages to uploadable location run: | - export DIRNAME=`echo ${{ matrix.target }} | tr ':' '_'` mkdir -p packages/${DIRNAME} cp ~/rpmbuild/RPMS/x86_64/*.rpm packages/${DIRNAME}/ - - name: Publish packages to cloudsmith - uses: salcock/action-cloudsmith-upload-packages@1.6 + - name: Store packages + uses: actions/upload-artifact@v3 with: - path: packages/ - repo: ${{ secrets.CLOUDSMITH_OWNER }}/openli - username: salcock - api_key: ${{ secrets.CLOUDSMITH_API_KEY }} + name: packages-${{ env.DIRNAME }} + path: packages/${{ env.DIRNAME }}/*.rpm + retention-days: 1 + + test: + runs-on: ubuntu-latest + container: + image: ${{ matrix.target }} + strategy: + fail-fast: false + matrix: + arch: + - amd64 + target: + - "rockylinux:8" + - "rockylinux:9" + - "fedora:37" + - "fedora:38" + - "centos:7" + needs: build + steps: + - name: Set environment variables for download + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: packages-${{ env.DIRNAME }} + - name: Add repositories + env: + TARGET: ${{ matrix.target }} + run: | + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libwandio/cfg/setup/bash.rpm.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libwandder/cfg/setup/bash.rpm.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/libtrace/cfg/setup/bash.rpm.sh' | bash + curl -1sLf 'https://dl.cloudsmith.io/public/wand/openli/cfg/setup/bash.rpm.sh' | bash + curl -1sLf https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey -o packagecloud-rabbitmq-key.asc + gpg --import packagecloud-rabbitmq-key.asc + curl -s https://packagecloud.io/install/repositories/rabbitmq/rabbitmq-server/script.rpm.sh | bash + curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh | bash + if [ "${TARGET}" == "centos:7" ]; then + yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm || true + fi + if [[ "${TARGET}" =~ rocky* ]]; then + dnf install -y dnf-plugins-core epel-release || true + dnf config-manager 
--set-enabled powertools || true + /usr/bin/crb enable || true + fi + - name: Test package install + run: | + yum install -y findutils + find . -name "*.rpm" | xargs yum install -y + - name: Test package removal + run: | + rpm -e openli-provisioner + rpm -e openli-collector + rpm -e openli-mediator + + + publish: + runs-on: ubuntu-latest + container: + image: ${{ matrix.target }} + strategy: + fail-fast: false + matrix: + arch: + - amd64 + target: + - "rockylinux:8" + - "rockylinux:9" + - "fedora:37" + - "fedora:38" + - "centos:7" + needs: test + steps: + - name: Set environment variables for download + run: echo DIRNAME=${{ matrix.target }} | tr ':' '_' >> $GITHUB_ENV + - name: Download artifact + uses: actions/download-artifact@v3 + with: + name: packages-${{ env.DIRNAME }} + - name: Copy packages + run: | + mkdir -p packages/${DIRNAME} + yum install -y findutils + find . -name "*.rpm" | xargs cp -t packages/${DIRNAME}/ + - name: Publish package to cloudsmith + uses: wanduow/action-cloudsmith-upload-packages@v1 + with: + path: packages/ + repo: ${{ secrets.CLOUDSMITH_OWNER }}/openli + username: salcock + api_key: ${{ secrets.CLOUDSMITH_API_KEY }} + diff --git a/AUTHORS b/AUTHORS index 88421086..88361319 100644 --- a/AUTHORS +++ b/AUTHORS @@ -2,7 +2,7 @@ The original OpenLI implementation was written by Shane Alcock, while working for the WAND network research group at the University of Waikato. The current lead developer and maintainer of OpenLI is Shane Alcock (still!). -Email: salcock@waikato.ac.nz +Email: salcock@searchlight.nz ------ diff --git a/README.md b/README.md index dc6ee7b2..71c31eb0 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ OpenLI -- open source ETSI-compliant Lawful Intercept software -Version: 1.0.15 +Version: 1.1.0 --------------------------------------------------------------------------- -Copyright (c) 2018 - 2022 The University of Waikato, Hamilton, New Zealand. +Copyright (c) 2018 - 2023 The University of Waikato, Hamilton, New Zealand. All rights reserved. -This code has been developed by the University of Waikato WAND research group. -For further information please see http://www.wand.net.nz/. +OpenLI was originally developed by the University of Waikato WAND research +group. For further information please see https://www.wand.net.nz/. OpenLI is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -32,7 +32,7 @@ of the software is an initial release and we anticipate that there will still be many bugs and incompatibilities that we have not yet encountered in our testing so far. If you do encounter issues with the OpenLI software, please report them to us via our Github page -(https://github.com/wanduow/openli) so that we can continue to improve the +(https://github.com/OpenLI-NZ/openli) so that we can continue to improve the quality of OpenLI for all of our users. ## ALSO IMPORTANT @@ -60,14 +60,14 @@ recommend that you install OpenLI using a package if you can, rather than building from source. 
Instructions on packaged installs can be found at: - https://github.com/wanduow/openli/wiki/Installing-Debian-Packaged-Version - https://github.com/wanduow/openli/wiki/Installing-via-RPM + https://github.com/OpenLI-NZ/openli/wiki/Installing-Debian-Packaged-Version + https://github.com/OpenLI-NZ/openli/wiki/Installing-via-RPM ## The OpenLI Wiki The best source of documentation for OpenLI is the OpenLI wiki at -https://github.com/wanduow/openli/wiki -- we have specific pages on a number +https://github.com/OpenLI-NZ/openli/wiki -- we have specific pages on a number of topics that may be relevant to OpenLI users (e.g. encryption, the REST API, DPDK with OpenLI, etc.). The wiki tends to be updated more often than the in-code documentation (e.g. the `doc/` directory) as well. @@ -78,11 +78,11 @@ will be more than happy to accept your contribution. ## Dependencies for building from source -* [libtrace 4.0.18 or later](http://research.wand.net.nz/software/libtrace.php) +* [libtrace 4.0.18 or later](https://github.com/LibtraceTeam/libtrace/) (packages for Debian / Ubuntu are available [from WAND](https://cloudsmith.io/~wand/repos/libtrace/packages/) as well). -* [libwandder 2.0.4 or later](https://github.com/wanduow/libwandder/) +* [libwandder 2.0.4 or later](https://github.com/LibtraceTeam/libwandder/) (packages for Debian / Ubuntu are available [from WAND](https://cloudsmith.io/~wand/repos/libwandder/packages/) as well). @@ -119,6 +119,9 @@ will be more than happy to accept your contribution. * libtcmalloc -- Debian / Ubuntu users can install the libgoogle-perftools-dev package. Optional, but highly recommended for performance reasons. +* RabbitMQ Server -- Debian/Ubuntu users can install the rabbitmq-server + package. Optional for the collector, required for the mediator. + ## Building OpenLI To build OpenLI from source, just follow the series of steps given below. @@ -156,6 +159,68 @@ To build OpenLI from source, just follow the series of steps given below. **This last step is optional -- the OpenLI software components should run without needing to be installed.** +## Mediator RabbitMQ Setup +If you have built OpenLI from source, you will also need to perform some +additional manual configuration steps to allow your mediator to be able +to use RabbitMQ server for its internal message passing. + +**Note, you only need to do this for the mediator component and only if +you built the mediator from source rather than using a packaged install.** + +More details can be found at https://github.com/OpenLI-NZ/openli/wiki/RabbitMQ-for-internal-buffering-on-Mediators but a brief set of instructions is +included below: + +First, if you haven't already done so, install RabbitMQ server. 
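For example, on a Debian or Ubuntu mediator host the distribution package is normally sufficient -- a minimal sketch, assuming you are happy to use the distro's rabbitmq-server package rather than the upstream RabbitMQ packages:
```
    apt-get update
    apt-get install rabbitmq-server
```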
+Instructions can be found at https://www.rabbitmq.com/download.html + +Configure RabbitMQ on your mediator to only accept connections from localhost +by adding the following lines to a config file called +`/etc/rabbitmq/rabbitmq.conf` (note, if this file does not exist then just +create it -- if it does exist, just add the config to it): + +``` + listeners.tcp.default = 127.0.0.1:5672 + loopback_users.guest = false +``` + +Start the RabbitMQ service: +``` + service rabbitmq-server restart +``` + +Next, create the OpenLI-med vhost on your RabbitMQ server: +``` + rabbitmqctl add_vhost "OpenLI-med" +``` + +Create the openli.nz user and assign them a password: +``` + rabbitmqctl add_user "openli.nz" "" +``` + +Give the new user permissions to interact with the OpenLI-med vhost: +``` + rabbitmqctl set_permissions -p "OpenLI-med" "openli.nz" ".*" ".*" ".*" +``` + +The last thing you need to do is to provide your OpenLI mediator with the +password for the `openli.nz` user. There are two ways you can do this. +The first is by adding a configuration option to your mediator config file +(e.g. `/etc/openli/mediator-config.yaml`) as shown below: +``` + RMQinternalpass: +``` + +The second is to create a file at `/etc/openli/rmqinternalpass` that contains +ONLY the password that the mediator should use for internal RabbitMQ +interactions. Make sure that the file is only readable by the user that is +going to be running the OpenLI mediator process. + +Note that if you provide the password using both methods, the password in the +mediator config file has precedence over the one provided in +`/etc/openli/rmqinternalpass`. + + ## Running OpenLI OpenLI consists of three software components: the provisioner, the collector @@ -188,7 +253,7 @@ input sources (i.e. capture interfaces) and use multiple threads to spread the collection workload across multiple CPU cores. The recommended way to learn about OpenLI is by taking our tutorial, which can -be found at https://github.com/wanduow/openli/wiki/OpenLI-Tutorial -- the +be found at https://github.com/OpenLI-NZ/openli/wiki/OpenLI-Tutorial -- the tutorial includes practical exercises using containers that will help you become familiar with the OpenLI components and how to configure them. @@ -208,7 +273,7 @@ added, removed or modified and update their behaviour accordingly. Starting from version 1.0.4, the provisioner will also listen on a socket for RESTful HTTP requests that either add or modify the running intercept configuration. The API for interacting with this update socket is documented -at https://github.com/wanduow/openli/wiki/Intercept-Configuration-REST-API +at https://github.com/OpenLI-NZ/openli/wiki/Intercept-Configuration-REST-API ## Common problems with OpenLI @@ -227,10 +292,10 @@ A. Unfortunately there are plenty of reasons why this might happen. Here are https://github.com/LibtraceTeam/libtrace * Try installing the latest 'develop' branch of libwandder from - https://github.com/wanduow/libwandder + https://github.com/LibtraceTeam/libwandder * Try installing the latest 'develop' branch of openli itself from - https://github.com/wanduow/openli + https://github.com/OpenLI-NZ/openli If all else fails, send us an email at openli-support@waikato.ac.nz and someone will try to help you. @@ -265,4 +330,31 @@ A. This means that your collector is not keeping up with the number of with a solution that can scale to your network size (be prepared to pay a significant sum for this, of course). +--- + +Q. 
My mediator is not passing intercept records to the connected agencies and + I see that there are log messages complaining about "OpenLI Mediator: failed + to log into RMQ broker using plain auth". + +A. This means that your RabbitMQ internal password for the mediator is + incorrect. + + If you installed your OpenLI mediator using a package, you may need to + remove the package (using `--purge` if removing a `.deb`) and reinstall. + I would suggest backing up `/etc/openli/mediator-config.yaml` first. + If the issue still persists, remove any `RMQinternalpass` + configuration option that is present in your mediator config file and + try again. + + If you installed your OpenLI mediator manually, check the value of the + `RMQinternalpass` configuration option in your mediator config file. Ensure + that the value for this option matches the password that you provided when + you created the `openli.nz` user in RabbitMQ. If the option does not exist, + add it (and the correct value) to the mediator config file. + + If all else fails, you can reset the `openli.nz` user password by running: + ``` + rabbitmqctl change_password "openli.nz" "" + ``` + diff --git a/configure.ac b/configure.ac index 10f37746..b0a51622 100644 --- a/configure.ac +++ b/configure.ac @@ -1,10 +1,10 @@ # Super primitive configure script -AC_INIT(openli, 1.0.15, salcock@waikato.ac.nz) +AC_INIT([openli],[1.1.0],[shane@alcock.co.nz]) AM_INIT_AUTOMAKE([subdir-objects]) AC_CONFIG_SRCDIR(src/collector/collector.c) -AM_CONFIG_HEADER(config.h) +AC_CONFIG_HEADERS(config.h) AC_CONFIG_MACRO_DIR([m4]) m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) @@ -14,7 +14,7 @@ AC_PREFIX_DEFAULT(/usr/local/) AC_PROG_CC AC_PROG_INSTALL -AC_PROG_LIBTOOL +LT_INIT EXTRA_LIBS="" @@ -74,7 +74,11 @@ if test "x$enable_collector" != "xno"; then if test "$libosip2_found" = 0; then AC_MSG_ERROR(Required library libosipparser2 not found; use LDFLAGS to specify library location) fi - COLLECTOR_LIBS="$COLLECTOR_LIBS -losipparser2" + AC_CHECK_LIB([b64], [base64_decode_block],libb64_found=1,libb64_found=0) + if test "$libb64_found" = 0; then + AC_MSG_ERROR(Required library libb64 not found; use LDFLAGS to specify library location) + fi + COLLECTOR_LIBS="$COLLECTOR_LIBS -losipparser2 -lb64" fi if test "x$enable_provisioner" != "xno"; then @@ -89,6 +93,7 @@ if test "x$enable_provisioner" != "xno"; then fi PROVISIONER_LIBS="$PROVISIONER_LIBS -lmicrohttpd -ljson-c" + COLLECTOR_LIBS="$COLLECTOR_LIBS -lmicrohttpd -ljson-c" if test "x$libssl11_found" = "x1"; then AC_CHECK_LIB([sqlcipher], [sqlite3_key], sqlcipher_found=1, sqlcipher_found=0) @@ -104,15 +109,10 @@ if test "x$enable_provisioner" != "xno"; then fi if test "x$enable_collector" != "xno" -o "x$enable_mediator" != "xno"; then - AC_CHECK_LIB([wandder], [wandder_encode_init_top_ber],libwandder_ber_found=1,libwandder__ber_found=0) + AC_CHECK_LIB([wandder], [wandder_decode_integer_value],libwandder_found=1,libwandder_found=0) - if test "$libwandder_ber_found" = 1; then - AC_DEFINE(HAVE_BER_ENCODING, 1, [defined to 1 if libwandder supports BER encoding]) - else - AC_CHECK_LIB([wandder], [init_wandder_encoder],libwandder_found=1,libwandder_found=0) - if test "$libwandder_found" = 0; then - AC_MSG_ERROR(Required library libwandder not found; use LDFLAGS to specify library location) - fi + if test "$libwandder_found" = 0; then + AC_MSG_ERROR(Required library libwandder 2.0.6 or later not found; use LDFLAGS to specify library location) fi AC_CHECK_LIB([Judy], [JudySLGet],libjudy_found=1,libjudy_found=0) diff --git 
a/debian/changelog b/debian/changelog index 2879bfb3..7251f169 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,109 @@ +openli (1.1.0-1) unstable; urgency=medium + + Changes since 1.0.15 release: + * Mediator now uses rabbitmq for buffering intercept records before sending + them on to their respective agency. + * Mediator now dedicates separate threads to receiving data from each + collector, and separate threads to forwarding intercepts on to each agency. + This will make better use of multiple cores on a mediator device. + * Added support for email interception (as per ETSI TS 102 232-2 standard). + * Added ability to configure intercepts (of any type) to export only IRIs, or + only CCs. + * Include target-Information field in HI1 Operation messages. + * Fixed bug where encoded raw IP data (e.g. intercepted GTP) was missing a + preceding LIID when sent to the mediator. + * Fixed REST API bug that prevented DELETE requests from working for the + coreserver endpoint. + * Fixed bug where IPMM IRIs were using the IRI type "IRI-Report" instead of + the expected "IRI-Continue". + * Fixed bug where VoIP intercepts would not produce CCs if the corresponding + SIP session included a 183 message without any RTP port or IP information + in it. + * Fixed bug where endtimes for VOIP intercepts could not be modified using + the REST API. + + -- Shane Alcock Wed, 14 Jun 2023 18:53:26 +1200 + +openli (1.1.0-1~rc03) unstable; urgency=medium + + * Fix IMAP bug where OpenLI could generate partial download IRIs + without also generating corresponding CCs. + + -- Shane Alcock Thu, 25 May 2023 19:09:19 +1200 + +openli (1.1.0-1~rc02) unstable; urgency=medium + + * Fix bug where VOIP intercepts would not produce CCs if an RTP + endpoint was announced in the "200 OK" message instead of a + "183" message. + + -- Shane Alcock Thu, 25 May 2023 11:27:40 +1200 + +openli (1.1.0-1~rc01) unstable; urgency=medium + + * Add configuration options for the email ingestion socket. + * Include target-Information in HI1 operation messages. + * Fix incorrect IRI types for IPMM IRIs. + * mediator postinst: configure RMQ to listen on localhost only, if + installed on our behalf. + * Tidy up numerous compiler warnings. + + -- Shane Alcock Tue, 23 May 2023 12:00:12 +1200 + +openli (1.1.0-1~beta06) unstable; urgency=medium + + * Fix REST API bug where intercepts returned by GET requests would + include a "tomediate" field instead of "outputhandovers". + + -- Shane Alcock Wed, 19 Apr 2023 11:43:41 +1200 + +openli (1.1.0-1~beta05) unstable; urgency=medium + + * IMAP and POP3: we can now extract the email sender from the mail + body to include in email IRIs + + -- Shane Alcock Mon, 03 Apr 2023 09:59:02 +1200 + +openli (1.1.0-1~beta04) unstable; urgency=medium + + * Add support for email interception for the POP3 protocol. + + -- Shane Alcock Fri, 24 Feb 2023 15:16:08 +1300 + +openli (1.1.0-1~beta03) unstable; urgency=medium + + * Fix various errors when parsing IMAP AUTHENTICATE PLAIN messages. + * Fix bug where not all IMAP APPEND content was being included in + exported CC records. + * Fix install error when upgrading mediator from a previous 1.1.0 + beta version. 
+ + -- Shane Alcock Fri, 03 Feb 2023 15:24:51 +1300 + +openli (1.1.0-1~beta02) unstable; urgency=medium + + * Fix bug where DELETE was not being properly applied to coreservers + via the REST API + * Fix IMAP parsing error when "\r\n" appears inside parentheses + * Add ability to configure intercepts as IRI or CC only intercepts + * Fix IMAP parsing error when parsing LOGIN commands + * Add credential masking for IMAP LOGIN commands + * Add config option for disabling IMAP credential masking + * Add config options to change email session timeouts + + -- Shane Alcock Tue, 24 Jan 2023 09:22:18 +1300 + +openli (1.1.0-1~beta01) unstable; urgency=medium + + * Mediator now uses RabbitMQ to buffer any intercept records that + have been received from collectors but not yet pushed onto a handover. + * Each connected collector is now handled in a separate thread. + * Each agency is now handled in a separate thread. + * Email interception (ETSI TS 102 232-2) is now supported for SMTP and IMAP + protocols. + + -- Shane Alcock Sat, 12 Nov 2022 15:13:10 +1300 + openli (1.0.15-1) unstable; urgency=medium * Fix crash when SDP identities are shared amongst multiple Call-IDs. diff --git a/debian/control b/debian/control index 805ea25c..e7a332ca 100644 --- a/debian/control +++ b/debian/control @@ -1,11 +1,11 @@ Source: openli Section: net Priority: optional -Maintainer: Shane Alcock +Maintainer: Shane Alcock Build-Depends: debhelper-compat (= 12), dh-autoreconf, dh-systemd (>=1.5), libtrace4-dev (>= 4.0.19), libyaml-dev, uthash-dev, libwandder2-dev (>=2.0.4), libjudy-dev, libzmq3-dev, libgoogle-perftools-dev, libosip2-dev, - libssl1.0-dev (>=1.0.2r) | libssl-dev, librabbitmq-dev, + libssl1.0-dev (>=1.0.2r) | libssl-dev, librabbitmq-dev, libb64-dev, libmicrohttpd-dev, libjson-c-dev, libsqlcipher-dev Standards-Version: 4.1.3 Homepage: https://openli.nz @@ -29,7 +29,8 @@ Description: Central provisioning daemon for an OpenLI system Package: openli-mediator Section: net Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, lsb-base, adduser +Depends: ${shlibs:Depends}, ${misc:Depends}, lsb-base, adduser, + rabbitmq-server Recommends: strongswan Description: Mediation daemon for an OpenLI system OpenLI is a software suite that allows network operators to conduct diff --git a/debian/copyright b/debian/copyright index 3a56589f..a35ab05c 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,7 +1,7 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: OpenLI -Upstream-Contact: WAND Network Research Group -Source: https://github.com/wanduow/openli +Upstream-Contact: Shane Alcock +Source: https://github.com/OpenLI-NZ/openli Files: * Copyright: 2018 The University of Waikato, Hamilton, New Zealand. 
diff --git a/debian/openli-mediator.postinst b/debian/openli-mediator.postinst index ca7c4e1c..7abd73b9 100644 --- a/debian/openli-mediator.postinst +++ b/debian/openli-mediator.postinst @@ -1,11 +1,37 @@ -#!/bin/sh -# postinst script for amplet2 +#!/bin/bash +# postinst script for openli-mediator # # see: dh_installdeb(1) set -e USER="openli" +declare -A origdeps + +package_installed_directly() { + local pname="$1" + local state="${origdeps[$pname]}" + + if [ "$state" = "installed" ]; then + return 0 + else + return 1 + fi +} + +save_original_packages() { + while read -r package; do + local pname=$(echo "$package" | awk '{print $4}') + local status=$(echo "$package" | awk '{print $1,$2,$3}') + + if [ "$status" = "install ok installed" ]; then + origdeps[$pname]="installed" + else + origdeps[$pname]="not-installed" + fi + done < <(dpkg-query -W -f='${Status} ${Package}\n') +} + case "$1" in configure) @@ -17,8 +43,49 @@ case "$1" in chown -R ${USER}: /etc/openli chown -R ${USER}: /var/run/openli chmod 2750 /etc/openli -;; + save_original_packages + + if systemctl is-active --quiet "rabbitmq-server"; then + echo "" + else + service rabbitmq-server start + fi + + EXISTS=`rabbitmqctl list_vhosts | grep "^OpenLI-med$" | wc -l` + if [ "$EXISTS" -eq "0" ]; then + rabbitmqctl add_vhost "OpenLI-med" + fi + + EXISTS=`rabbitmqctl list_users | grep "^openli.nz\b" | wc -l` + if [ "$EXISTS" -eq "0" ]; then + s="" + until s="$s$(dd bs=24 count=1 if=/dev/urandom 2>/dev/null | LC_ALL=C tr -cd 'a-zA-Z0-9')" + [ ${#s} -ge 16 ]; do :; done + CRED=$(printf %.16s $s) + + rabbitmqctl add_user "openli.nz" "${CRED}" + rabbitmqctl set_permissions -p "OpenLI-med" "openli.nz" ".*" ".*" ".*" + echo ${CRED} > /etc/openli/rmqinternalpass + chmod 0640 /etc/openli/rmqinternalpass + chown openli:openli /etc/openli/rmqinternalpass + + fi + + if package_installed_directly rabbitmq-server; then + if [ ! -f /etc/rabbitmq/rabbitmq.conf ]; then + cat > /etc/rabbitmq/rabbitmq.conf <&2 + exit 1 + ;; +esac + +#DEBHELPER# + +exit 0 diff --git a/debpkg-setup.sh b/debpkg-setup.sh index dd15f2ef..65bf314d 100755 --- a/debpkg-setup.sh +++ b/debpkg-setup.sh @@ -2,8 +2,8 @@ set -x -e -o pipefail -export DEBEMAIL='packaging@wand.net.nz' -export DEBFULLNAME='WAND Packaging' +export DEBEMAIL='salcock@searchlight.nz' +export DEBFULLNAME='Shane Alcock' export DEBIAN_FRONTEND=noninteractive export SOURCENAME=`echo ${GITHUB_REF##*/} | cut -d '-' -f 1` @@ -36,7 +36,7 @@ case ${DISTRO} in echo "10" > debian/compat ;; - bullseye ) + bullseye | bookworm ) sed -i 's/ dh-systemd (>=1.5),//' debian/control ;; diff --git a/doc/CollectorDoc.md b/doc/CollectorDoc.md index 0990fcc6..bca8f3ee 100644 --- a/doc/CollectorDoc.md +++ b/doc/CollectorDoc.md @@ -150,7 +150,7 @@ for the best reliability, we recommend configuring your collectors and mediators to use RabbitMQ as an intermediary. More details on how to configure RabbitMQ for a collector can be found at -https://github.com/wanduow/openli/wiki/Using-RabbitMQ-for-disk-backed-buffers-in-OpenLI. +https://github.com/OpenLI-NZ/openli/wiki/Using-RabbitMQ-for-disk-backed-buffers-in-OpenLI. A collector only requires a small amount of configuration: a username and password that can be used to authenticate against a local RabbitMQ instance, and a flag to inform the collector that RabbitMQ output is enabled. @@ -207,6 +207,23 @@ The basic option keys are: RabbitMQ instance. * sipallowfromident -- set to 'yes' to allow the SIP "From:" field to be used for target identification. Defaults to "no". 
+* maskimapcreds -- set to 'yes' to have OpenLI replace any clear-text or + base64 encoded credentials in IMAP traffic that has + been intercepted using an email intercept with "XXX". + Defaults to "yes". +* maskpop3creds -- set to 'yes' to have OpenLI replace any clear-text + credentials in POP3 traffic that have been + intercepted using an email intercept with "XXX". + Defaults to "yes". + +Be aware that increasing the number of threads used for sequence number +tracking, encoding or forwarding can actually decrease OpenLI's performance, +especially if there are more threads active than CPU cores available on +the collector host machine. Also, OpenLI uses a number of internal threads +for message-passing and connection maintenance, which will also be +contending for CPU time. A good rule of thumb is that the total number +of input threads, sequence tracker threads, encoding threads and forwarding +threads should NOT exceed the number of CPU cores on your machine. Inputs are specified as a YAML sequence with a key of `inputs:`. Each sequence item represents a single traffic source to intercept traffic from @@ -232,15 +249,71 @@ two key-value elements: * port -- the port that the sink is listening on for mirrored traffic -Be aware that increasing the number of threads used for sequence number -tracking, encoding or forwarding can actually decrease OpenLI's performance, -especially if there are more threads active than CPU cores available on -the collector host machine. Also, OpenLI uses a number of internal threads -for message-passing and connection maintenance, which will also be -contending for CPU time. A good rule of thumb is that the total number -of input threads, sequence tracker threads, encoding threads and forwarding -threads should NOT exceed the number of CPU cores on your machine. - +When performing email interception, mail protocol sessions will be ended as +soon as the protocol "closing" command (i.e. "QUIT" for SMTP, "BYE" for IMAP) +is observed. However, OpenLI will also expire any incomplete mail protocol +sessions that have been idle for a certain number of minutes. You can +configure the idle thresholds for each mail protocol by defining a YAML sequence +with the key `emailsessiontimeouts` and then adding a sequence item for each +protocol that you wish to define a timeout for. Each sequence item should +be expressed as a key-value pair, where the key is the protocol name and the +value is the desired timeout in minutes. + +The three mail protocols supported by OpenLI and their default timeout values +are: +* smtp (default is 5 minutes) +* imap (default is 30 minutes) +* pop3 (default is 10 minutes) + + +### Email ingestion service +Instead of intercepting email by capturing all SMTP, POP3 and/or IMAP traffic +observed on a network interface, OpenLI can also ingest email application +layer messages through an additional HTTP service that can be run on each/any +OpenLI collector. + +You can then use custom plugins on your mail servers (e.g. dovecot plugins) +to generate messages in the expected format for an interceptable email session +and POST the message to the ingestion service running on a collector. The +POSTed message is sent as `multipart/form-data`, where each field in +a message is a separate part encoded as `text/plain`. 
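As a rough illustration of the transport only, a plugin (or a test script) could deliver a message to the ingestion service with a POST along the following lines. The listen address, port and password here match the example collector configuration shown later in this diff; the URL path and form field names are placeholders, since the actual fields are defined by the message format documented on the wiki (linked below):
```
# Hypothetical field names -- consult the Email-Ingestion-Message-Format wiki
# page for the fields the ingestion service actually expects. The username on
# the digest auth is ignored; only the password is checked against
# 'authpassword'. You may need --cacert with the OpenLI CA certificate if
# 'requiretls' is enabled and the collector's certificate is not trusted.
curl --digest -u ingest:apassword \
    -F "recipient=abadperson@example.org;type=text/plain" \
    -F "sender=someone@example.net;type=text/plain" \
    -F "mailcontent=</tmp/message.eml;type=text/plain" \
    https://10.230.1.1:19999/
```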
+ +The message format itself is documented on the OpenLI wiki at +https://github.com/OpenLI-NZ/openli/wiki/Email-Ingestion-Message-Format + +By default, the email ingestion service is disabled on a collector but you +can enable and configure it using the following options. + +Firstly, you will need to add the `emailingest:` key to the top level of +your existing collector YAML configuration. + +Then you can specify the following mapping options as values inside the +`emailingest:` key to configure the ingestion service: + +* listenaddress -- the IP address that the service should listen on. +* listenport -- the port for the service to listen on. +* enabled -- if set to "no", the service will be disabled. Set + to "yes" to enable the service. +* requiretls -- if set to "yes", connections to the service will + only be permitted using HTTPS. +* authpassword -- if set, connections to the service will be rejected + unless they use digest authentication and provide + this value as their password. + +Example configuration is included in the `collector-example.yaml` config +file -- you can find this file in `doc/exampleconfigs` in the OpenLI source +tree or installed into `/etc/openli/` if you installed OpenLI using a +package. + +To enable TLS on the ingestion service, you must also configure your collector +(and all other OpenLI components) to use TLS for their internal communications, +as the ingestion service will use the same certificates and keys to +establish the encrypted channel. See `doc/TLSDoc.md` for details on how to +set up TLS for OpenLI. + +When using digest authentication, the username on the POST request can be +set to anything; the username is ignored by the ingestion service as long as +the provided password matches what has been set as the `authpassword`. ### SIP Ignore SDP O option When testing OpenLI VOIP intercepts, you may discover that the IRI stream for diff --git a/doc/DPDKNotes.md b/doc/DPDKNotes.md index 2e4cd0ac..479db2a0 100644 --- a/doc/DPDKNotes.md +++ b/doc/DPDKNotes.md @@ -16,7 +16,7 @@ fairly decent packages for DPDK are now available for major Linux distributions and the libtrace packages automatically include DPDK as a dependency. If you want to make your life easier and just use the packaged version of DPDK, stop reading this file and try the wiki page at -https://github.com/wanduow/openli/wiki/DPDK-and-OpenLI instead! +https://github.com/OpenLI-NZ/openli/wiki/DPDK-and-OpenLI instead! ======================== diff --git a/doc/MediatorDoc.md b/doc/MediatorDoc.md index d514422a..5ddcffe6 100644 --- a/doc/MediatorDoc.md +++ b/doc/MediatorDoc.md @@ -81,10 +81,10 @@ rotated -- in-progress pcap traces do not contain all of the necessary trailers to allow them to be correctly parsed by a reader. ### RabbitMQ Configuration -If you have using RabbitMQ to reliably persist the intercepted packets that -have not yet been received by your mediator, you will need to also provide -additional configuration on your mediator to allow it to read those packets -from the RabbitMQ queue on the collector. +If you are using RabbitMQ on the collector to reliably persist the intercepted +packets that have not yet been received by your mediator, you will need to also +provide additional configuration on your mediator to allow it to fetch those +packets from the collector. OpenLI supports (and recommends!) 
the use of SSL / TLS to authenticate with the RabbitMQ server that is running on the collector, but you may also choose to @@ -93,7 +93,8 @@ authenticate using the plain method with a password. Plain authentication will require you to provide the following options in your configuration file: -* RMQenabled -- must be set to `true` to enable RabbitMQ support +* RMQenabled -- must be set to `true` to enable RabbitMQ support for + receiving intercepted packets from the collector * RMQname -- the username to use when authenticating with RabbitMQ * RMQpass -- the password to use when authenticating with RabbitMQ * RMQSSL -- must be set to `false` to disable SSL authentication @@ -103,7 +104,8 @@ configuration file: SSL authentication will require you to provide the following options instead: -* RMQenabled -- must be set to `true` to enable RabbitMQ support +* RMQenabled -- must be set to `true` to enable RabbitMQ support for + receiving intercepted packets from the collector * RMQname -- the username to use when authenticating with RabbitMQ * RMQSSL -- must be set to `true` to enable SSL authentication * RMQheartbeatfreq -- time between RMQ heartbeat packets that are used to @@ -118,6 +120,34 @@ See TLSDoc.md for more details on the SSL certificate files required by OpenLI, as these will be the same certificates that you will/would use to encrypt other inter-component messages in an OpenLI deployment. +### RabbitMQ for local buffering + +As of version 1.1.0, the OpenLI mediator will also use a local RabbitMQ server +instance to move intercept records between threads and ensure that the records +are safely buffered on disk whenever a handover is unavailable. You will +therefore need to ensure that a RabbitMQ service is running on your mediator +host before starting the mediator. + +Installing the OpenLI mediator via one of our software packages should +install and configure a RabbitMQ server for the mediator automatically. + +If you install the OpenLI mediator from source instead, you will need to +also configure RabbitMQ server manually to support local buffering. +Full instructions for this are available at +https://github.com/OpenLI-NZ/openli/wiki/RabbitMQ-for-internal-buffering-on-Mediators +and an abbreviated version is also provided in the README.md file included +with the OpenLI source code. + +If you have configured RabbitMQ server manually, you can use the +`RMQinternalpass` configuration option to tell your OpenLI mediator the +password that it will need to access the local RabbitMQ message queues. + +It is STRONGLY recommended that you ensure the RabbitMQ server that is +running on your mediator is listening for connections on localhost only. +Our package install process will try to configure this for you, but if you +have installed OpenLI manually or already had RabbitMQ installed on the host +where your mediator is running then you will need to configure this yourself. + ### Configuration Syntax All of the mediator config options are standard YAML key-value pairs, where the key is the option name and the value is your chosen value for that option. @@ -139,12 +169,15 @@ The supported option keys are: * RMQenabled -- set to `true` if your collectors are using RabbitMQ to buffer ETSI records destined for this mediator * RMQname -- the username to use when authenticating with RabbitMQ + on the collectors. * RMQpass -- the password to use when authenticating with RabbitMQ - (required for plain auth only). + on the collectors (required for plain auth only). 
* RMQSSL -- set to `true` to use SSL authentication instead of plain * RMQheartbeatfreq -- time between RMQ heartbeat packets that are used to detect a connection breakdown (default is 0, which - disables heartbeats) + disables heartbeats). +* RMQinternalpass -- the password for the `openli.nz` RMQ user that is used + for internal message buffering. * tlscert -- the file containing an SSL certificate for the mediator * tlskey -- the file containing an SSL key for the mediator * tlsca -- the file containing the SSL certificate for the CA that diff --git a/doc/ProvisionerDoc.md b/doc/ProvisionerDoc.md index 2dbc611d..942460ea 100644 --- a/doc/ProvisionerDoc.md +++ b/doc/ProvisionerDoc.md @@ -30,7 +30,7 @@ delete or query intercepts within the OpenLI system. A simple HTTP server is run on the socket and there is a RESTful API that can be used to interact with the update socket. Full documentation of the REST API for intercept management is available at -https://github.com/wanduow/openli/wiki/Intercept-Configuration-REST-API +https://github.com/OpenLI-NZ/openli/wiki/Intercept-Configuration-REST-API In addition to intercepts, the update socket can be used to manage the agencies that the OpenLI system will export intercepts to, as well as the set of @@ -69,7 +69,7 @@ is running on. Full documentation on the authentication system, how to enable it and how to add users to it can be found at: -https://github.com/wanduow/openli/wiki/Authenticated-REST-API +https://github.com/OpenLI-NZ/openli/wiki/Authenticated-REST-API Users can authenticate by either including their API key in their HTTP requests (using the `X-API-KEY` header) or by performing standard Digest @@ -124,6 +124,28 @@ packets by setting the 'voip-ignorecomfort' option to 'yes' at the top level of your configuration. Please confirm with your LEAs that this is acceptable before doing so, of course! +### Email Intercepts +All email intercepts are specified using the emailintercepts option. +Each intercept is expressed as an item in a list and each intercept must be +configured with the following six parameters: + +* LIID -- the unique lawful intercept ID for this intercept. This will be + assigned by the agency and should be present on the warrant for the intercept. +* Authorisation country code -- the country within which the authorisation to + intercept was granted. +* Delivery country code -- the country where the intercept is taking place + (probably the same as above). +* Mediator -- the ID number of the mediator which will be forwarding the + intercept records to the requesting agency. +* Agency ID -- the agency that requested the intercept (this should match one + of the agencies specified elsewhere in this configuration file). +* Targets -- a list of email addresses that belong to the intercept target. + +OpenLI supports the interception of email transported using the SMTP, POP3 and +IMAP protocols. For each protocol that you wish to perform email interception +for, you will need to tell OpenLI the IP addresses and ports that each mail +service is being served from -- this is explained in more detail later on. + ### IP Data Intercepts All IP intercepts are specified using the ipintercepts option. As with VOIP @@ -225,6 +247,19 @@ configured using two parameters: * port -- the port that the RADIUS server is communicating on. +### Email Servers +To be able to intercept email sessions, the OpenLI collectors must be able to +recognise traffic that is sent to or from your email servers. 
There is a +separate configuration option for each email protocol (IMAP, POP3, SMTP), +named `imapservers`, `pop3servers` and `smtpservers` respectively. + +Each mail server in your network should be included as a list item under the +relevant configuration option. Failure to configure email servers correctly +will prevent OpenLI from performing email intercepts properly. +A mail server is configured using two parameters: +* ip -- the IP address of the mail server +* port -- the port that the mail server is listening on + ### GTP Servers For interception of mobile phone traffic, OpenLI uses GTPv2 traffic to track the state of mobile users' IP sessions. To be able to recognise the GTP traffic @@ -398,9 +433,9 @@ key-value elements: will not require a response to keep alives to maintain the handover connections. -VOIP and IPintercepts are also expressed as a YAML sequence, with a key of -`voipintercepts:` and `ipintercepts:` respectively. Each sequence item -represents a single intercept. +VOIP, Email and IPintercepts are also expressed as a YAML sequence, with a key +of `voipintercepts:`, `emailintercepts:`, and `ipintercepts:` respectively. +Each sequence item represents a single intercept. An IP intercept must contain the following key-value elements: @@ -480,6 +515,42 @@ A SIP target can be described using the following key-value elements: Authorization header will be associated with the target. +--- + +An email intercept must contain the following key-value elements: + +* `liid` -- the LIID +* `authcountrycode` -- the authorisation country code +* `deliverycountrycode` -- the delivery country code +* `mediator` -- the ID of the mediator which will forward the + intercept +* `agencyid` -- the internal identifier of the agency that + requested the intercept +* `targets` -- a list of email identities that are being used by + the target. You may specify multiple identities + for a target (e.g. if they have multiple mailboxes + that you need to monitor). + +An email target is a JSON object that contains just a single field: + +* `address` -- the email address of the target + + +All intercept types also support the following optional key-value elements: + +* `starttime` -- do not intercept any traffic observed before this + unix timestamp +* `endtime` -- do not intercept any traffic observed after this + unix timestamp +* `outputhandovers` -- If set to "all", then both IRI and CCs will be + produced by OpenLI for this intercept. + If set to "irionly", then only IRIs will be + produced by OpenLI for this intercept. + If set to "cconly", then only CCs will be produced + by OpenLI for this intercept. + The default setting is "all". + + ### SIP Target Specifics OpenLI currently supports five approaches for associating a SIP session diff --git a/doc/exampleconfigs/collector-example.yaml b/doc/exampleconfigs/collector-example.yaml index 265031fb..602a4dbe 100644 --- a/doc/exampleconfigs/collector-example.yaml +++ b/doc/exampleconfigs/collector-example.yaml @@ -33,6 +33,18 @@ forwardingthreads: 1 # validated and can be easily spoofed). sipallowfromident: no +# Set this to yes if you want OpenLI to replace any plain text or base64 +# encoded credentials (i.e. passwords) in intercepted IMAP traffic with +# the characters "XXX". Note that this masking will only be applied to +# IMAP traffic captured using an email intercept, not an IP intercept. +maskimapcreds: yes + +# Set this to yes if you want OpenLI to replace any plain text credentials +# (i.e. 
passwords) in intercepted POP3 traffic with +# the characters "XXX". Note that this masking will only be applied to +# POP3 traffic captured using an email intercept, not an IP intercept. +maskpop3creds: yes + # Number of minutes between each dump of performance statistics to the # logger. Set to zero to disable this extra logging altogether. logstatfrequency: 5 @@ -83,6 +95,36 @@ jmirrors: - ip: 192.168.200.77 port: 30030 +# Times (in minutes) to wait before expiring an idle email session for +# each supported email protocol. +emailsessiontimeouts: + - smtp: 5 + - imap: 30 + - pop3: 10 + +# Email ingestion service configuration -- instead of intercepting email +# by capturing traffic from an interface, OpenLI can also receive email +# application messages that are POSTed to this HTTP service. This is +# useful for mail service operators who would prefer to use custom plugins +# on their mail servers to push relevant mail content to OpenLI, rather +# than having to mirror traffic at the TCP/IP layers. +# +# More detail on the ingestion service and the expected formatting of +# the messages that this service can receive is available on the OpenLI +# wiki (https://github.com/OpenLI-NZ/openli/wiki). +# +emailingest: + listenaddress: 10.230.1.1 # the IP address for the service to listen on + listenport: 19999 # the port for the service to listen on + requiretls: yes # only allow connections via HTTPS (note + # that you will need to provide the 'tlscert' + # and 'tlskey' options elsewhere in this + # file for this to work) + authpassword: apassword # if set, POSTed messages will be rejected + # unless they are sent with digest auth and + # the provided password matches the + # value for this option. + enabled: yes # set to "no" to disable this service # If set to true, then the collector will use RabbitMQ to buffer encoded # ETSI records to be forwarded to the mediators. The buffered packets will diff --git a/doc/exampleconfigs/mediator-example.yaml b/doc/exampleconfigs/mediator-example.yaml index 7e3b2ae6..e6999e0d 100644 --- a/doc/exampleconfigs/mediator-example.yaml +++ b/doc/exampleconfigs/mediator-example.yaml @@ -72,6 +72,14 @@ RMQpass: "rmqpassword" # not required if using SSL auth RMQSSL: true # set to false to use plain auth instead RMQheartbeatfreq: 30 # time between heartbeats in seconds +# The password to use for authenticating against the RabbitMQ virtual +# host that is handling internal message buffering for the mediator. +# Note that you should only set this if you have manually created the +# virtual host and `openli.nz` user -- packaged installs should NOT need +# to set this config option. +# +#RMQinternalpass: "theinternalpassword" + # If set to 'no', intercepted packets streamed to this mediator by the # collectors will be treated as unencrypted -- this may be desirable for # performance reasons. Make sure you set the corresponding option on the diff --git a/doc/exampleconfigs/provisioner-example.yaml b/doc/exampleconfigs/provisioner-example.yaml index 918611ab..203682c1 100644 --- a/doc/exampleconfigs/provisioner-example.yaml +++ b/doc/exampleconfigs/provisioner-example.yaml @@ -35,7 +35,7 @@ voip-ignorecomfort: no # Location of the SQLite3 database where credentials and API keys are stored # for authorised access to the REST API. If this option is not present, # then the REST API can be used without authentication. 
-# See https://github.com/wanduow/openli/wiki/Authenticated-REST-API for more +# See https://github.com/OpenLI-NZ/openli/wiki/Authenticated-REST-API for more # information about authentication and the REST API. # restauthdb: /var/lib/openli/provauth.db diff --git a/doc/exampleconfigs/running-intercept-example.yaml b/doc/exampleconfigs/running-intercept-example.yaml index d49aec39..696f07cd 100644 --- a/doc/exampleconfigs/running-intercept-example.yaml +++ b/doc/exampleconfigs/running-intercept-example.yaml @@ -6,7 +6,7 @@ # # You should use the OpenLI REST API to create and modify your intercept # configuration files, rather than editing by hand. -# See https://github.com/wanduow/openli/wiki/Intercept-Configuration-REST-API +# See https://github.com/OpenLI-NZ/openli/wiki/Intercept-Configuration-REST-API # for instructions on how to do this. # # @@ -32,6 +32,21 @@ gtpservers: - ip: 10.100.0.200 port: 2123 +# List of SMTP servers on our network (for performing email intercepts) +smtpservers: + - ip: 10.200.0.45 + port: 25 + +# List of IMAP servers on our network (for performing email intercepts) +imapservers: + - ip: 10.200.1.14 + port: 143 + +# List of POP3 servers on our network (for performing email intercepts) +pop3servers: + - ip: 10.200.2.100 + port: 110 + # List of default RADIUS usernames that OpenLI should ignore when tracking # user sessions from RADIUS traffic defaultradiususers: @@ -214,3 +229,15 @@ voipintercepts: # realm, this means we can match this username # against *any* realm. + +# List of active email intercepts +emailintercepts: + + - liid: N72007RT4 # LIID, should be provided by requesting agency + authcountrycode: DE # Authorisation country code + deliverycountrycode: DE # Delivery country code + mediator: 6001 # ID of the mediator to send intercept via + agencyid: "Police" # ID of agency to send intercept to + targets: + - address: abadperson@example.org # email address for the target + - address: myalias@example.org # multiple addresses are allowed diff --git a/rpm/openli.spec b/rpm/openli.spec index 57595597..0e8cb0ae 100644 --- a/rpm/openli.spec +++ b/rpm/openli.spec @@ -1,11 +1,11 @@ Name: openli -Version: 1.0.15 +Version: 1.1.0 Release: 1%{?dist} Summary: Software for performing ETSI-compliant lawful intercept License: GPLv3 -URL: https://github.com/wanduow/OpenLI -Source0: https://github.com/wanduow/OpenLI/archive/%{version}.tar.gz +URL: https://github.com/OpenLI-NZ/OpenLI +Source0: https://github.com/OpenLI-NZ/OpenLI/archive/%{version}.tar.gz BuildRequires: gcc BuildRequires: gcc-c++ @@ -27,6 +27,7 @@ BuildRequires: libmicrohttpd-devel BuildRequires: systemd BuildRequires: sqlcipher-devel BuildRequires: librabbitmq-devel +BuildRequires: libb64-devel %description Software for performing ETSI-compliant lawful intercept @@ -51,6 +52,7 @@ and mediators. %package mediator Summary: Mediation daemon for an OpenLI system Requires(pre): shadow-utils +Requires: rabbitmq-server %description mediator OpenLI is a software suite that allows network operators to conduct @@ -170,6 +172,50 @@ if [ -d /var/run/openli ]; then fi chmod 2750 /etc/openli +if /bin/systemctl is-active --quiet "rabbitmq-server"; then + echo "" +else + /bin/systemctl start rabbitmq-server +fi + +if rpm -q "rabbitmq-server" > /dev/null 2>&1; then + dep_install=$(rpm -q --queryformat '%{INSTALLTIME}\n' "rabbitmq-server") + this_install=$(rpm -q --queryformat '%{INSTALLTIME}\n' "openli-mediator") + if [ "$dep_install" -ge "$this_install" ]; then + # dependency was installed by our own package + if [ ! 
-f /etc/rabbitmq/rabbitmq.conf ]; then + cat > /etc/rabbitmq/rabbitmq.conf < /etc/openli/rmqinternalpass + chmod 0640 /etc/openli/rmqinternalpass + chown openli:openli /etc/openli/rmqinternalpass +fi + +/bin/systemctl restart rabbitmq-server + %preun mediator if [ $1 -eq 0 ]; then # Disable and stop the units @@ -183,6 +229,10 @@ if [ $1 -ge 1 ]; then /bin/systemctl daemon-reload >/dev/null 2>&1 || : # On upgrade, restart the daemon /bin/systemctl try-restart openli-mediator.service >/dev/null 2>&1 || : +else + rabbitmqctl delete_user "openli.nz" + rabbitmqctl delete_vhost "OpenLI-med" + rm -f /etc/openli/rmqinternalpass fi %post collector @@ -232,6 +282,9 @@ fi %changelog +* Fri May 26 2023 Shane Alcock - 1.1.0-1 +- Updated for 1.1.0 release + * Wed Jun 15 2022 Shane Alcock - 1.0.15-1 - Updated for 1.0.15 release diff --git a/rpmpkg-setup.sh b/rpmpkg-setup.sh index b3600592..5b5c8d35 100755 --- a/rpmpkg-setup.sh +++ b/rpmpkg-setup.sh @@ -26,14 +26,14 @@ yum update -y if [[ "$1" =~ rocky* ]]; then dnf install -y dnf-plugins-core epel-release || true dnf config-manager --set-enabled powertools || true - dnf config-manager --set-enabled devel || true - yum module -y enable mariadb mariadb-devel || true + dnf module disable -y mariadb || true + /usr/bin/crb enable || true fi if [[ "$1" =~ alma* ]]; then dnf install -y dnf-plugins-core epel-release || true dnf config-manager --set-enabled powertools || true - yum module -y enable mariadb mariadb-devel + /usr/bin/crb enable || true fi if [ "$1" = "centos:8" ]; then diff --git a/src/Makefile.am b/src/Makefile.am index 84bca262..ecd60253 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -58,6 +58,13 @@ openlicollector_SOURCES=collector/collector.c configparser.c configparser.h \ collector/umtsiri.h collector/umtsiri.c \ collector/radius_hasher.c collector/radius_hasher.h \ collector/timed_intercept.c collector/timed_intercept.h \ + collector/email_ingest_service.c \ + collector/email_ingest_service.h \ + collector/email_worker.c collector/email_worker.h \ + collector/emailprotocols/smtp.c \ + collector/emailprotocols/imap.c \ + collector/emailprotocols/pop3.c \ + collector/emailiri.c collector/emailiri.h collector/emailcc.c \ $(PLUGIN_SRCS) openlicollector_LDADD = @ADD_LIBS@ -L$(abs_top_srcdir)/extlib/libpatricia/.libs @@ -74,11 +81,12 @@ openlimediator_SOURCES=mediator/mediator.c mediator/mediator.h \ mediator/med_epoll.c mediator/liidmapping.c \ mediator/liidmapping.h mediator/mediator_prov.c \ mediator/med_epoll.h mediator/mediator_prov.h \ - mediator/mediator_coll.c mediator/mediator_coll.h \ - mediator/mediator_rmq.c \ + mediator/coll_recv_thread.c mediator/coll_recv_thread.h \ + mediator/lea_send_thread.c mediator/lea_send_thread.h \ + mediator/mediator_rmq.c mediator/mediator_rmq.h \ byteswap.c byteswap.h \ configparser.c configparser.h util.c util.h \ - agency.h logger.c logger.h netcomms.c \ + agency.h agency.c logger.c logger.h netcomms.c \ netcomms.h export_buffer.c intercept.c \ export_buffer.h etsili_core.h etsili_core.c \ collector/jenkinshash.c openli_tls.c openli_tls.h \ diff --git a/src/collector/collector.c b/src/collector/collector.c index 5f31d735..bc807d41 100644 --- a/src/collector/collector.c +++ b/src/collector/collector.c @@ -82,17 +82,23 @@ static void reset_collector_stats(collector_global_t *glob) { glob->stats.ipiri_created = 0; glob->stats.ipmmcc_created = 0; glob->stats.ipmmiri_created = 0; + glob->stats.emailiri_created = 0; + glob->stats.emailcc_created = 0; glob->stats.bad_sip_packets = 0; 
glob->stats.bad_ip_session_packets = 0; glob->stats.ipintercepts_added_diff = 0; glob->stats.voipintercepts_added_diff = 0; + glob->stats.emailintercepts_added_diff = 0; glob->stats.ipintercepts_ended_diff = 0; glob->stats.voipintercepts_ended_diff = 0; + glob->stats.emailintercepts_ended_diff = 0; glob->stats.ipsessions_added_diff = 0; glob->stats.voipsessions_added_diff = 0; + glob->stats.emailsessions_added_diff = 0; glob->stats.ipsessions_ended_diff = 0; glob->stats.voipsessions_ended_diff = 0; + glob->stats.emailsessions_ended_diff = 0; } static void log_collector_stats(collector_global_t *glob) { @@ -109,6 +115,8 @@ static void log_collector_stats(collector_global_t *glob) { glob->stats.packets_intercepted); logger(LOG_INFO, "OpenLI: Packets sent to IP sync: %lu, sent to VOIP sync: %lu", glob->stats.packets_sync_ip, glob->stats.packets_sync_voip); + logger(LOG_INFO, "OpenLI: Packets sent to Email workers: %lu", + glob->stats.packets_sync_email); logger(LOG_INFO, "OpenLI: Bad SIP packets: %lu Bad RADIUS packets: %lu", glob->stats.bad_sip_packets, glob->stats.bad_ip_session_packets); logger(LOG_INFO, "OpenLI: Records created... IPCCs: %lu IPIRIs: %lu MobIRIs: %lu", @@ -116,6 +124,8 @@ static void log_collector_stats(collector_global_t *glob) { glob->stats.mobiri_created); logger(LOG_INFO, "OpenLI: Records created... IPMMCCs: %lu IPMMIRIs: %lu", glob->stats.ipmmcc_created, glob->stats.ipmmiri_created); + logger(LOG_INFO, "OpenLI: Records created... EmailCCs: %lu EmailIRIs: %lu", + glob->stats.emailcc_created, glob->stats.emailiri_created); logger(LOG_INFO, "OpenLI: IP intercepts added: %lu (all-time: %lu)", glob->stats.ipintercepts_added_diff, @@ -131,6 +141,13 @@ static void log_collector_stats(collector_global_t *glob) { glob->stats.voipintercepts_ended_diff, glob->stats.voipintercepts_ended_total); + logger(LOG_INFO, "OpenLI: Email intercepts added: %lu (all-time: %lu)", + glob->stats.emailintercepts_added_diff, + glob->stats.emailintercepts_added_total); + logger(LOG_INFO, "OpenLI: Email intercepts ended: %lu (all-time: %lu)", + glob->stats.emailintercepts_ended_diff, + glob->stats.emailintercepts_ended_total); + logger(LOG_INFO, "OpenLI: IP sessions added: %lu (all-time: %lu)", glob->stats.ipsessions_added_diff, glob->stats.ipsessions_added_total); @@ -145,6 +162,13 @@ static void log_collector_stats(collector_global_t *glob) { glob->stats.voipsessions_ended_diff, glob->stats.voipsessions_ended_total); + logger(LOG_INFO, "OpenLI: Email sessions added: %lu (all-time: %lu)", + glob->stats.emailsessions_added_diff, + glob->stats.emailsessions_added_total); + logger(LOG_INFO, "OpenLI: Email sessions ended: %lu (all-time: %lu)", + glob->stats.emailsessions_ended_diff, + glob->stats.emailsessions_ended_total); + logger(LOG_INFO, "OpenLI: === statistics complete ==="); } @@ -210,6 +234,9 @@ static void init_collocal(colthread_local_t *loc, collector_global_t *glob, loc->radiusservers = NULL; loc->gtpservers = NULL; loc->sipservers = NULL; + loc->smtpservers = NULL; + loc->imapservers = NULL; + loc->pop3servers = NULL; loc->staticv4ranges = New_Patricia(32); loc->staticv6ranges = New_Patricia(128); loc->dynamicv6ranges = New_Patricia(128); @@ -231,6 +258,17 @@ static void init_collocal(colthread_local_t *loc, collector_global_t *glob, zmq_connect(loc->zmq_pubsocks[i], pubsockname); } + loc->email_worker_queues = calloc(glob->email_threads, sizeof(void *)); + for (i = 0; i < glob->email_threads; i++) { + char pubsockname[128]; + + snprintf(pubsockname, 128, 
"inproc://openliemailworker-colrecv%d", i); + loc->email_worker_queues[i] = zmq_socket(glob->zmq_ctxt, ZMQ_PUSH); + zmq_setsockopt(loc->email_worker_queues[i], ZMQ_SNDHWM, &hwm, + sizeof(hwm)); + zmq_connect(loc->email_worker_queues[i], pubsockname); + } + loc->fragreass = create_new_ipfrag_reassembler(); loc->tosyncq_ip = zmq_socket(glob->zmq_ctxt, ZMQ_PUSH); @@ -386,12 +424,19 @@ static void stop_processing_thread(libtrace_t *trace, libtrace_thread_t *t, zmq_close(loc->zmq_pubsocks[i]); } + for (i = 0; i < glob->email_threads; i++) { + zmq_setsockopt(loc->email_worker_queues[i], ZMQ_LINGER, &zero, + sizeof(zero)); + zmq_close(loc->email_worker_queues[i]); + } + zmq_setsockopt(loc->tosyncq_ip, ZMQ_LINGER, &zero, sizeof(zero)); zmq_close(loc->tosyncq_ip); zmq_setsockopt(loc->tosyncq_voip, ZMQ_LINGER, &zero, sizeof(zero)); zmq_close(loc->tosyncq_voip); free(loc->zmq_pubsocks); + free(loc->email_worker_queues); HASH_ITER(hh, loc->activeipv4intercepts, v4, tmp) { free_all_ipsessions(&(v4->intercepts)); @@ -413,6 +458,9 @@ static void stop_processing_thread(libtrace_t *trace, libtrace_thread_t *t, free_coreserver_list(loc->radiusservers); free_coreserver_list(loc->gtpservers); free_coreserver_list(loc->sipservers); + free_coreserver_list(loc->smtpservers); + free_coreserver_list(loc->imapservers); + free_coreserver_list(loc->pop3servers); destroy_ipfrag_reassembler(loc->fragreass); @@ -470,6 +518,16 @@ static inline void send_packet_to_sync(libtrace_packet_t *pkt, zmq_send(q, (void *)(&syncup), sizeof(syncup), 0); } +static void send_packet_to_emailworker(libtrace_packet_t *pkt, + void **queues, int qcount, uint32_t hashval, uint8_t pkttype) { + + int destind; + + assert(hashval != 0); + destind = (hashval - 1) % qcount; + send_packet_to_sync(pkt, queues[destind], pkttype); +} + static inline uint8_t check_for_invalid_sip(libtrace_packet_t *pkt, uint16_t fragoff) { @@ -534,10 +592,12 @@ static inline uint8_t check_for_invalid_sip(libtrace_packet_t *pkt, return 0; } -static inline int is_core_server_packet(libtrace_packet_t *pkt, +static inline uint32_t is_core_server_packet(libtrace_packet_t *pkt, packet_info_t *pinfo, coreserver_t *servers) { coreserver_t *rad, *tmp; + coreserver_t *found = NULL; + uint32_t hashval = 0; if (pinfo->srcport == 0 || pinfo->destport == 0) { return 0; @@ -568,27 +628,45 @@ static inline int is_core_server_packet(libtrace_packet_t *pkt, sa = (struct sockaddr_in *)(&(pinfo->srcip)); if (CORESERVER_MATCH_V4(rad, sa, pinfo->srcport)) { - return 1; + found = rad; + break; } sa = (struct sockaddr_in *)(&(pinfo->destip)); if (CORESERVER_MATCH_V4(rad, sa, pinfo->destport)) { - return 1; + found = rad; + break; } } else if (pinfo->family == AF_INET6) { struct sockaddr_in6 *sa6; sa6 = (struct sockaddr_in6 *)(&(pinfo->srcip)); if (CORESERVER_MATCH_V6(rad, sa6, pinfo->srcport)) { - return 1; + found = rad; + break; } sa6 = (struct sockaddr_in6 *)(&(pinfo->destip)); if (CORESERVER_MATCH_V6(rad, sa6, pinfo->destport)) { - return 1; + found = rad; + break; } } } /* Doesn't match any of our known core servers */ - return 0; + if (found == NULL) { + return 0; + } + + /* Not technically an LIID, but we just need a hashed ID for the server + * entity. + */ + hashval = hash_liid(found->serverkey); + + /* 0 is our value for "not found", so make sure we never use it... 
*/ + if (hashval == 0) { + hashval = 1; + } + return hashval; + } static libtrace_packet_t *process_packet(libtrace_t *trace, @@ -602,8 +680,9 @@ static libtrace_packet_t *process_packet(libtrace_t *trace, uint32_t rem, iprem; uint8_t proto; int forwarded = 0, ret; - int ipsynced = 0, voipsynced = 0; + int ipsynced = 0, voipsynced = 0, emailsynced = 0; uint16_t fragoff = 0; + uint32_t servhash = 0; openli_pushed_t syncpush; packet_info_t pinfo; @@ -762,6 +841,31 @@ static libtrace_packet_t *process_packet(libtrace_t *trace, send_packet_to_sync(pkt, loc->tosyncq_voip, OPENLI_UPDATE_SIP); voipsynced = 1; } + + else if (loc->smtpservers && + (servhash = is_core_server_packet(pkt, &pinfo, + loc->smtpservers))) { + send_packet_to_emailworker(pkt, loc->email_worker_queues, + glob->email_threads, servhash, OPENLI_UPDATE_SMTP); + emailsynced = 1; + + } + + else if (loc->imapservers && + (servhash = is_core_server_packet(pkt, &pinfo, + loc->imapservers))) { + send_packet_to_emailworker(pkt, loc->email_worker_queues, + glob->email_threads, servhash, OPENLI_UPDATE_IMAP); + emailsynced = 1; + } + + else if (loc->pop3servers && + (servhash = is_core_server_packet(pkt, &pinfo, + loc->pop3servers))) { + send_packet_to_emailworker(pkt, loc->email_worker_queues, + glob->email_threads, servhash, OPENLI_UPDATE_POP3); + emailsynced = 1; + } } @@ -808,6 +912,12 @@ static libtrace_packet_t *process_packet(libtrace_t *trace, } processdone: + if (emailsynced) { + pthread_mutex_lock(&(glob->stats_mutex)); + glob->stats.packets_sync_email ++; + pthread_mutex_unlock(&(glob->stats_mutex)); + } + if (ipsynced) { pthread_mutex_lock(&(glob->stats_mutex)); glob->stats.packets_sync_ip ++; @@ -1070,12 +1180,12 @@ static void destroy_collector_state(collector_global_t *glob) { free_sync_thread_data(&(glob->syncip)); free_sync_thread_data(&(glob->syncvoip)); - libtrace_message_queue_destroy(&(glob->intersyncq)); - - if (glob->zmq_forwarder_ctrl) { - zmq_close(glob->zmq_forwarder_ctrl); + if (glob->emailworkers) { + free(glob->emailworkers); } + libtrace_message_queue_destroy(&(glob->intersyncq)); + if (glob->zmq_encoder_ctrl) { zmq_close(glob->zmq_encoder_ctrl); } @@ -1119,6 +1229,7 @@ static void destroy_collector_state(collector_global_t *glob) { } pthread_mutex_destroy(&(glob->stats_mutex)); + pthread_rwlock_destroy(&(glob->email_config_mutex)); pthread_rwlock_destroy(&glob->config_mutex); free(glob); } @@ -1158,6 +1269,9 @@ static void clear_global_config(collector_global_t *glob) { if (glob->RMQ_conf.pass) { free(glob->RMQ_conf.pass); } + if (glob->RMQ_conf.internalpass) { + free(glob->RMQ_conf.internalpass); + } if (glob->RMQ_conf.hostname) { free(glob->RMQ_conf.hostname); } @@ -1170,6 +1284,19 @@ static void clear_global_config(collector_global_t *glob) { if (glob->jmirrors) { free_coreserver_list(glob->jmirrors); } + + if (glob->emailconf.listenaddr) { + free(glob->emailconf.listenaddr); + } + if (glob->emailconf.listenport) { + free(glob->emailconf.listenport); + } + if (glob->emailconf.authpassword) { + free(glob->emailconf.authpassword); + } + if (glob->email_ingestor) { + free(glob->email_ingestor); + } } static inline void push_hello_message(void *atob, @@ -1246,13 +1373,6 @@ static int prepare_collector_glob(collector_global_t *glob) { glob->syncgenericfreelist = create_etsili_generic_freelist(1); - glob->zmq_forwarder_ctrl = zmq_socket(glob->zmq_ctxt, ZMQ_PUB); - if (zmq_connect(glob->zmq_forwarder_ctrl, - "inproc://openliforwardercontrol") != 0) { - logger(LOG_INFO, "OpenLI: unable to connect to zmq control 
socket for forwarding threads. Exiting."); - return -1; - } - glob->zmq_encoder_ctrl = zmq_socket(glob->zmq_ctxt, ZMQ_PUB); if (zmq_bind(glob->zmq_encoder_ctrl, "inproc://openliencodercontrol") != 0) { @@ -1260,6 +1380,12 @@ static int prepare_collector_glob(collector_global_t *glob) { return -1; } + if (glob->email_ingestor) { + glob->email_ingestor->email_worker_count = glob->email_threads; + glob->email_ingestor->zmq_publishers = NULL; + glob->email_ingestor->zmq_ctxt = glob->zmq_ctxt; + } + return 0; } @@ -1269,6 +1395,7 @@ static void init_collector_global(collector_global_t *glob) { glob->seqtracker_threads = 1; glob->forwarding_threads = 1; glob->encoding_threads = 2; + glob->email_threads = 1; glob->sharedinfo.intpointid = NULL; glob->sharedinfo.intpointid_len = 0; glob->sharedinfo.operatorid = NULL; @@ -1296,11 +1423,20 @@ static void init_collector_global(collector_global_t *glob) { glob->RMQ_conf.name = NULL; glob->RMQ_conf.pass = NULL; + glob->RMQ_conf.internalpass = NULL; glob->RMQ_conf.hostname = NULL; glob->RMQ_conf.port = 0; glob->RMQ_conf.heartbeatFreq = 0; glob->RMQ_conf.enabled = 0; + glob->emailconf.enabled = 255; + glob->emailconf.authrequired = 0; + glob->emailconf.tlsrequired = 0; + glob->emailconf.maxclients = 20; + glob->emailconf.listenport = NULL; + glob->emailconf.listenaddr = NULL; + glob->emailconf.authpassword = NULL; + glob->etsitls = 1; glob->ignore_sdpo_matches = 0; glob->encoding_method = OPENLI_ENCODING_DER; @@ -1309,6 +1445,17 @@ static void init_collector_global(collector_global_t *glob) { glob->stat_frequency = 0; glob->ticks_since_last_stat = 0; + glob->emailsockfd = -1; + glob->email_ingestor = NULL; + + /* TODO add config options to change these values + * also make sure changes are actions post config-reload */ + glob->email_timeouts.smtp = 5; + glob->email_timeouts.pop3 = 10; + glob->email_timeouts.imap = 30; + glob->mask_imap_creds = 1; // defaults to "enabled" + glob->mask_pop3_creds = 1; // defaults to "enabled" + } static collector_global_t *parse_global_config(char *configfile) { @@ -1320,6 +1467,7 @@ static collector_global_t *parse_global_config(char *configfile) { glob->configfile = configfile; pthread_mutex_init(&(glob->stats_mutex), NULL); + pthread_rwlock_init(&(glob->email_config_mutex), NULL); libtrace_message_queue_init(&glob->intersyncq, sizeof(openli_intersync_msg_t)); @@ -1331,6 +1479,36 @@ static collector_global_t *parse_global_config(char *configfile) { return NULL; } + /* Disable by default, unless the user has configured EITHER: + * a) set the enabled flag to true (obviously) + * b) provided a listen address or port for the ingestion service + */ + if (glob->emailconf.listenaddr || glob->emailconf.listenport) { + if (glob->emailconf.enabled == 255) { + glob->emailconf.enabled = 1; + } + } else { + if (glob->emailconf.enabled == 255) { + glob->emailconf.enabled = 0; + } + } + + if (glob->emailconf.enabled) { + glob->email_ingestor = calloc(1, sizeof(email_ingestor_state_t)); + if (glob->emailconf.listenaddr == NULL) { + glob->emailconf.listenaddr = strdup("0.0.0.0"); + } + if (glob->emailconf.listenport == NULL) { + glob->emailconf.listenport = strdup("19999"); + } + logger(LOG_INFO, "OpenLI: starting email ingestor service on %s:%s -- auth %s, TLS %s", + glob->emailconf.listenaddr, + glob->emailconf.listenport, + glob->emailconf.authrequired ? "required": "disabled", + glob->emailconf.tlsrequired ? "required": "disabled"); + + } + logger(LOG_DEBUG, "OpenLI: Encoding Method: %s", glob->encoding_method == OPENLI_ENCODING_BER ? 
"BER" : "DER"); @@ -1341,6 +1519,18 @@ static collector_global_t *parse_global_config(char *configfile) { logger(LOG_INFO, "Allowing SIP From: URIs to be used for target identification"); } + if (glob->mask_imap_creds) { + logger(LOG_INFO, "Email interception: rewriting IMAP auth credentials to avoid leaking passwords to agencies"); + } + + if (glob->mask_pop3_creds) { + logger(LOG_INFO, "Email interception: rewriting POP3 plain text passwords to avoid leaking passwords to agencies"); + } + + logger(LOG_DEBUG, "OpenLI: session idle timeout for SMTP sessions: %u minutes", glob->email_timeouts.smtp); + logger(LOG_DEBUG, "OpenLI: session idle timeout for IMAP sessions: %u minutes", glob->email_timeouts.imap); + logger(LOG_DEBUG, "OpenLI: session idle timeout for POP3 sessions: %u minutes", glob->email_timeouts.pop3); + if (create_ssl_context(&(glob->sslconf)) < 0) { return NULL; } @@ -1462,6 +1652,34 @@ static int reload_collector_config(collector_global_t *glob, pthread_rwlock_unlock(&(glob->config_mutex)); + pthread_rwlock_wrlock(&(glob->email_config_mutex)); + if (glob->mask_imap_creds != newstate.mask_imap_creds) { + if (newstate.mask_imap_creds) { + logger(LOG_INFO, "OpenLI: Email interception: rewriting IMAP auth credentials to avoid leaking passwords to agencies"); + } else { + logger(LOG_INFO, "OpenLI: Email interception: no longer rewriting IMAP auth credentials to avoid leaking passwords to agencies"); + } + } + + if (glob->mask_pop3_creds != newstate.mask_pop3_creds) { + if (newstate.mask_pop3_creds) { + logger(LOG_INFO, "OpenLI: Email interception: rewriting POP3 plain text passwords to avoid leaking passwords to agencies"); + } else { + logger(LOG_INFO, "OpenLI: Email interception: no longer rewriting POP3 plain text passwords to avoid leaking passwords to agencies"); + } + } + + glob->mask_imap_creds = newstate.mask_imap_creds; + glob->mask_pop3_creds = newstate.mask_pop3_creds; + glob->email_timeouts.smtp = newstate.email_timeouts.smtp; + glob->email_timeouts.imap = newstate.email_timeouts.imap; + glob->email_timeouts.pop3 = newstate.email_timeouts.pop3; + + logger(LOG_DEBUG, "OpenLI: session idle timeout for SMTP sessions is now %u minutes", glob->email_timeouts.smtp); + logger(LOG_DEBUG, "OpenLI: session idle timeout for IMAP sessions is now %u minutes", glob->email_timeouts.imap); + logger(LOG_DEBUG, "OpenLI: session idle timeout for POP3 sessions is now %u minutes", glob->email_timeouts.pop3); + pthread_rwlock_unlock(&(glob->email_config_mutex)); + endreload: clear_global_config(&newstate); return ret; @@ -1703,6 +1921,41 @@ int main(int argc, char *argv[]) { pthread_setname_np(glob->forwarders[i].threadid, name); } + glob->emailworkers = calloc(glob->email_threads, + sizeof(openli_email_worker_t)); + + for (i = 0; i < glob->email_threads; i++) { + snprintf(name, 1024, "emailworker-%d", i); + + glob->emailworkers[i].zmq_ctxt = glob->zmq_ctxt; + glob->emailworkers[i].topoll = NULL; + glob->emailworkers[i].topoll_size = 0; + glob->emailworkers[i].emailid = i; + glob->emailworkers[i].tracker_threads = glob->seqtracker_threads; + glob->emailworkers[i].fwd_threads = glob->forwarding_threads; + glob->emailworkers[i].zmq_pubsocks = NULL; + glob->emailworkers[i].zmq_fwdsocks = NULL; + glob->emailworkers[i].zmq_ingest_recvsock = NULL; + glob->emailworkers[i].zmq_colthread_recvsock = NULL; + glob->emailworkers[i].zmq_ii_sock = NULL; + + glob->emailworkers[i].timeouts = NULL; + glob->emailworkers[i].allintercepts = NULL; + glob->emailworkers[i].alltargets = NULL; + 
glob->emailworkers[i].activesessions = NULL; + glob->emailworkers[i].stats_mutex = &(glob->stats_mutex); + glob->emailworkers[i].stats = &(glob->stats); + + glob->emailworkers[i].glob_config_mutex = &(glob->email_config_mutex); + glob->emailworkers[i].mask_imap_creds = &(glob->mask_imap_creds); + glob->emailworkers[i].mask_pop3_creds = &(glob->mask_pop3_creds); + glob->emailworkers[i].timeout_thresholds = &(glob->email_timeouts); + + pthread_create(&(glob->emailworkers[i].threadid), NULL, + start_email_worker_thread, (void *)&(glob->emailworkers[i])); + pthread_setname_np(glob->emailworkers[i].threadid, name); + } + glob->seqtrackers = calloc(glob->seqtracker_threads, sizeof(seqtracker_thread_data_t)); @@ -1744,6 +1997,19 @@ int main(int argc, char *argv[]) { pthread_setname_np(glob->encoders[i].threadid, name); } + /* Start email ingesting daemon, if required */ + if (glob->emailconf.enabled) { + glob->emailsockfd = create_listener(glob->emailconf.listenaddr, + glob->emailconf.listenport, "email ingestor socket"); + if (glob->emailsockfd == -1) { + logger(LOG_INFO, "OpenLI: WARNING unable to create listening socket for email ingestion service"); + } else if (start_email_mhd_daemon(&(glob->emailconf), + glob->emailsockfd, glob->email_ingestor, &glob->sslconf) + == NULL) { + logger(LOG_INFO, "OpenLI: WARNING unable to start email ingestion service"); + } + } + /* Start IP intercept sync thread */ ret = pthread_create(&(glob->syncip.threadid), NULL, start_ip_sync_thread, (void *)glob); @@ -1830,6 +2096,10 @@ int main(int argc, char *argv[]) { } } + if (glob->email_ingestor) { + stop_email_mhd_daemon(glob->email_ingestor); + } + pthread_join(glob->syncip.threadid, NULL); pthread_join(glob->syncvoip.threadid, NULL); for (i = 0; i < glob->seqtracker_threads; i++) { @@ -1842,6 +2112,9 @@ int main(int argc, char *argv[]) { for (i = 0; i < glob->forwarding_threads; i++) { pthread_join(glob->forwarders[i].threadid, NULL); } + for (i = 0; i < glob->email_threads; i++) { + pthread_join(glob->emailworkers[i].threadid, NULL); + } logger(LOG_INFO, "OpenLI: exiting OpenLI Collector."); /* Tidy up, exit */ diff --git a/src/collector/collector.h b/src/collector/collector.h index 13b3095a..b1752df7 100644 --- a/src/collector/collector.h +++ b/src/collector/collector.h @@ -50,6 +50,8 @@ #include "collector_base.h" #include "openli_tls.h" #include "radius_hasher.h" +#include "email_ingest_service.h" +#include "email_worker.h" enum { OPENLI_PUSH_IPINTERCEPT = 1, @@ -75,6 +77,9 @@ enum { OPENLI_UPDATE_DHCP = 2, OPENLI_UPDATE_SIP = 3, OPENLI_UPDATE_GTP = 4, + OPENLI_UPDATE_SMTP = 5, + OPENLI_UPDATE_IMAP = 6, + OPENLI_UPDATE_POP3 = 7, }; typedef struct openli_intersync_msg { @@ -158,15 +163,6 @@ typedef struct export_queue_set { } export_queue_set_t; - -typedef struct sync_epoll { - uint8_t fdtype; - int fd; - void *ptr; - libtrace_thread_t *parent; - UT_hash_handle hh; -} sync_epoll_t; - typedef struct sync_sendq { libtrace_message_queue_t *q; libtrace_thread_t *parent; @@ -203,6 +199,8 @@ typedef struct colthread_local { thread */ libtrace_message_queue_t fromsyncq_voip; + void **email_worker_queues; + /* Current intercepts */ ipv4_target_t *activeipv4intercepts; @@ -231,6 +229,21 @@ typedef struct colthread_local { */ coreserver_t *gtpservers; + /* Known SMTP servers, i.e. if we see traffic to or from these + * servers, we assume it is SMTP. + */ + coreserver_t *smtpservers; + + /* Known IMAP servers, i.e. if we see traffic to or from these + * servers, we assume it is IMAP. 
+ */ + coreserver_t *imapservers; + + /* Known POP3 servers, i.e. if we see traffic to or from these + * servers, we assume it is POP3. + */ + coreserver_t *pop3servers; + patricia_tree_t *staticv4ranges; patricia_tree_t *staticv6ranges; patricia_tree_t *dynamicv6ranges; @@ -252,8 +265,8 @@ typedef struct collector_global { int seqtracker_threads; int encoding_threads; int forwarding_threads; + int email_threads; - void *zmq_forwarder_ctrl; void *zmq_encoder_ctrl; pthread_rwlock_t config_mutex; @@ -267,6 +280,7 @@ typedef struct collector_global { seqtracker_thread_data_t *seqtrackers; openli_encoder_t *encoders; forwarding_thread_data_t *forwarders; + openli_email_worker_t *emailworkers; colthread_local_t **collocals; int nextloc; @@ -294,7 +308,16 @@ typedef struct collector_global { uint8_t encoding_method; openli_ssl_config_t sslconf; - openli_RMQ_config_t RMQ_conf; + openli_RMQ_config_t RMQ_conf; + openli_email_ingest_config_t emailconf; + + pthread_rwlock_t email_config_mutex; + openli_email_timeouts_t email_timeouts; + uint8_t mask_imap_creds; + uint8_t mask_pop3_creds; + + int emailsockfd; + email_ingestor_state_t *email_ingestor; } collector_global_t; diff --git a/src/collector/collector_base.h b/src/collector/collector_base.h index 6df3639a..39d65fab 100644 --- a/src/collector/collector_base.h +++ b/src/collector/collector_base.h @@ -34,6 +34,8 @@ #include #include #include +#include +#include #include "export_shared.h" #include "etsili_core.h" @@ -43,6 +45,14 @@ #define MAX_ENCODED_RESULT_BATCH 50 +typedef struct sync_epoll { + uint8_t fdtype; + int fd; + void *ptr; + libtrace_thread_t *parent; + UT_hash_handle hh; +} sync_epoll_t; + typedef struct export_dest { int failmsg; int fd; @@ -71,11 +81,14 @@ typedef struct collector_stats { uint64_t packets_intercepted; uint64_t packets_sync_ip; uint64_t packets_sync_voip; + uint64_t packets_sync_email; uint64_t ipcc_created; uint64_t ipiri_created; uint64_t mobiri_created; uint64_t ipmmcc_created; uint64_t ipmmiri_created; + uint64_t emailcc_created; + uint64_t emailiri_created; uint64_t bad_sip_packets; uint64_t bad_ip_session_packets; @@ -83,19 +96,27 @@ typedef struct collector_stats { uint64_t ipintercepts_added_total; uint64_t voipintercepts_added_diff; uint64_t voipintercepts_added_total; + uint64_t emailintercepts_added_diff; + uint64_t emailintercepts_added_total; uint64_t ipintercepts_ended_diff; uint64_t ipintercepts_ended_total; uint64_t voipintercepts_ended_diff; uint64_t voipintercepts_ended_total; + uint64_t emailintercepts_ended_diff; + uint64_t emailintercepts_ended_total; uint64_t ipsessions_added_diff; uint64_t ipsessions_added_total; uint64_t voipsessions_added_diff; uint64_t voipsessions_added_total; + uint64_t emailsessions_added_diff; + uint64_t emailsessions_added_total; uint64_t ipsessions_ended_diff; uint64_t ipsessions_ended_total; uint64_t voipsessions_ended_diff; uint64_t voipsessions_ended_total; + uint64_t emailsessions_ended_diff; + uint64_t emailsessions_ended_total; } collector_stats_t; diff --git a/src/collector/collector_forwarder.c b/src/collector/collector_forwarder.c index 1e358c46..9bb5a426 100644 --- a/src/collector/collector_forwarder.c +++ b/src/collector/collector_forwarder.c @@ -315,7 +315,7 @@ static int handle_ctrl_message(forwarding_thread_data_t *fwd, return 0; } - if (msg->type == OPENLI_EXPORT_INTERCEPT_DETAILS) { + if (msg->type == OPENLI_EXPORT_INTERCEPT_OVER) { remove_reorderers(fwd, msg->data.cept.liid, &(fwd->intreorderer_cc)); remove_reorderers(fwd, msg->data.cept.liid, 
&(fwd->intreorderer_iri)); @@ -363,7 +363,8 @@ static inline int enqueue_result(forwarding_thread_data_t *fwd, if (res->origreq->type == OPENLI_EXPORT_IPCC || res->origreq->type == OPENLI_EXPORT_IPMMCC || - res->origreq->type == OPENLI_EXPORT_UMTSCC) { + res->origreq->type == OPENLI_EXPORT_UMTSCC || + res->origreq->type == OPENLI_EXPORT_EMAILCC ) { reorderer = &(fwd->intreorderer_cc); } else { @@ -1142,18 +1143,8 @@ void *start_forwarding_thread(void *data) { break; } - if (res.msgbody) { - free(res.msgbody->encoded); - free(res.msgbody); - } - - if (res.liid) { - free(res.liid); - } + free_encoded_result(&res); - if (res.ipcontents) { - free(res.ipcontents); - } } while (x > 0); haltforwarder: diff --git a/src/collector/collector_publish.c b/src/collector/collector_publish.c index 6ebb3bba..10639a97 100644 --- a/src/collector/collector_publish.c +++ b/src/collector/collector_publish.c @@ -34,6 +34,7 @@ #include "logger.h" #include "util.h" #include "collector_publish.h" +#include "emailiri.h" int publish_openli_msg(void *pubsock, openli_export_recv_t *msg) { @@ -62,6 +63,19 @@ void free_published_message(openli_export_recv_t *msg) { if (msg->data.ipcc.ipcontent) { free(msg->data.ipcc.ipcontent); } + } else if (msg->type == OPENLI_EXPORT_EMAILCC) { + if (msg->data.emailcc.liid) { + free(msg->data.emailcc.liid); + } + if (msg->data.emailcc.cc_content) { + free(msg->data.emailcc.cc_content); + } + } else if (msg->type == OPENLI_EXPORT_EMAILIRI) { + if (msg->data.emailiri.liid) { + free(msg->data.emailiri.liid); + } + free_email_iri_content(&(msg->data.emailiri.content)); + } else if (msg->type == OPENLI_EXPORT_IPMMIRI) { if (msg->data.ipmmiri.liid) { free(msg->data.ipmmiri.liid); diff --git a/src/collector/collector_publish.h b/src/collector/collector_publish.h index b0ce26b5..3500c72a 100644 --- a/src/collector/collector_publish.h +++ b/src/collector/collector_publish.h @@ -55,6 +55,9 @@ enum { OPENLI_EXPORT_UMTSIRI = 17, OPENLI_EXPORT_RAW_SYNC = 18, OPENLI_EXPORT_INTERCEPT_CHANGED = 19, + OPENLI_EXPORT_PROVISIONER_MESSAGE = 20, + OPENLI_EXPORT_EMAILCC = 21, + OPENLI_EXPORT_EMAILIRI = 22, }; /* This structure is also used for IPMMCCs since they require the same @@ -70,6 +73,23 @@ typedef struct openli_ipcc_job { uint8_t dir; } PACKED openli_ipcc_job_t; +typedef struct openli_emailiri_job { + char *liid; + uint32_t cin; + etsili_iri_type_t iritype; + etsili_email_iri_content_t content; + etsili_generic_t *customparams; +} openli_emailiri_job_t; + +typedef struct openli_emailcc_job { + char *liid; + uint32_t cin; + uint8_t format; + uint8_t dir; + uint8_t *cc_content; + int cc_content_len; +} openli_emailcc_job_t; + typedef struct openli_ipmmiri_job { char *liid; uint32_t cin; @@ -135,6 +155,12 @@ typedef struct published_intercept_msg { int seqtrackerid; } published_intercept_msg_t; +typedef struct provisioner_msg { + uint8_t msgtype; + uint8_t *msgbody; + uint16_t msglen; +} provisioner_msg_t; + typedef struct openli_export_recv openli_export_recv_t; struct openli_export_recv { @@ -145,11 +171,14 @@ struct openli_export_recv { openli_mediator_t med; libtrace_packet_t *packet; published_intercept_msg_t cept; + provisioner_msg_t provmsg; openli_ipcc_job_t ipcc; openli_ipmmiri_job_t ipmmiri; openli_ipiri_job_t ipiri; openli_mobiri_job_t mobiri; openli_rawip_job_t rawip; + openli_emailiri_job_t emailiri; + openli_emailcc_job_t emailcc; } data; }; diff --git a/src/collector/collector_push_messaging.c b/src/collector/collector_push_messaging.c index a4794a9a..8d5b35fb 100644 --- 
a/src/collector/collector_push_messaging.c +++ b/src/collector/collector_push_messaging.c @@ -297,6 +297,7 @@ static int update_ipv4_intercept(colthread_local_t *loc, ipsession_t *toup) { found->common.tostart_time = toup->common.tostart_time; found->common.toend_time = toup->common.toend_time; + found->common.tomediate = toup->common.tomediate; return 1; } @@ -378,6 +379,7 @@ static int update_ipv6_intercept(colthread_local_t *loc, ipsession_t *toup) { found->common.tostart_time = toup->common.tostart_time; found->common.toend_time = toup->common.toend_time; + found->common.tomediate = toup->common.tomediate; return 1; } @@ -566,6 +568,15 @@ void handle_push_coreserver(libtrace_thread_t *t, colthread_local_t *loc, case OPENLI_CORE_SERVER_SIP: servlist = &(loc->sipservers); break; + case OPENLI_CORE_SERVER_SMTP: + servlist = &(loc->smtpservers); + break; + case OPENLI_CORE_SERVER_IMAP: + servlist = &(loc->imapservers); + break; + case OPENLI_CORE_SERVER_POP3: + servlist = &(loc->pop3servers); + break; case OPENLI_CORE_SERVER_GTP: servlist = &(loc->gtpservers); break; @@ -601,6 +612,15 @@ void handle_remove_coreserver(libtrace_thread_t *t, colthread_local_t *loc, case OPENLI_CORE_SERVER_SIP: servlist = &(loc->sipservers); break; + case OPENLI_CORE_SERVER_SMTP: + servlist = &(loc->smtpservers); + break; + case OPENLI_CORE_SERVER_IMAP: + servlist = &(loc->imapservers); + break; + case OPENLI_CORE_SERVER_POP3: + servlist = &(loc->pop3servers); + break; case OPENLI_CORE_SERVER_GTP: servlist = &(loc->gtpservers); break; @@ -799,6 +819,7 @@ void handle_change_voip_intercept(libtrace_thread_t *t, colthread_local_t *loc, rtp->common.tostart_time = tochange->common.tostart_time; rtp->common.toend_time = tochange->common.toend_time; + rtp->common.tomediate = tochange->common.tomediate; free_single_rtpstream(tochange); } @@ -839,6 +860,7 @@ void handle_change_vendmirror_intercept(libtrace_thread_t *t, found->common.tostart_time = vend->common.tostart_time; found->common.toend_time = vend->common.toend_time; + found->common.tomediate = vend->common.tomediate; free_single_vendmirror_intercept(vend); } @@ -853,6 +875,7 @@ void handle_change_iprange_intercept(libtrace_thread_t *t, if (sessrec) { sessrec->common.tostart_time = ipr->common.tostart_time; sessrec->common.toend_time = ipr->common.toend_time; + sessrec->common.tomediate = ipr->common.tomediate; tmp = sessrec->common.authcc; sessrec->common.authcc = ipr->common.authcc; diff --git a/src/collector/collector_seqtracker.c b/src/collector/collector_seqtracker.c index 9a316441..4868f7f6 100644 --- a/src/collector/collector_seqtracker.c +++ b/src/collector/collector_seqtracker.c @@ -73,6 +73,10 @@ static inline char *extract_liid_from_job(openli_export_recv_t *recvd) { return recvd->data.mobiri.liid; case OPENLI_EXPORT_RAW_SYNC: return recvd->data.rawip.liid; + case OPENLI_EXPORT_EMAILIRI: + return recvd->data.emailiri.liid; + case OPENLI_EXPORT_EMAILCC: + return recvd->data.emailcc.liid; } return NULL; } @@ -92,6 +96,10 @@ static inline uint32_t extract_cin_from_job(openli_export_recv_t *recvd) { return recvd->data.mobiri.cin; case OPENLI_EXPORT_RAW_SYNC: return recvd->data.rawip.cin; + case OPENLI_EXPORT_EMAILIRI: + return recvd->data.emailiri.cin; + case OPENLI_EXPORT_EMAILCC: + return recvd->data.emailcc.cin; } logger(LOG_INFO, "OpenLI: invalid message type in extract_cin_from_job: %u", @@ -352,9 +360,11 @@ static int run_encoding_job(seqtracker_thread_data_t *seqdata, if (recvd->type == OPENLI_EXPORT_IPMMCC || recvd->type == OPENLI_EXPORT_IPCC || 
- recvd->type == OPENLI_EXPORT_UMTSCC) { + recvd->type == OPENLI_EXPORT_UMTSCC || + recvd->type == OPENLI_EXPORT_EMAILCC) { job.seqno = cinseq->cc_seqno; cinseq->cc_seqno ++; + } else { job.seqno = cinseq->iri_seqno; cinseq->iri_seqno ++; @@ -430,6 +440,8 @@ static void seqtracker_main(seqtracker_thread_data_t *seqdata) { case OPENLI_EXPORT_IPIRI: case OPENLI_EXPORT_UMTSCC: case OPENLI_EXPORT_UMTSIRI: + case OPENLI_EXPORT_EMAILIRI: + case OPENLI_EXPORT_EMAILCC: case OPENLI_EXPORT_RAW_SYNC: run_encoding_job(seqdata, job); sincepurge ++; diff --git a/src/collector/collector_sync.c b/src/collector/collector_sync.c index 74f8bc00..114c2b2b 100644 --- a/src/collector/collector_sync.c +++ b/src/collector/collector_sync.c @@ -89,9 +89,11 @@ collector_sync_t *init_sync_data(collector_global_t *glob) { sync->pubsockcount = glob->seqtracker_threads; sync->forwardcount = glob->forwarding_threads; + sync->emailcount = glob->email_threads; sync->zmq_pubsocks = calloc(sync->pubsockcount, sizeof(void *)); sync->zmq_fwdctrlsocks = calloc(sync->forwardcount, sizeof(void *)); + sync->zmq_emailsocks = calloc(sync->emailcount, sizeof(void *)); sync->ctx = glob->sslconf.ctx; sync->ssl = NULL; @@ -115,6 +117,17 @@ collector_sync_t *init_sync_data(collector_global_t *glob) { } } + for (i = 0; i < sync->emailcount; i++) { + sync->zmq_emailsocks[i] = zmq_socket(glob->zmq_ctxt, ZMQ_PUSH); + snprintf(sockname, 128, "inproc://openliemailcontrol_sync-%d", i); + if (zmq_connect(sync->zmq_emailsocks[i], sockname) != 0) { + logger(LOG_INFO, "OpenLI: colsync thread unable to connect to zmq control socket for email threads: %s", + strerror(errno)); + zmq_close(sync->zmq_emailsocks[i]); + sync->zmq_emailsocks[i] = NULL; + } + } + for (i = 0; i < sync->pubsockcount; i++) { sync->zmq_pubsocks[i] = zmq_socket(glob->zmq_ctxt, ZMQ_PUSH); snprintf(sockname, 128, "inproc://openlipub-%d", i); @@ -133,12 +146,35 @@ collector_sync_t *init_sync_data(collector_global_t *glob) { } +static int send_halt_message_over_zmq(void *zmqsock) { + openli_export_recv_t *haltmsg; + int zero = 0, ret; + + if (zmqsock == NULL) { + return 1; + } + + /* Send a halt message to get the tracker thread to stop */ + haltmsg = (openli_export_recv_t *)calloc(1, sizeof(openli_export_recv_t)); + haltmsg->type = OPENLI_EXPORT_HALT; + ret = zmq_send(zmqsock, &haltmsg, sizeof(haltmsg), ZMQ_NOBLOCK); + if (ret < 0 && errno == EAGAIN) { + free(haltmsg); + return 0; + } else if (ret <= 0) { + free(haltmsg); + } + + zmq_setsockopt(zmqsock, ZMQ_LINGER, &zero, sizeof(zero)); + zmq_close(zmqsock); + return 1; +} + void clean_sync_data(collector_sync_t *sync) { - int i = 0, zero=0, ret; + int i = 0, zero=0; int haltattempts = 0, haltfails = 0; ip_to_session_t *iter, *tmp; - openli_export_recv_t *haltmsg; default_radius_user_t *raditer, *radtmp; if (sync->instruct_fd != -1) { @@ -231,58 +267,46 @@ void clean_sync_data(collector_sync_t *sync) { } for (i = 0; i < sync->pubsockcount; i++) { - if (sync->zmq_pubsocks[i] == NULL) { - continue; - } + int r; - /* Send a halt message to get the tracker thread to stop */ - haltmsg = (openli_export_recv_t *)calloc(1, - sizeof(openli_export_recv_t)); - haltmsg->type = OPENLI_EXPORT_HALT; - ret = zmq_send(sync->zmq_pubsocks[i], &haltmsg, sizeof(haltmsg), - ZMQ_NOBLOCK); - if (ret < 0 && errno == EAGAIN) { + r = send_halt_message_over_zmq(sync->zmq_pubsocks[i]); + if (r == 0) { haltfails ++; - free(haltmsg); if (haltattempts < 9) { continue; } - } else if (ret <= 0) { - free(haltmsg); } - - zmq_setsockopt(sync->zmq_pubsocks[i], 
ZMQ_LINGER, &zero, - sizeof(zero)); - zmq_close(sync->zmq_pubsocks[i]); sync->zmq_pubsocks[i] = NULL; } for (i = 0; i < sync->forwardcount; i++) { - if (sync->zmq_fwdctrlsocks[i] == NULL) { - continue; - } + int r; - /* Send a halt message to get the forwarder thread to stop */ - haltmsg = (openli_export_recv_t *)calloc(1, - sizeof(openli_export_recv_t)); - haltmsg->type = OPENLI_EXPORT_HALT; - ret = zmq_send(sync->zmq_fwdctrlsocks[i], &haltmsg, sizeof(haltmsg), - ZMQ_NOBLOCK); - if (ret < 0 && errno == EAGAIN) { + r = send_halt_message_over_zmq(sync->zmq_fwdctrlsocks[i]); + if (r == 0) { haltfails ++; - free(haltmsg); if (haltattempts < 9) { + i--; continue; } - } else if (ret <= 0) { - free(haltmsg); } - zmq_setsockopt(sync->zmq_fwdctrlsocks[i], ZMQ_LINGER, &zero, - sizeof(zero)); - zmq_close(sync->zmq_fwdctrlsocks[i]); sync->zmq_fwdctrlsocks[i] = NULL; } + for (i = 0; i < sync->emailcount; i++) { + int r; + + r = send_halt_message_over_zmq(sync->zmq_emailsocks[i]); + if (r == 0) { + haltfails ++; + if (haltattempts < 9) { + i--; + continue; + } + } + sync->zmq_emailsocks[i] = NULL; + } + if (haltfails == 0) { break; } @@ -290,11 +314,44 @@ void clean_sync_data(collector_sync_t *sync) { usleep(250000); } + free(sync->zmq_emailsocks); free(sync->zmq_pubsocks); free(sync->zmq_fwdctrlsocks); } +static int forward_provmsg_to_email_workers(collector_sync_t *sync, + uint8_t *provmsg, uint16_t msglen, openli_proto_msgtype_t msgtype) { + + openli_export_recv_t *topush; + int i, ret, errcount = 0; + + for (i = 0; i < sync->emailcount; i++) { + topush = (openli_export_recv_t *)calloc(1, + sizeof(openli_export_recv_t)); + + topush->type = OPENLI_EXPORT_PROVISIONER_MESSAGE; + topush->data.provmsg.msgtype = msgtype; + topush->data.provmsg.msgbody = (uint8_t *)malloc(msglen); + memcpy(topush->data.provmsg.msgbody, provmsg, msglen); + topush->data.provmsg.msglen = msglen; + + ret = zmq_send(sync->zmq_emailsocks[i], &topush, sizeof(topush), 0); + if (ret < 0) { + logger(LOG_INFO, "Unable to forward provisioner message to email worker %d: %s", i, strerror(errno)); + + free(topush->data.provmsg.msgbody); + free(topush); + + errcount ++; + continue; + } + } + + return 1; + +} + static int forward_provmsg_to_voipsync(collector_sync_t *sync, uint8_t *provmsg, uint16_t msglen, openli_proto_msgtype_t msgtype) { @@ -381,6 +438,10 @@ static int create_iri_from_packet_event(collector_sync_t *sync, void *parseddata) { struct timeval now; + if (ipint->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + return 0; + } + gettimeofday(&now, NULL); if (!INTERCEPT_IS_ACTIVE(ipint, now)) { return 0; @@ -398,6 +459,10 @@ static int create_iri_from_session(collector_sync_t *sync, struct timeval now; + if (ipint->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + return 0; + } + gettimeofday(&now, NULL); if (!INTERCEPT_IS_ACTIVE(ipint, now)) { return 0; @@ -648,10 +713,12 @@ static int new_staticiprange(collector_sync_t *sync, uint8_t *intmsg, HASH_ADD_KEYPTR(hh, ipint->statics, ipr->rangestr, strlen(ipr->rangestr), ipr); - gettimeofday(&now, NULL); - if (INTERCEPT_IS_ACTIVE(ipint, now)) { - create_ipiri_job_from_iprange(sync, ipr, ipint, OPENLI_IPIRI_STARTWHILEACTIVE); - } + if (ipint->common.tomediate != OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + gettimeofday(&now, NULL); + if (INTERCEPT_IS_ACTIVE(ipint, now)) { + create_ipiri_job_from_iprange(sync, ipr, ipint, OPENLI_IPIRI_STARTWHILEACTIVE); + } + } HASH_ITER(hh, (sync_sendq_t *)(sync->glob->collector_queues), sendq, tmp) { @@ -906,6 +973,7 @@ static void 
push_ipintercept_update_to_threads(collector_sync_t *sync, ipint->common.tostart_time = modified->common.tostart_time; ipint->common.toend_time = modified->common.toend_time; + ipint->common.tomediate = modified->common.tomediate; /* Update all static IP ranges for this intercept */ HASH_ITER(hh, ipint->statics, ipr, tmpr) { @@ -1128,7 +1196,6 @@ static void push_existing_user_sessions(collector_sync_t *sync, static int insert_new_ipintercept(collector_sync_t *sync, ipintercept_t *cept) { openli_export_recv_t *expmsg; - int i; if (cept->vendmirrorid != OPENLI_VENDOR_MIRROR_NONE) { @@ -1169,11 +1236,6 @@ static int insert_new_ipintercept(collector_sync_t *sync, ipintercept_t *cept) { expmsg = create_intercept_details_msg(&(cept->common)); publish_openli_msg(sync->zmq_pubsocks[cept->common.seqtrackerid], expmsg); - for (i = 0; i < sync->forwardcount; i++) { - expmsg = create_intercept_details_msg(&(cept->common)); - publish_openli_msg(sync->zmq_fwdctrlsocks[i], expmsg); - } - if (cept->username) { push_existing_user_sessions(sync, cept); add_intercept_to_user_intercept_list(&sync->userintercepts, cept); @@ -1245,6 +1307,7 @@ static int modify_ipintercept(collector_sync_t *sync, uint8_t *intmsg, ipintercept_t *ipint, *modified; openli_export_recv_t *expmsg; + int changed = 0; modified = calloc(1, sizeof(ipintercept_t)); @@ -1263,8 +1326,6 @@ static int modify_ipintercept(collector_sync_t *sync, uint8_t *intmsg, return insert_new_ipintercept(sync, modified); } - /* TODO apply any changes to authcc or delivcc */ - if (strcmp(ipint->username, modified->username) != 0) { push_ipintercept_halt_to_threads(sync, ipint); remove_intercept_from_user_intercept_list(&sync->userintercepts, ipint); @@ -1292,18 +1353,39 @@ static int modify_ipintercept(collector_sync_t *sync, uint8_t *intmsg, ipint->common.toend_time != modified->common.toend_time) { logger(LOG_INFO, "OpenLI: IP intercept %s has changed start / end times -- now %lu, %lu", ipint->common.liid, modified->common.tostart_time, modified->common.toend_time); + ipint->common.tostart_time = modified->common.tostart_time; + ipint->common.toend_time = modified->common.toend_time; update_intercept_time_event(&(sync->upcoming_intercept_events), ipint, &(ipint->common), &(modified->common)); - push_ipintercept_update_to_threads(sync, ipint, modified); - } else if (strcmp(ipint->common.delivcc, modified->common.delivcc) != 0 || + changed = 1; + } + + if (ipint->common.tomediate != modified->common.tomediate) { + char space[1024]; + intercept_mediation_mode_as_string(modified->common.tomediate, space, + 1024); + logger(LOG_INFO, + "OpenLI: IP intercept %s has changed mediation mode to: %s", + ipint->common.liid, space); + ipint->common.tomediate = modified->common.tomediate; + changed = 1; + + } + + + if (strcmp(ipint->common.delivcc, modified->common.delivcc) != 0 || strcmp(ipint->common.authcc, modified->common.authcc) != 0) { - push_ipintercept_update_to_threads(sync, ipint, modified); + changed = 1; expmsg = create_intercept_details_msg(&(ipint->common)); expmsg->type = OPENLI_EXPORT_INTERCEPT_CHANGED; publish_openli_msg(sync->zmq_pubsocks[ipint->common.seqtrackerid], expmsg); } + if (changed) { + push_ipintercept_update_to_threads(sync, ipint, modified); + } + free_single_ipintercept(modified); return 0; } @@ -1320,6 +1402,7 @@ static int halt_ipintercept(collector_sync_t *sync, uint8_t *intmsg, logger(LOG_INFO, "OpenLI: received invalid IP intercept withdrawal from provisioner."); } + free_single_ipintercept(torem); return -1; } @@ -1329,6 +1412,7 @@ 
static int halt_ipintercept(collector_sync_t *sync, uint8_t *intmsg, if (!ipint) { logger(LOG_INFO, "OpenLI: tried to halt IP intercept %s but this was not present in the intercept map?", torem->common.liid); + free_single_ipintercept(torem); return -1; } @@ -1502,51 +1586,6 @@ static int new_ipintercept(collector_sync_t *sync, uint8_t *intmsg, return insert_new_ipintercept(sync, cept); } - -static int new_voipintercept(collector_sync_t *sync, uint8_t *intmsg, - uint16_t msglen) { - - voipintercept_t *vint, *found; - openli_export_recv_t *expmsg; - int i; - - /* Most of the new VOIP intercept stuff is handled by the VOIP sync - * thread, but we also need to let the forwarder threads know that - * a new intercept is starting and only the IP sync thread has - * sockets for sending messages to the forwarders. - * - * Technically, this is only to handle an edge case that should - * never happen (i.e. an intercept ID being re-used after it had - * previously been used and withdrawn) but we should try to do the - * right thing if it ever happens (most likely to be when users - * are testing deployments, of course). - */ - - vint = (voipintercept_t *)calloc(1, sizeof(voipintercept_t)); - - if (decode_voipintercept_start(intmsg, msglen, vint) == -1) { - /* Don't bother logging, the VOIP sync thread should handle that */ - return -1; - } - - HASH_FIND(hh_liid, sync->knownvoips, vint->common.liid, - vint->common.liid_len, found); - - if (found == NULL) { - HASH_ADD_KEYPTR(hh_liid, sync->knownvoips, vint->common.liid, - vint->common.liid_len, vint); - - for (i = 0; i < sync->forwardcount; i++) { - expmsg = create_intercept_details_msg(&(vint->common)); - publish_openli_msg(sync->zmq_fwdctrlsocks[i], expmsg); - } - } else { - free_single_voipintercept(vint); - } - - return 1; -} - static void disable_unconfirmed_intercepts(collector_sync_t *sync) { coreserver_t *cs, *tmp3; ipintercept_t *ipint, *tmp; @@ -1707,17 +1746,6 @@ static int recv_from_provisioner(collector_sync_t *sync) { return -1; } break; - case OPENLI_PROTO_START_VOIPINTERCEPT: - ret = new_voipintercept(sync, provmsg, msglen); - if (ret == -1) { - return -1; - } - ret = forward_provmsg_to_voipsync(sync, provmsg, msglen, - msgtype); - if (ret == -1) { - return -1; - } - break; case OPENLI_PROTO_MODIFY_IPINTERCEPT: ret = modify_ipintercept(sync, provmsg, msglen); @@ -1726,6 +1754,7 @@ static int recv_from_provisioner(collector_sync_t *sync) { } break; + case OPENLI_PROTO_START_VOIPINTERCEPT: case OPENLI_PROTO_HALT_VOIPINTERCEPT: case OPENLI_PROTO_MODIFY_VOIPINTERCEPT: case OPENLI_PROTO_ANNOUNCE_SIP_TARGET: @@ -1740,7 +1769,28 @@ static int recv_from_provisioner(collector_sync_t *sync) { disable_unconfirmed_intercepts(sync); ret = forward_provmsg_to_voipsync(sync, provmsg, msglen, msgtype); + if (ret == -1) { + return -1; + } + ret = forward_provmsg_to_email_workers(sync, provmsg, msglen, + msgtype); + if (ret == -1) { + return -1; + } break; + + case OPENLI_PROTO_START_EMAILINTERCEPT: + case OPENLI_PROTO_HALT_EMAILINTERCEPT: + case OPENLI_PROTO_MODIFY_EMAILINTERCEPT: + case OPENLI_PROTO_ANNOUNCE_EMAIL_TARGET: + case OPENLI_PROTO_WITHDRAW_EMAIL_TARGET: + ret = forward_provmsg_to_email_workers(sync, provmsg, msglen, + msgtype); + if (ret == -1) { + return -1; + } + break; + default: if (sync->instruct_log) { logger(LOG_INFO, "Received unexpected message of type %d from provisioner.", msgtype); @@ -1892,6 +1942,7 @@ void sync_disconnect_provisioner(collector_sync_t *sync, uint8_t dropmeds) { /* Tell other sync thread to flag its intercepts too 
*/ forward_provmsg_to_voipsync(sync, NULL, 0, OPENLI_PROTO_DISCONNECT); + forward_provmsg_to_email_workers(sync, NULL, 0, OPENLI_PROTO_DISCONNECT); /* Same with mediators -- keep exporting to them, but flag them to be * disconnected if they are not announced after we reconnect. */ diff --git a/src/collector/collector_sync.h b/src/collector/collector_sync.h index 914744c0..dce23277 100644 --- a/src/collector/collector_sync.h +++ b/src/collector/collector_sync.h @@ -48,8 +48,11 @@ typedef struct colsync_data { int pubsockcount; int forwardcount; + int emailcount; + void **zmq_pubsocks; void **zmq_fwdctrlsocks; + void **zmq_emailsocks; void *zmq_colsock; internet_user_t *allusers; diff --git a/src/collector/collector_sync_voip.c b/src/collector/collector_sync_voip.c index 899a3b44..fe8dd2a0 100644 --- a/src/collector/collector_sync_voip.c +++ b/src/collector/collector_sync_voip.c @@ -65,6 +65,9 @@ collector_sync_voip_t *init_voip_sync_data(collector_global_t *glob) { sync->pubsockcount = glob->seqtracker_threads; sync->zmq_pubsocks = calloc(sync->pubsockcount, sizeof(void *)); + sync->forwardcount = glob->forwarding_threads; + sync->zmq_fwdctrlsocks = calloc(sync->forwardcount, sizeof(void *)); + sync->topoll = calloc(128, sizeof(zmq_pollitem_t)); sync->topoll_size = 128; sync->expiring_streams = calloc(128, sizeof(struct rtpstreaminf *)); @@ -88,6 +91,17 @@ collector_sync_voip_t *init_voip_sync_data(collector_global_t *glob) { /* Do we need to set a HWM? */ } + for (i = 0; i < sync->forwardcount; i++) { + sync->zmq_fwdctrlsocks[i] = zmq_socket(glob->zmq_ctxt, ZMQ_PUSH); + snprintf(sockname, 128, "inproc://openliforwardercontrol_sync-%d", i); + if (zmq_connect(sync->zmq_fwdctrlsocks[i], sockname) != 0) { + logger(LOG_INFO, "OpenLI: colsyncvoip thread unable to connect to zmq control socket for forwarding threads: %s", + strerror(errno)); + zmq_close(sync->zmq_fwdctrlsocks[i]); + sync->zmq_fwdctrlsocks[i] = NULL; + } + } + sync->zmq_colsock = zmq_socket(glob->zmq_ctxt, ZMQ_PULL); if (zmq_bind(sync->zmq_colsock, "inproc://openli-voipsync") != 0) { logger(LOG_INFO, "OpenLI: colsync VOIP thread unable to bind to zmq socket for collector updates: %s", @@ -168,13 +182,23 @@ void clean_sync_voip_data(collector_sync_voip_t *sync) { zmq_close(sync->zmq_pubsocks[i]); } + for (i = 0; i < sync->forwardcount; i++) { + if (sync->zmq_fwdctrlsocks[i] == NULL) { + continue; + } + zmq_setsockopt(sync->zmq_fwdctrlsocks[i], ZMQ_LINGER, &zero, + sizeof(zero)); + zmq_close(sync->zmq_fwdctrlsocks[i]); + sync->zmq_fwdctrlsocks[i] = NULL; + } + if (sync->zmq_colsock) { zmq_setsockopt(sync->zmq_colsock, ZMQ_LINGER, &zero, sizeof(zero)); zmq_close(sync->zmq_colsock); } free(sync->zmq_pubsocks); - + free(sync->zmq_fwdctrlsocks); } @@ -431,6 +455,8 @@ static inline int announce_rtp_streams_if_required( } rtp->active = 1; rtp->changed = 0; + free(rtp->invitecseq); + rtp->invitecseq = NULL; return 1; } @@ -905,8 +931,6 @@ static int process_sip_183sessprog(collector_sync_voip_t *sync, thisrtp->changed = 1; } } - free(thisrtp->invitecseq); - thisrtp->invitecseq = NULL; announce_rtp_streams_if_required(sync, thisrtp); } @@ -948,8 +972,6 @@ static int process_sip_200ok(collector_sync_voip_t *sync, rtpstreaminf_t *thisrt thisrtp->changed = 1; } } - free(thisrtp->invitecseq); - thisrtp->invitecseq = NULL; announce_rtp_streams_if_required(sync, thisrtp); } else if (thisrtp->byecseq && strcmp(thisrtp->byecseq, @@ -987,6 +1009,10 @@ static inline void create_sip_ipiri(collector_sync_voip_t *sync, openli_export_recv_t *copy; + if 
(vint->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + return; + } + if (vint->common.tostart_time > irimsg->ts.tv_sec) { return; } @@ -1036,7 +1062,7 @@ static int process_sip_other(collector_sync_voip_t *sync, char *callid, voipintshared_t *vshared; char rtpkey[256]; rtpstreaminf_t *thisrtp; - etsili_iri_type_t iritype = ETSILI_IRI_REPORT; + etsili_iri_type_t iritype = ETSILI_IRI_CONTINUE; int exportcount = 0; int badsip = 0; @@ -1201,8 +1227,6 @@ static int process_sip_register(collector_sync_voip_t *sync, char *callid, sipreg = create_new_voip_registration(sync, vint, callid, matched); } else { - int found; - matched = check_sip_identity_fields(sync, vint, &passertid, &remotepartyid); if (!matched) { @@ -1615,6 +1639,16 @@ static int modify_voipintercept(collector_sync_voip_t *sync, uint8_t *intmsg, "OpenLI: VOIP intercept %s has changed start / end times -- now %lu, %lu", tomod.common.liid, tomod.common.tostart_time, tomod.common.toend_time); } + if (tomod.common.tomediate != vint->common.tomediate) { + char space[1024]; + changed = 1; + intercept_mediation_mode_as_string(tomod.common.tomediate, space, + 1024); + logger(LOG_INFO, + "OpenLI: VOIP intercept %s has changed mediation mode to: %s", + vint->common.liid, space); + } + if (strcmp(tomod.common.delivcc, vint->common.delivcc) != 0 || strcmp(tomod.common.authcc, vint->common.authcc) != 0) { char *tmp; @@ -1635,6 +1669,7 @@ static int modify_voipintercept(collector_sync_voip_t *sync, uint8_t *intmsg, vint->options = tomod.options; vint->common.tostart_time = tomod.common.tostart_time; vint->common.toend_time = tomod.common.toend_time; + vint->common.tomediate = tomod.common.tomediate; if (changed) { push_voip_intercept_update_to_threads(sync, vint); @@ -1647,7 +1682,8 @@ static int modify_voipintercept(collector_sync_voip_t *sync, uint8_t *intmsg, static inline void remove_voipintercept(collector_sync_voip_t *sync, voipintercept_t *vint) { - openli_export_recv_t *expmsg; + openli_export_recv_t *expmsg, *fwdmsg; + int i; push_voipintercept_halt_to_threads(sync, vint); @@ -1663,6 +1699,17 @@ static inline void remove_voipintercept(collector_sync_voip_t *sync, pthread_mutex_unlock(sync->glob->stats_mutex); publish_openli_msg(sync->zmq_pubsocks[vint->common.seqtrackerid], expmsg); + for (i = 0; i < sync->forwardcount; i++) { + fwdmsg = (openli_export_recv_t *)calloc(1, + sizeof(openli_export_recv_t)); + fwdmsg->type = OPENLI_EXPORT_INTERCEPT_OVER; + fwdmsg->data.cept.liid = strdup(vint->common.liid); + fwdmsg->data.cept.authcc = strdup(vint->common.authcc); + fwdmsg->data.cept.delivcc = strdup(vint->common.delivcc); + publish_openli_msg(sync->zmq_fwdctrlsocks[i], fwdmsg); + } + + HASH_DELETE(hh_liid, sync->voipintercepts, vint); free_single_voipintercept(vint); } diff --git a/src/collector/collector_sync_voip.h b/src/collector/collector_sync_voip.h index d5bfa84d..c7002642 100644 --- a/src/collector/collector_sync_voip.h +++ b/src/collector/collector_sync_voip.h @@ -41,8 +41,10 @@ typedef struct collector_sync_voip_data { collector_identity_t *info; int pubsockcount; + int forwardcount; void **zmq_pubsocks; void *zmq_colsock; + void **zmq_fwdctrlsocks; voipintercept_t *voipintercepts; voipcinmap_t *knowncallids; diff --git a/src/collector/email_ingest_service.c b/src/collector/email_ingest_service.c new file mode 100644 index 00000000..d94cdb70 --- /dev/null +++ b/src/collector/email_ingest_service.c @@ -0,0 +1,468 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. 
+ * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "logger.h" +#include "email_ingest_service.h" + +const char *busypage = + "This server is busy, please try again later."; + +const char *completepage = + "Your message has been received."; + +const char *errorpage = + "Something went horribly wrong..."; +const char *servererrorpage = + "An internal server error has occured."; +const char *accessdenied = "Access DENIED"; + +#define REALM "emailingest@openli.nz" +#define OPAQUE "2153ab20f777ce3106003ac9af7da810ea97dd20" + +static unsigned int uploading_clients = 0; + +static int init_email_ingest_state(email_ingestor_state_t *state, + openli_email_ingest_config_t *config, openli_ssl_config_t *sslconf) { + + state->daemon = NULL; + state->config = config; + state->zmq_publishers = NULL; + state->key_pem = NULL; + state->cert_pem = NULL; + + if (config->tlsrequired) { + if (!sslconf->certfile) { + logger(LOG_INFO, "OpenLI: email ingest socket requires TLS but no certificate has been provided -- not creating ingestion socket"); + return -1; + } + + if (!sslconf->keyfile) { + logger(LOG_INFO, "OpenLI: email ingest socket requires TLS but no key has been provided -- not creating ingestion socket"); + return -1; + } + + if (load_pem_into_memory(sslconf->keyfile, &(state->key_pem)) < 0) { + logger(LOG_INFO, "OpenLI: failed to load SSL key file for email ingestion socket -- not creating ingestion socket"); + return -1; + } + + if (load_pem_into_memory(sslconf->certfile, &(state->cert_pem)) < 0) { + logger(LOG_INFO, "OpenLI: failed to load SSL certificate file for email ingestion socket -- not creating ingestion socket"); + return -1; + } + } + return 0; +} + +static MHD_RESULT iterate_post (void *coninfo_cls, enum MHD_ValueKind kind, + const char *key, const char *filename, const char *content_type, + const char *transfer_encoding, const char *data, uint64_t off, + size_t size) { + + email_connection_t *con_info = (email_connection_t *)(coninfo_cls); + char *ptr; + + if (con_info->thismsg == NULL) { + con_info->thismsg = calloc(1, sizeof(openli_email_captured_t)); + } + + if (strcmp(key, "TARGET_ID") == 0) { + con_info->thismsg->target_id = strdup(data); + } else if (strcmp(key, "REMOTE_IP") == 0) { + con_info->thismsg->remote_ip = strdup(data); + } else if (strcmp(key, "REMOTE_PORT") == 0) { + con_info->thismsg->remote_port = strdup(data); + } else if (strcmp(key, "HOST_IP") == 0) { + con_info->thismsg->host_ip = strdup(data); + } else if (strcmp(key, "HOST_PORT") == 0) { + con_info->thismsg->host_port = strdup(data); + } else if (strcmp(key, "DATA_SOURCE") == 0) { + con_info->thismsg->datasource = strdup(data); + } 
else if (strcmp(key, "SESSION_ID") == 0) { + con_info->thismsg->session_id = strdup(data); + } else if (strcmp(key, "DIRECTION") == 0) { + if (strcasecmp(data, "out") == 0) { + con_info->thismsg->direction = OPENLI_EMAIL_DIRECTION_OUTBOUND; + } else if (strcasecmp(data, "in") == 0) { + con_info->thismsg->direction = OPENLI_EMAIL_DIRECTION_INBOUND; + } else { + con_info->thismsg->direction = OPENLI_EMAIL_DIRECTION_UNKNOWN; + } + + } else if (strcmp(key, "TIMESTAMP") == 0) { + con_info->thismsg->timestamp = strtoul(data, NULL, 10); + + } else if (strcmp(key, "MAIL_ID") == 0) { + con_info->thismsg->mail_id = strtoul(data, NULL, 10); + } else if (strcmp(key, "SERVICE") == 0) { + + if (strcasecmp(data, "smtp") == 0) { + con_info->thismsg->type = OPENLI_EMAIL_TYPE_SMTP; + } else if (strcasecmp(data, "pop3") == 0) { + con_info->thismsg->type = OPENLI_EMAIL_TYPE_POP3; + } else if (strcasecmp(data, "imap") == 0) { + con_info->thismsg->type = OPENLI_EMAIL_TYPE_IMAP; + } else { + con_info->thismsg->type = OPENLI_EMAIL_TYPE_UNKNOWN; + } + + + } else if (strcmp(key, "BYTES") == 0) { + //con_info->thismsg->msg_length = strtoul(data, NULL, 10); + } else if (strcmp(key, "BUFFER") == 0) { + int datalen = 0; + + ptr = (char *)data; + while (*ptr == 0x0a || *ptr == 0x0d) { + ptr ++; + } + + if (*ptr == '\0' || ptr - data >= size) { + free(con_info->thismsg->content); + con_info->thismsg->content = NULL; + } + + datalen = strlen(ptr); + con_info->thismsg->own_content = 1; + con_info->thismsg->content = strdup(ptr); + con_info->thismsg->msg_length = datalen; + } + + //logger(LOG_INFO, "KEY %s", key); + //logger(LOG_INFO, "VALUE %s", data); + + con_info->answerstring = completepage; + con_info->answercode = MHD_HTTP_OK; + + return MHD_YES; + +} + +static int send_auth_fail_page(struct MHD_Connection *connection, + const char *page, int invalid_nonce) { + + int ret; + struct MHD_Response *response; + + response = + MHD_create_response_from_buffer (strlen (page), (void *) page, + MHD_RESPMEM_MUST_COPY); + if (!response) { + return MHD_NO; + } + MHD_add_response_header (response, MHD_HTTP_HEADER_CONTENT_TYPE, + "text/html"); + + ret = MHD_queue_auth_fail_response(connection, REALM, OPAQUE, + response, (invalid_nonce == MHD_INVALID_NONCE) ? 
MHD_YES : MHD_NO); + MHD_destroy_response(response); + return ret; +} + +static int send_page(struct MHD_Connection *connection, const char *page, + int status_code) { + + int ret; + struct MHD_Response *response; + + response = + MHD_create_response_from_buffer (strlen (page), (void *) page, + MHD_RESPMEM_MUST_COPY); + if (!response) { + return MHD_NO; + } + MHD_add_response_header (response, MHD_HTTP_HEADER_CONTENT_TYPE, + "text/html"); + ret = MHD_queue_response (connection, status_code, response); + MHD_destroy_response (response); + + return ret; +} + +static void email_request_completed(void *cls, + struct MHD_Connection *connection, + void **con_cls, enum MHD_RequestTerminationCode toe) { + + email_connection_t *con_info = (email_connection_t *)(*con_cls); + + if (con_info == NULL) { + return; + } + + if (con_info->thismsg) { + int r = 0; + while (1) { + r = zmq_send(con_info->parentstate->zmq_publishers[0], + &(con_info->thismsg), sizeof(openli_email_captured_t *), + 0); + if (r < 0 && errno == EAGAIN) { + continue; + } + + if (r < 0) { + logger(LOG_INFO, "OpenLI: email ingestor thread failed to send captured email to worker thread %d: %s", 0, strerror(errno)); + free_captured_email(con_info->thismsg); + break; + } + + break; + } + } + + if (con_info->postproc) { + MHD_destroy_post_processor(con_info->postproc); + uploading_clients --; + } + + free(con_info); + *con_cls = NULL; +} + + +static MHD_RESULT answer_email_connection(void *cls, + struct MHD_Connection *connection, + const char *url, const char *method, + const char *version, const char *upload_data, + size_t *upload_data_size, void **con_cls) { + + email_connection_t *con_info = (email_connection_t *)(*con_cls); + email_ingestor_state_t *state = (email_ingestor_state_t *)cls; + + if (con_info == NULL) { + if (state->config->authrequired) { + char *username; + int r; + + if (state->config->authpassword == NULL) { + return send_page(connection, accessdenied, + MHD_HTTP_PRECONDITION_FAILED); + } + + username = MHD_digest_auth_get_username(connection); + if (username == NULL) { + return send_auth_fail_page(connection, accessdenied, MHD_NO); + } + + r = MHD_digest_auth_check(connection, "emailingest@openli.nz", + username, state->config->authpassword, 300); + free(username); + + if (r == MHD_INVALID_NONCE || r == MHD_NO) { + return send_auth_fail_page(connection, accessdenied, r); + } + + } + + if (uploading_clients >= state->config->maxclients) { + return send_page(connection, busypage, + MHD_HTTP_SERVICE_UNAVAILABLE); + } + + con_info = calloc(1, sizeof(email_connection_t)); + if (con_info == NULL) { + return MHD_NO; + } + con_info->parentstate = state; + if (strcmp(method, "POST") == 0) { + con_info->postproc = MHD_create_post_processor(connection, + 16 * 1024, iterate_post, (void *)con_info); + if (con_info->postproc == NULL) { + free(con_info); + return MHD_NO; + } + + uploading_clients ++; + con_info->answercode = MHD_HTTP_OK; + con_info->answerstring = completepage; + } + + con_info->thismsg = NULL; + *con_cls = (void *)con_info; + return MHD_YES; + } + + if (strcmp(method, "POST") == 0) { + if (*upload_data_size != 0) { + MHD_post_process(con_info->postproc, upload_data, + *upload_data_size); + *upload_data_size = 0; + return MHD_YES; + } else { + return send_page(connection, con_info->answerstring, + con_info->answercode); + } + } + + return send_page(connection, errorpage, MHD_HTTP_BAD_REQUEST); +} + +static void connect_email_worker_sockets(email_ingestor_state_t *state) { + + int i; + char sockname[256]; + + 
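+    /* One PUSH socket is connected per email worker thread. These
+     * "openliemailworker-ingest" inproc endpoints are separate from the
+     * "openliemailworker-colrecv" endpoints used by the packet processing
+     * threads, so ingested messages and sniffed email traffic reach the
+     * workers over distinct queues. */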
state->zmq_publishers = calloc(state->email_worker_count, sizeof(void *)); + + for (i = 0; i < state->email_worker_count; i++) { + state->zmq_publishers[i] = zmq_socket(state->zmq_ctxt, ZMQ_PUSH); + snprintf(sockname, 256, "inproc://openliemailworker-ingest%d", i); + if (zmq_connect(state->zmq_publishers[i], sockname) != 0) { + logger(LOG_INFO, "OpenLI: email ingestor thread is unable to connect to RMQ socket for email worker thread %d: %s", i, strerror(errno)); + zmq_close(state->zmq_publishers[i]); + state->zmq_publishers[i] = NULL; + } + } + +} + +struct MHD_Daemon *start_email_mhd_daemon(openli_email_ingest_config_t *config, + int sockfd, email_ingestor_state_t *state, + openli_ssl_config_t *sslconf) { + + int fd, off, len; + char rndseed[8]; + + fd = -1; + if (sockfd <= 0) { + return NULL; + } + + memset(rndseed, 0, sizeof(rndseed)); + + if (config->authrequired) { + fd = open("/dev/urandom", O_RDONLY); + if (fd == -1) { + logger(LOG_INFO, "Failed to generate random seed for authentication for email ingestion socket: %s", strerror(errno)); + return NULL; + } + + off = 0; + while (off < 8) { + if ((len = read(fd, rndseed + off, 8 - off)) == -1) { + logger(LOG_INFO, "Failed to populate random seed for authentication for email ingestion socket: %s", strerror(errno)); + close(fd); + return NULL; + } + off += len; + } + close(fd); + } + + if (init_email_ingest_state(state, config, sslconf) < 0) { + if (fd != -1) { + close(fd); + } + if (state->key_pem) { + free(state->key_pem); + } + if (state->cert_pem) { + free(state->cert_pem); + } + return NULL; + } + connect_email_worker_sockets(state); + + if (state->key_pem && state->cert_pem) { + state->daemon = MHD_start_daemon( + MHD_USE_SELECT_INTERNALLY | MHD_USE_SSL, + 0, + NULL, + NULL, + &answer_email_connection, + state, + MHD_OPTION_LISTEN_SOCKET, + sockfd, + MHD_OPTION_NOTIFY_COMPLETED, + &email_request_completed, + state, + MHD_OPTION_NONCE_NC_SIZE, + 300, + MHD_OPTION_DIGEST_AUTH_RANDOM, + sizeof(rndseed), rndseed, + MHD_OPTION_HTTPS_MEM_KEY, + state->key_pem, + MHD_OPTION_HTTPS_MEM_CERT, + state->cert_pem, + MHD_OPTION_END); + } else { + + state->daemon = MHD_start_daemon( + MHD_USE_SELECT_INTERNALLY, + 0, + NULL, + NULL, + &answer_email_connection, + state, + MHD_OPTION_LISTEN_SOCKET, + sockfd, + MHD_OPTION_NOTIFY_COMPLETED, + &email_request_completed, + state, + MHD_OPTION_NONCE_NC_SIZE, + 300, + MHD_OPTION_DIGEST_AUTH_RANDOM, + sizeof(rndseed), rndseed, + MHD_OPTION_END); + } + return state->daemon; + +} + +void stop_email_mhd_daemon(email_ingestor_state_t *state) { + int i, zero; + if (state->daemon) { + MHD_stop_daemon(state->daemon); + } + + if (state->zmq_publishers) { + for (i = 0; i < state->email_worker_count; i++) { + zero = 0; + zmq_setsockopt(state->zmq_publishers[i], + ZMQ_LINGER, &zero, sizeof(zero)); + zmq_close(state->zmq_publishers[i]); + } + free(state->zmq_publishers); + } + + if (state->key_pem) { + free(state->key_pem); + } + if (state->cert_pem) { + free(state->cert_pem); + } +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/email_ingest_service.h b/src/collector/email_ingest_service.h new file mode 100644 index 00000000..7ea6f8a3 --- /dev/null +++ b/src/collector/email_ingest_service.h @@ -0,0 +1,82 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. 
For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#ifndef OPENLI_EMAIL_INGEST_SERVICE_H_ +#define OPENLI_EMAIL_INGEST_SERVICE_H_ + +#include +#include + +#include "email_worker.h" +#include "openli_tls.h" + +#if MHD_VERSION >= 0x00097002 +#define MHD_RESULT enum MHD_Result +#else +#define MHD_RESULT int +#endif + +typedef struct openli_email_ingest_config { + uint8_t enabled; + uint8_t authrequired; + uint8_t tlsrequired; + uint32_t maxclients; + + char *listenport; + char *listenaddr; + char *authpassword; + +} openli_email_ingest_config_t; + +typedef struct email_ingest_state { + struct MHD_Daemon *daemon; + openli_email_ingest_config_t *config; + int email_worker_count; + + void *zmq_ctxt; + void **zmq_publishers; + + char *key_pem; + char *cert_pem; + +} email_ingestor_state_t; + +typedef struct email_connection { + struct MHD_PostProcessor *postproc; + const char *answerstring; + int answercode; + email_ingestor_state_t *parentstate; + + openli_email_captured_t *thismsg; +} email_connection_t; + +void stop_email_mhd_daemon(email_ingestor_state_t *state); +struct MHD_Daemon *start_email_mhd_daemon(openli_email_ingest_config_t *config, + int sockfd, email_ingestor_state_t *state, + openli_ssl_config_t *sslconf); + +#endif +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/email_worker.c b/src/collector/email_worker.c new file mode 100644 index 00000000..e32ce22a --- /dev/null +++ b/src/collector/email_worker.c @@ -0,0 +1,1379 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "util.h" +#include "logger.h" +#include "collector_base.h" +#include "collector_publish.h" +#include "email_worker.h" +#include "netcomms.h" +#include "intercept.h" +#include "timed_intercept.h" +#include "collector.h" + +static inline const char *email_type_to_string(openli_email_type_t t) { + if (t == OPENLI_EMAIL_TYPE_POP3) { + return "POP3"; + } + if (t == OPENLI_EMAIL_TYPE_SMTP) { + return "SMTP"; + } + if (t == OPENLI_EMAIL_TYPE_IMAP) { + return "IMAP"; + } + return "UNKNOWN"; +} + +static struct sockaddr_storage *construct_sockaddr(char *ip, char *port, + int *family) { + + struct sockaddr_storage *saddr; + struct addrinfo hints, *res; + int err; + + memset(&hints, 0, sizeof(hints)); + + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + err = getaddrinfo(ip, port, &hints, &res); + if (err != 0) { + logger(LOG_INFO, "OpenLI: error in email worker thread converting %s:%s into a socket address: %s", ip, port, strerror(errno)); + return NULL; + } + + if (!res) { + logger(LOG_INFO, "OpenLI: email worker thread was unable to convert %s:%s into a valid socket address?", ip, port); + return NULL; + } + + /* Just use the first result -- there should only be one anyway... */ + + if (family) { + *family = res->ai_family; + } + saddr = calloc(1, sizeof(struct sockaddr_storage)); + memcpy(saddr, res->ai_addr, res->ai_addrlen); + + char host[256]; + char serv[256]; + + getnameinfo((struct sockaddr *)saddr, sizeof(struct sockaddr_storage), + host, 256, serv, 256, NI_NUMERICHOST | NI_NUMERICSERV); + + freeaddrinfo(res); + return saddr; +} + +void replace_email_session_serveraddr(emailsession_t *sess, + char *server_ip, char *server_port) { + + struct sockaddr_storage *repl = NULL; + + if (strcmp(server_port, "0") == 0) { + return; + } + + if (strcmp(server_ip, "") == 0) { + return; + } + + repl = construct_sockaddr(server_ip, server_port, &(sess->ai_family)); + if (repl == NULL) { + return; + } + if (sess->serveraddr) { + free(sess->serveraddr); + } + sess->serveraddr = repl; + +} + +void replace_email_session_clientaddr(emailsession_t *sess, + char *client_ip, char *client_port) { + + struct sockaddr_storage *repl = NULL; + + if (strcmp(client_port, "0") == 0) { + return; + } + + if (strcmp(client_ip, "") == 0) { + return; + } + + repl = construct_sockaddr(client_ip, client_port, &(sess->ai_family)); + if (repl == NULL) { + return; + } + if (sess->clientaddr) { + free(sess->clientaddr); + } + sess->clientaddr = repl; +} + +static openli_email_captured_t *convert_packet_to_email_captured( + libtrace_packet_t *pkt, uint8_t emailtype) { + + char space[256]; + int spacelen = 256; + char ip_a[INET6_ADDRSTRLEN]; + char ip_b[INET6_ADDRSTRLEN]; + char portstr[16]; + libtrace_tcp_t *tcp; + uint32_t rem; + uint8_t proto; + void *posttcp; + + uint16_t src_port, dest_port, rem_port, host_port; + openli_email_captured_t *cap = NULL; + + src_port = trace_get_source_port(pkt); + dest_port = trace_get_destination_port(pkt); + + if (src_port == 0 || dest_port == 0) { + return NULL; + } + + tcp = (libtrace_tcp_t *)(trace_get_transport(pkt, &proto, &rem)); + if (tcp == NULL || proto != TRACE_IPPROTO_TCP || rem == 0) { + return NULL; + } + + posttcp = trace_get_payload_from_tcp(tcp, &rem); + + /* Ensure that bi-directional flows return the same session ID by + * always putting the IP and port for the endpoint with the smallest of + * the two 
ports first... + */ + + if (src_port < dest_port) { + if (trace_get_source_address_string(pkt, ip_a, INET6_ADDRSTRLEN) + == NULL) { + return NULL; + } + if (trace_get_destination_address_string(pkt, ip_b, INET6_ADDRSTRLEN) + == NULL) { + return NULL; + } + + rem_port = dest_port; + host_port = src_port; + } else { + if (trace_get_source_address_string(pkt, ip_b, INET6_ADDRSTRLEN) + == NULL) { + return NULL; + } + if (trace_get_destination_address_string(pkt, ip_a, INET6_ADDRSTRLEN) + == NULL) { + return NULL; + } + host_port = dest_port; + rem_port = src_port; + } + + snprintf(space, spacelen, "%s-%s-%u-%u", ip_a, ip_b, host_port, + rem_port); + + cap = calloc(1, sizeof(openli_email_captured_t)); + if (emailtype == OPENLI_UPDATE_SMTP) { + cap->type = OPENLI_EMAIL_TYPE_SMTP; + } else if (emailtype == OPENLI_UPDATE_IMAP) { + cap->type = OPENLI_EMAIL_TYPE_IMAP; + } else if (emailtype == OPENLI_UPDATE_POP3) { + cap->type = OPENLI_EMAIL_TYPE_POP3; + } else { + cap->type = OPENLI_EMAIL_TYPE_UNKNOWN; + } + + cap->session_id = strdup(space); + cap->target_id = NULL; + cap->datasource = NULL; + cap->remote_ip = strdup(ip_b); + cap->host_ip = strdup(ip_a); + + + snprintf(portstr, 16, "%u", rem_port); + cap->remote_port = strdup(portstr); + snprintf(portstr, 16, "%u", host_port); + cap->host_port = strdup(portstr); + + cap->timestamp = (trace_get_seconds(pkt) * 1000); + cap->mail_id = 0; + cap->msg_length = trace_get_payload_length(pkt); + + if (cap->msg_length > rem) { + cap->msg_length = rem; + } + + + cap->own_content = 0; + if (cap->msg_length > 0 && posttcp != NULL) { + cap->content = (char *)posttcp; + } else { + cap->content = NULL; + } + return cap; +} + +static void init_email_session(emailsession_t *sess, + openli_email_captured_t *cap, char *sesskey, + openli_email_worker_t *state) { + + sess->key = strdup(sesskey); + sess->cin = hashlittle(cap->session_id, strlen(cap->session_id), + 1872422); + sess->session_id = strdup(cap->session_id); + + if (cap->type == OPENLI_EMAIL_TYPE_SMTP || + cap->type == OPENLI_EMAIL_TYPE_IMAP || + cap->type == OPENLI_EMAIL_TYPE_POP3) { + sess->serveraddr = construct_sockaddr(cap->host_ip, cap->host_port, + &sess->ai_family); + sess->clientaddr = construct_sockaddr(cap->remote_ip, cap->remote_port, + NULL); + } else { + /* TODO */ + } + + if (cap->type == OPENLI_EMAIL_TYPE_IMAP) { + pthread_rwlock_rdlock(state->glob_config_mutex); + sess->mask_credentials = *(state->mask_imap_creds); + pthread_rwlock_unlock(state->glob_config_mutex); + } else if (cap->type == OPENLI_EMAIL_TYPE_POP3) { + pthread_rwlock_rdlock(state->glob_config_mutex); + sess->mask_credentials = *(state->mask_pop3_creds); + pthread_rwlock_unlock(state->glob_config_mutex); + } else { + sess->mask_credentials = 0; + } + + memset(&(sess->sender), 0, sizeof(email_participant_t)); + sess->participants = NULL; + sess->protocol = cap->type; + sess->currstate = 0; + sess->timeout_ev = NULL; + sess->proto_state = NULL; + sess->server_octets = 0; + sess->client_octets = 0; +} + +int extract_email_sender_from_body(openli_email_worker_t *state, + emailsession_t *sess, char *bodycontent, char **extracted) { + + char fromaddr[2048]; + int found = 0; + char *lt, *gt; + char *fromstart, *search, *next; + + memset(fromaddr, 0, 2048); + search = bodycontent; + + while (search) { + next = strstr(search, "\r\n"); + + if (strncasecmp(search, "From: ", 6) == 0) { + if (next - search > 2048) { + next = search + 2048; + } + memcpy(fromaddr, (search + 6), next - (search + 6)); + found = 1; + break; + } + if (next) { + 
search = (next + 2); + } else { + search = next; + } + } + + if (!found) { + return 0; + } + /* Account for From: fields which take the form: + * John Smith + */ + + /* Note: addresses that contain '<' or '>' within quotes are going + * to cause problems for this code... + */ + lt = strchr(fromaddr, '<'); + gt = strrchr(fromaddr, '>'); + + if (!lt || !gt || lt > gt) { + fromstart = fromaddr; + } else { + fromstart = (lt + 1); + *gt = '\0'; + } + + *extracted = strdup(fromstart); + return 1; +} + + +void add_email_participant(emailsession_t *sess, char *address, int issender) { + + email_participant_t *part; + + if (!issender) { + HASH_FIND(hh, sess->participants, address, strlen(address), part); + if (!part) { + part = calloc(1, sizeof(email_participant_t)); + part->emailaddr = address; + part->is_sender = 0; + HASH_ADD_KEYPTR(hh, sess->participants, part->emailaddr, + strlen(part->emailaddr), part); + + } + } else { + if (sess->sender.emailaddr) { + free(sess->sender.emailaddr); + } + sess->sender.emailaddr = address; + sess->sender.is_sender = 1; + } + +} + +void clear_email_participant_list(emailsession_t *sess) { + + email_participant_t *part, *tmp; + + if (!sess) { + return; + } + HASH_ITER(hh, sess->participants, part, tmp) { + HASH_DELETE(hh, sess->participants, part); + if (part->emailaddr) { + free(part->emailaddr); + } + free(part); + } + +} + +void clear_email_sender(emailsession_t *sess) { + + if (!sess) { + return; + } + if (sess->sender.emailaddr) { + free(sess->sender.emailaddr); + sess->sender.emailaddr = NULL; + } +} + +static void free_email_session(openli_email_worker_t *state, + emailsession_t *sess) { + + + if (!sess) { + return; + } + + clear_email_sender(sess); + clear_email_participant_list(sess); + + if (sess->timeout_ev) { + sync_epoll_t *ev, *found; + ev = (sync_epoll_t *)sess->timeout_ev; + HASH_FIND(hh, state->timeouts, &(ev->fd), sizeof(int), found); + if (found) { + HASH_DELETE(hh, state->timeouts, found); + } + close(ev->fd); + free(ev); + + } + + if (sess->protocol == OPENLI_EMAIL_TYPE_SMTP) { + free_smtp_session_state(sess, sess->proto_state); + } + + if (sess->protocol == OPENLI_EMAIL_TYPE_IMAP) { + free_imap_session_state(sess, sess->proto_state); + } + + if (sess->protocol == OPENLI_EMAIL_TYPE_POP3) { + free_pop3_session_state(sess, sess->proto_state); + } + + if (sess->serveraddr) { + free(sess->serveraddr); + } + if (sess->clientaddr) { + free(sess->clientaddr); + } + if (sess->session_id) { + free(sess->session_id); + } + if (sess->key) { + free(sess->key); + } + free(sess); + +} + +static void update_email_session_timeout(openli_email_worker_t *state, + emailsession_t *sess) { + sync_epoll_t *timerev, *syncev; + struct itimerspec its; + + if (sess->timeout_ev) { + timerev = (sync_epoll_t *)(sess->timeout_ev); + + HASH_FIND(hh, state->timeouts, &(timerev->fd), sizeof(int), syncev); + if (syncev) { + HASH_DELETE(hh, state->timeouts, syncev); + } + close(timerev->fd); + } else { + timerev = (sync_epoll_t *) calloc(1, sizeof(sync_epoll_t)); + } + + pthread_rwlock_rdlock(state->glob_config_mutex); + if (sess->protocol == OPENLI_EMAIL_TYPE_SMTP) { + its.it_value.tv_sec = state->timeout_thresholds->smtp * 60; + } else if (sess->protocol == OPENLI_EMAIL_TYPE_POP3) { + its.it_value.tv_sec = state->timeout_thresholds->pop3 * 60; + } else if (sess->protocol == OPENLI_EMAIL_TYPE_IMAP) { + its.it_value.tv_sec = state->timeout_thresholds->imap * 60; + } else { + its.it_value.tv_sec = 600; + } + pthread_rwlock_unlock(state->glob_config_mutex); + + 
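+    /* Arm the timer as a one-shot timerfd (it_interval stays zero). When it
+     * fires, the poll loop in email_worker_main() removes the session from
+     * the active session set and frees it, rather than re-arming the timer.
+     */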
its.it_value.tv_nsec = 0; + its.it_interval.tv_sec = 0; + its.it_interval.tv_nsec = 0; + + sess->timeout_ev = (void *)timerev; + timerev->fdtype = 0; + timerev->fd = timerfd_create(CLOCK_MONOTONIC, 0); + timerfd_settime(timerev->fd, 0, &its, NULL); + + timerev->ptr = sess; + HASH_ADD_KEYPTR(hh, state->timeouts, &(timerev->fd), sizeof(int), timerev); + +} + +void free_captured_email(openli_email_captured_t *cap) { + + if (cap == NULL) { + return; + } + + if (cap->session_id) { + free(cap->session_id); + } + + if (cap->target_id) { + free(cap->target_id); + } + + if (cap->remote_ip) { + free(cap->remote_ip); + } + + if (cap->remote_port) { + free(cap->remote_port); + } + + if (cap->host_ip) { + free(cap->host_ip); + } + + if (cap->host_port) { + free(cap->host_port); + } + + if (cap->datasource) { + free(cap->datasource); + } + + if (cap->content && cap->own_content) { + free(cap->content); + } + + free(cap); +} + +static void start_email_intercept(openli_email_worker_t *state, + emailintercept_t *em, int addtargets) { + + openli_export_recv_t *expmsg; + email_target_t *tgt, *tmp; + + if (state->tracker_threads <= 1) { + em->common.seqtrackerid = 0; + } else { + em->common.seqtrackerid = hash_liid(em->common.liid) % state->tracker_threads; + } + + HASH_ADD_KEYPTR(hh_liid, state->allintercepts, em->common.liid, + em->common.liid_len, em); + + if (addtargets) { + HASH_ITER(hh, em->targets, tgt, tmp) { + if (add_intercept_to_email_user_intercept_list( + &(state->alltargets), em, tgt) < 0) { + logger(LOG_INFO, "OpenLI: error while adding all email targets for intercept %s", em->common.liid); + break; + } + } + } + + if (state->emailid == 0) { + expmsg = (openli_export_recv_t *)calloc(1, sizeof(openli_export_recv_t)); + expmsg->type = OPENLI_EXPORT_INTERCEPT_DETAILS; + expmsg->data.cept.liid = strdup(em->common.liid); + expmsg->data.cept.authcc = strdup(em->common.authcc); + expmsg->data.cept.delivcc = strdup(em->common.delivcc); + expmsg->data.cept.seqtrackerid = em->common.seqtrackerid; + + publish_openli_msg(state->zmq_pubsocks[em->common.seqtrackerid], + expmsg); + } + em->awaitingconfirm = 0; +} + +static void update_email_intercept(openli_email_worker_t *state, + emailintercept_t *found, emailintercept_t *latest) { + + if (found->common.authcc) { + free(found->common.authcc); + } + found->common.authcc = latest->common.authcc; + found->common.authcc_len = latest->common.authcc_len; + latest->common.authcc = NULL; + + if (found->common.delivcc) { + free(found->common.delivcc); + } + found->common.delivcc = latest->common.delivcc; + found->common.delivcc_len = latest->common.delivcc_len; + latest->common.delivcc = NULL; + + found->common.tostart_time = latest->common.tostart_time; + found->common.toend_time = latest->common.toend_time; + found->common.tomediate = latest->common.tomediate; + + /* XXX targetagency and destid shouldn't matter, unless we actually + * use them in this thread. + * + * I think they're only relevant in the forwarding thread though */ + +} + +static void remove_email_intercept(openli_email_worker_t *state, + emailintercept_t *em, int removetargets) { + + openli_export_recv_t *expmsg; + int i; + email_target_t *tgt, *tmp; + + /* Either this intercept has been explicitly withdrawn, in which case + * we need to also purge any target addresses for it, OR the + * intercept has been reannounced so we're going to "update" it. For an + * update, we want to keep all existing targets active, but be prepared + * to drop any that are not subsequently confirmed by the provisioner. 
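+     * Re-announced targets have their awaitingconfirm flag cleared again in
+     * add_email_target(); anything still flagged when the provisioner sends
+     * OPENLI_PROTO_NOMORE_INTERCEPTS is withdrawn by
+     * disable_unconfirmed_email_intercepts().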
+ */ + HASH_ITER(hh, em->targets, tgt, tmp) { + if (removetargets) { + if (remove_intercept_from_email_user_intercept_list( + &(state->alltargets), em, tgt) < 0) { + logger(LOG_INFO, "OpenLI: error while removing all email targets for intercept %s", em->common.liid); + break; + } + } else { + /* Flag this target as needing confirmation */ + tgt->awaitingconfirm = 1; + } + } + + HASH_DELETE(hh_liid, state->allintercepts, em); + + if (state->emailid == 0 && removetargets != 0) { + expmsg = (openli_export_recv_t *)calloc(1, + sizeof(openli_export_recv_t)); + expmsg->type = OPENLI_EXPORT_INTERCEPT_OVER; + expmsg->data.cept.liid = strdup(em->common.liid); + expmsg->data.cept.authcc = strdup(em->common.authcc); + expmsg->data.cept.delivcc = strdup(em->common.delivcc); + expmsg->data.cept.seqtrackerid = em->common.seqtrackerid; + + publish_openli_msg(state->zmq_pubsocks[em->common.seqtrackerid], + expmsg); + + for (i = 0; i < state->fwd_threads; i++) { + + expmsg = (openli_export_recv_t *)calloc(1, + sizeof(openli_export_recv_t)); + expmsg->type = OPENLI_EXPORT_INTERCEPT_OVER; + expmsg->data.cept.liid = strdup(em->common.liid); + expmsg->data.cept.authcc = strdup(em->common.authcc); + expmsg->data.cept.delivcc = strdup(em->common.delivcc); + expmsg->data.cept.seqtrackerid = em->common.seqtrackerid; + + publish_openli_msg(state->zmq_fwdsocks[i], expmsg); + } + + pthread_mutex_lock(state->stats_mutex); + state->stats->emailintercepts_ended_diff ++; + state->stats->emailintercepts_ended_total ++; + pthread_mutex_unlock(state->stats_mutex); + + logger(LOG_INFO, + "OpenLI: removed email intercept %s from email worker threads", + em->common.liid); + } + + free_single_emailintercept(em); + +} + +static int add_new_email_intercept(openli_email_worker_t *state, + provisioner_msg_t *msg) { + + emailintercept_t *em, *found; + int ret = 0; + + em = calloc(1, sizeof(emailintercept_t)); + + if (decode_emailintercept_start(msg->msgbody, msg->msglen, em) < 0) { + logger(LOG_INFO, "OpenLI: email worker failed to decode email intercept start message from provisioner"); + return -1; + } + + HASH_FIND(hh_liid, state->allintercepts, em->common.liid, + em->common.liid_len, found); + + if (found) { + email_target_t *tgt, *tmp; + /* Don't halt any target intercepts just yet -- hopefully a target + * update is going to follow this... 
+ */ + HASH_ITER(hh, found->targets, tgt, tmp) { + tgt->awaitingconfirm = 1; + } + + update_email_intercept(state, found, em); + found->awaitingconfirm = 0; + free_single_emailintercept(em); + ret = 1; + } else { + start_email_intercept(state, em, 0); + if (state->emailid == 0) { + pthread_mutex_lock(state->stats_mutex); + state->stats->emailintercepts_added_diff ++; + state->stats->emailintercepts_added_total ++; + pthread_mutex_unlock(state->stats_mutex); + + logger(LOG_INFO, "OpenLI: added new email intercept for %s to email worker threads", em->common.liid); + } + } + + + return ret; +} + +static int modify_email_intercept(openli_email_worker_t *state, + provisioner_msg_t *provmsg) { + + emailintercept_t *decode, *found; + openli_export_recv_t *expmsg; + + decode = calloc(1, sizeof(emailintercept_t)); + if (decode_emailintercept_modify(provmsg->msgbody, provmsg->msglen, + decode) < 0) { + logger(LOG_INFO, "OpenLI: received invalid email intercept modification from provisioner"); + return -1; + } + + HASH_FIND(hh_liid, state->allintercepts, decode->common.liid, + decode->common.liid_len, found); + if (!found) { + start_email_intercept(state, decode, 0); + return 0; + } + + if (decode->common.tostart_time != found->common.tostart_time || + decode->common.toend_time != found->common.toend_time) { + logger(LOG_INFO, + "OpenLI: Email intercept %s has changed start / end times -- now %lu, %lu", + found->common.liid, decode->common.tostart_time, + decode->common.toend_time); + found->common.tostart_time = decode->common.tostart_time; + found->common.toend_time = decode->common.toend_time; + } + + if (decode->common.tomediate != found->common.tomediate) { + char space[1024]; + intercept_mediation_mode_as_string(decode->common.tomediate, space, + 1024); + logger(LOG_INFO, + "OpenLI: Email intercept %s has changed mediation mode to: %s", + decode->common.liid, space); + found->common.tomediate = decode->common.tomediate; + } + + if (strcmp(decode->common.delivcc, found->common.delivcc) != 0 || + strcmp(decode->common.authcc, found->common.authcc) != 0) { + char *tmp; + tmp = decode->common.authcc; + decode->common.authcc = found->common.authcc; + found->common.authcc = tmp; + tmp = decode->common.delivcc; + decode->common.delivcc = found->common.delivcc; + found->common.delivcc = tmp; + + expmsg = (openli_export_recv_t *)calloc(1, sizeof(openli_export_recv_t)); + expmsg->type = OPENLI_EXPORT_INTERCEPT_DETAILS; + expmsg->data.cept.liid = strdup(found->common.liid); + expmsg->data.cept.authcc = strdup(found->common.authcc); + expmsg->data.cept.delivcc = strdup(found->common.delivcc); + expmsg->data.cept.seqtrackerid = found->common.seqtrackerid; + + publish_openli_msg(state->zmq_pubsocks[found->common.seqtrackerid], + expmsg); + } + + free_single_emailintercept(decode); + return 0; +} + +static int halt_email_intercept(openli_email_worker_t *state, + provisioner_msg_t *provmsg) { + + emailintercept_t *decode, *found; + + decode = calloc(1, sizeof(emailintercept_t)); + if (decode_emailintercept_halt(provmsg->msgbody, provmsg->msglen, + decode) < 0) { + logger(LOG_INFO, "OpenLI: received invalid email intercept withdrawal from provisioner"); + return -1; + } + + HASH_FIND(hh_liid, state->allintercepts, decode->common.liid, + decode->common.liid_len, found); + if (!found && state->emailid == 0) { + logger(LOG_INFO, "OpenLI: tried to halt email intercept %s but this was not in the intercept map?", decode->common.liid); + free_single_emailintercept(decode); + return -1; + } + + 
remove_email_intercept(state, found, 1); + free_single_emailintercept(decode); + return 0; +} + +static int process_email_target_withdraw(openli_email_worker_t *state, + email_target_t *tgt, char *liid) { + + emailintercept_t *found; + email_target_t *tgtfound; + + HASH_FIND(hh_liid, state->allintercepts, liid, strlen(liid), found); + if (!found) { + logger(LOG_INFO, "OpenLI: received email target withdrawal for intercept %s, but this intercept is not active according to email worker thread %d", + liid, state->emailid); + return -1; + } + + if (remove_intercept_from_email_user_intercept_list(&(state->alltargets), + found, tgt) < 0) { + logger(LOG_INFO, "OpenLI: email worker thread %d failed to remove email target %s for intercept %s", state->emailid, tgt->address, liid); + return -1; + } + + HASH_FIND(hh, found->targets, tgt->address, strlen(tgt->address), tgtfound); + if (tgtfound) { + HASH_DELETE(hh, found->targets, tgtfound); + if (tgtfound->address) { + free(tgtfound->address); + } + free(tgtfound); + } + + return 0; +} + +static int remove_email_target(openli_email_worker_t *state, + provisioner_msg_t *provmsg) { + + email_target_t *tgt; + char liid[256]; + int ret; + + tgt = calloc(1, sizeof(email_target_t)); + + if (decode_email_target_withdraw(provmsg->msgbody, provmsg->msglen, + tgt, liid, 256) < 0) { + logger(LOG_INFO, "OpenLI: email worker %d received invalid email target withdrawal from provisioner", state->emailid); + return -1; + } + + ret = process_email_target_withdraw(state, tgt, liid); + + if (tgt->address) { + free(tgt->address); + } + free(tgt); + return ret; +} + +static int add_email_target(openli_email_worker_t *state, + provisioner_msg_t *provmsg) { + + email_target_t *tgt, *tgtfound; + emailintercept_t *found; + char liid[256]; + + tgt = calloc(1, sizeof(email_target_t)); + if (decode_email_target_announcement(provmsg->msgbody, provmsg->msglen, + tgt, liid, 256) < 0) { + logger(LOG_INFO, "OpenLI: email worker %d received invalid email target announcement from provisioner", state->emailid); + return -1; + } + + HASH_FIND(hh_liid, state->allintercepts, liid, strlen(liid), found); + if (!found) { + logger(LOG_INFO, "OpenLI: received email target announcement for intercept %s, but this intercept is not active according to email worker thread %d", + liid, state->emailid); + return -1; + } + + if (add_intercept_to_email_user_intercept_list(&(state->alltargets), + found, tgt) < 0) { + logger(LOG_INFO, "OpenLI: email worker thread %d failed to add email target %s for intercept %s", state->emailid, tgt->address, liid); + return -1; + } + + HASH_FIND(hh, found->targets, tgt->address, strlen(tgt->address), tgtfound); + if (!tgtfound) { + tgt->awaitingconfirm = 0; + HASH_ADD_KEYPTR(hh, found->targets, tgt->address, strlen(tgt->address), + tgt); + } else { + tgtfound->awaitingconfirm = 0; + if (tgt->address) { + free(tgt->address); + } + free(tgt); + } + return 0; +} + +static void flag_all_email_intercepts(openli_email_worker_t *state) { + emailintercept_t *em, *tmp; + email_target_t *tgt, *tmp2; + + HASH_ITER(hh_liid, state->allintercepts, em, tmp) { + em->awaitingconfirm = 1; + HASH_ITER(hh, em->targets, tgt, tmp2) { + tgt->awaitingconfirm = 1; + } + } +} + +static void disable_unconfirmed_email_intercepts(openli_email_worker_t *state) +{ + emailintercept_t *em, *tmp; + email_target_t *tgt, *tmp2; + + HASH_ITER(hh_liid, state->allintercepts, em, tmp) { + if (em->awaitingconfirm) { + remove_email_intercept(state, em, 1); + } else { + HASH_ITER(hh, em->targets, tgt, tmp2) { + 
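+                /* The intercept itself was confirmed, but any of its targets
+                 * that were not re-announced by the provisioner still need
+                 * to be withdrawn individually.
+                 */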
if (tgt->awaitingconfirm) { + process_email_target_withdraw(state, tgt, em->common.liid); + } + } + } + } +} + +static int handle_provisioner_message(openli_email_worker_t *state, + openli_export_recv_t *msg) { + + int ret = 0; + + switch(msg->data.provmsg.msgtype) { + case OPENLI_PROTO_START_EMAILINTERCEPT: + ret = add_new_email_intercept(state, &(msg->data.provmsg)); + break; + case OPENLI_PROTO_HALT_EMAILINTERCEPT: + ret = halt_email_intercept(state, &(msg->data.provmsg)); + break; + case OPENLI_PROTO_MODIFY_EMAILINTERCEPT: + ret = modify_email_intercept(state, &(msg->data.provmsg)); + break; + case OPENLI_PROTO_ANNOUNCE_EMAIL_TARGET: + ret = add_email_target(state, &(msg->data.provmsg)); + break; + case OPENLI_PROTO_WITHDRAW_EMAIL_TARGET: + ret = remove_email_target(state, &(msg->data.provmsg)); + break; + case OPENLI_PROTO_NOMORE_INTERCEPTS: + disable_unconfirmed_email_intercepts(state); + break; + case OPENLI_PROTO_DISCONNECT: + flag_all_email_intercepts(state); + break; + default: + logger(LOG_INFO, "OpenLI: email worker thread %d received unexpected message type from provisioner: %u", + state->emailid, msg->data.provmsg.msgtype); + ret = -1; + } + + + if (msg->data.provmsg.msgbody) { + free(msg->data.provmsg.msgbody); + } + + return ret; +} + +static int process_sync_thread_message(openli_email_worker_t *state) { + + openli_export_recv_t *msg; + int x; + + do { + x = zmq_recv(state->zmq_ii_sock, &msg, sizeof(msg), + ZMQ_DONTWAIT); + if (x < 0 && errno != EAGAIN) { + logger(LOG_INFO, + "OpenLI: error while receiving II in email thread %d: %s", + state->emailid, strerror(errno)); + return -1; + } + + if (x <= 0) { + break; + } + + if (msg->type == OPENLI_EXPORT_HALT) { + free(msg); + return -1; + } + + if (msg->type == OPENLI_EXPORT_PROVISIONER_MESSAGE) { + handle_provisioner_message(state, msg); + } + + /* TODO handle other message types */ + + free(msg); + } while (x > 0); + + return 1; +} + +static int find_and_update_active_session(openli_email_worker_t *state, + openli_email_captured_t *cap) { + + char sesskey[256]; + emailsession_t *sess; + int r = 0; + + snprintf(sesskey, 256, "%s-%s", email_type_to_string(cap->type), + cap->session_id); + + HASH_FIND(hh, state->activesessions, sesskey, strlen(sesskey), sess); + if (!sess) { + sess = calloc(1, sizeof(emailsession_t)); + init_email_session(sess, cap, sesskey, state); + HASH_ADD_KEYPTR(hh, state->activesessions, sess->key, + strlen(sess->key), sess); + + } + + update_email_session_timeout(state, sess); + + if (sess->protocol == OPENLI_EMAIL_TYPE_SMTP) { + r = update_smtp_session_by_ingestion(state, sess, cap); + } else if (sess->protocol == OPENLI_EMAIL_TYPE_IMAP) { + r = update_imap_session_by_ingestion(state, sess, cap); + } else if (sess->protocol == OPENLI_EMAIL_TYPE_POP3) { + r = update_pop3_session_by_ingestion(state, sess, cap); + } + + if (r < 0) { + logger(LOG_INFO, + "OpenLI: error updating %s session '%s' -- removing session...", + email_type_to_string(cap->type), sess->key); + + HASH_DELETE(hh, state->activesessions, sess); + free_email_session(state, sess); + } else if (r == 1) { + HASH_DELETE(hh, state->activesessions, sess); + free_email_session(state, sess); + } + + free_captured_email(cap); + return r; +} + +static int process_received_packet(openli_email_worker_t *state) { + openli_state_update_t recvd; + int rc; + openli_email_captured_t *cap = NULL; + + do { + rc = zmq_recv(state->zmq_colthread_recvsock, &recvd, sizeof(recvd), + ZMQ_DONTWAIT); + if (rc < 0) { + if (errno == EAGAIN) { + return 0; + } + 
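+            /* Anything other than EAGAIN is a genuine receive error: log it
+             * and return failure, which the main loop treats as a signal to
+             * halt this worker thread.
+             */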
logger(LOG_INFO, + "OpenLI: error while receiving email packet in email thread %d: %s", state->emailid, strerror(errno)); + return -1; + } + + cap = convert_packet_to_email_captured(recvd.data.pkt, recvd.type); + + if (cap == NULL) { + logger(LOG_INFO, "OpenLI: unable to derive email session ID from received packet in email thread %d", state->emailid); + return -1; + } + if (cap->content != NULL) { + find_and_update_active_session(state, cap); + } else { + free_captured_email(cap); + } + + trace_destroy_packet(recvd.data.pkt); + } while (rc > 0); + + return 0; +} + +static int process_ingested_capture(openli_email_worker_t *state) { + openli_email_captured_t *cap = NULL; + int x; + + do { + x = zmq_recv(state->zmq_ingest_recvsock, &cap, sizeof(cap), + ZMQ_DONTWAIT); + + if (x < 0 && errno != EAGAIN) { + logger(LOG_INFO, + "OpenLI: error while receiving ingested email contents in email thread %d: %s", + state->emailid, strerror(errno)); + return -1; + } + + if (x <= 0) { + break; + } + + if (cap == NULL) { + break; + } + find_and_update_active_session(state, cap); + + } while (x > 0); + + return 1; +} + +static void email_worker_main(openli_email_worker_t *state) { + + emailsession_t **expired = NULL; + int x; + int topoll_req; + sync_epoll_t *ev, *tmp; + + logger(LOG_INFO, "OpenLI: starting email processing thread %d", + state->emailid); + + /* TODO add other consumer sockets to topoll */ + + while (1) { + topoll_req = 3 + HASH_CNT(hh, state->timeouts); + + if (topoll_req > state->topoll_size) { + if (state->topoll) { + free(state->topoll); + } + if (expired) { + free(expired); + } + state->topoll = calloc(topoll_req, sizeof(zmq_pollitem_t)); + state->topoll_size = topoll_req; + expired = calloc(topoll_req, sizeof(emailsession_t *)); + } + + state->topoll[0].socket = state->zmq_ii_sock; + state->topoll[0].events = ZMQ_POLLIN; + + state->topoll[1].socket = state->zmq_ingest_recvsock; + state->topoll[1].events = ZMQ_POLLIN; + + state->topoll[2].socket = state->zmq_colthread_recvsock; + state->topoll[2].events = ZMQ_POLLIN; + + x = 3; + HASH_ITER(hh, state->timeouts, ev, tmp) { + state->topoll[x].socket = NULL; + state->topoll[x].fd = ev->fd; + state->topoll[x].events = ZMQ_POLLIN; + expired[x] = (emailsession_t *)(ev->ptr); + x++; + } + + if ((x = zmq_poll(state->topoll, topoll_req, 50)) < 0) { + if (errno == EINTR) { + continue; + } + logger(LOG_INFO, "OpenLI: error while polling in email processor %d: %s", state->emailid, strerror(errno)); + return; + } + + if (x == 0) { + continue; + } + + if (state->topoll[0].revents & ZMQ_POLLIN) { + /* message from the sync thread */ + x = process_sync_thread_message(state); + if (x < 0) { + break; + } + state->topoll[0].revents = 0; + } + + if (state->topoll[1].revents & ZMQ_POLLIN) { + /* message from the email ingesting thread */ + x = process_ingested_capture(state); + if (x < 0) { + break; + } + state->topoll[1].revents = 0; + } + + if (state->topoll[2].revents & ZMQ_POLLIN) { + /* message from the email ingesting thread */ + x = process_received_packet(state); + if (x < 0) { + break; + } + state->topoll[2].revents = 0; + } + + for (x = 3; x < topoll_req; x++) { + emailsession_t *sessfound; + + if (state->topoll[x].revents & ZMQ_POLLIN) { + HASH_FIND(hh, state->activesessions, expired[x]->key, + strlen(expired[x]->key), sessfound); + if (sessfound) { + HASH_DELETE(hh, state->activesessions, sessfound); + } + free_email_session(state, expired[x]); + } + } + } + if (expired) { + free(expired); + } +} + +static inline void clear_zmqsocks(void 
**zmq_socks, int sockcount) { + int i, zero = 0; + if (zmq_socks == NULL) { + return; + } + + for (i = 0; i < sockcount; i++) { + if (zmq_socks[i] == NULL) { + continue; + } + zmq_setsockopt(zmq_socks[i], ZMQ_LINGER, &zero, sizeof(zero)); + zmq_close(zmq_socks[i]); + } + free(zmq_socks); +} + +static inline int init_zmqsocks(void **zmq_socks, int sockcount, + const char *basename, void *zmq_ctxt) { + + int i; + char sockname[256]; + int ret = 0; + + for (i = 0; i < sockcount; i++) { + zmq_socks[i] = zmq_socket(zmq_ctxt, ZMQ_PUSH); + snprintf(sockname, 256, "%s-%d", basename, i); + if (zmq_connect(zmq_socks[i], sockname) < 0) { + ret = -1; + logger(LOG_INFO, + "OpenLI: email worker failed to bind to publishing zmq %s: %s", + sockname, strerror(errno)); + + zmq_close(zmq_socks[i]); + zmq_socks[i] = NULL; + } + } + return ret; +} + +static void free_all_email_sessions(openli_email_worker_t *state) { + + emailsession_t *sess, *tmp; + + HASH_ITER(hh, state->activesessions, sess, tmp) { + HASH_DELETE(hh, state->activesessions, sess); + free_email_session(state, sess); + } + +} + +void *start_email_worker_thread(void *arg) { + + openli_email_worker_t *state = (openli_email_worker_t *)arg; + int x, zero = 0; + char sockname[256]; + sync_epoll_t *syncev, *tmp; + openli_state_update_t recvd; + + state->zmq_pubsocks = calloc(state->tracker_threads, sizeof(void *)); + state->zmq_fwdsocks = calloc(state->fwd_threads, sizeof(void *)); + + init_zmqsocks(state->zmq_pubsocks, state->tracker_threads, + "inproc://openlipub", state->zmq_ctxt); + + init_zmqsocks(state->zmq_fwdsocks, state->fwd_threads, + "inproc://openliforwardercontrol_sync", state->zmq_ctxt); + + state->zmq_ii_sock = zmq_socket(state->zmq_ctxt, ZMQ_PULL); + snprintf(sockname, 256, "inproc://openliemailcontrol_sync-%d", + state->emailid); + if (zmq_bind(state->zmq_ii_sock, sockname) < 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to bind to II zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + if (zmq_setsockopt(state->zmq_ii_sock, ZMQ_LINGER, &zero, sizeof(zero)) + != 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to configure II zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + state->zmq_ingest_recvsock = zmq_socket(state->zmq_ctxt, ZMQ_PULL); + snprintf(sockname, 256, "inproc://openliemailworker-ingest%d", + state->emailid); + + if (zmq_bind(state->zmq_ingest_recvsock, sockname) < 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to bind to ingesting zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + if (zmq_setsockopt(state->zmq_ingest_recvsock, ZMQ_LINGER, &zero, + sizeof(zero)) != 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to configure ingesting zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + state->zmq_colthread_recvsock = zmq_socket(state->zmq_ctxt, ZMQ_PULL); + snprintf(sockname, 256, "inproc://openliemailworker-colrecv%d", + state->emailid); + + if (zmq_bind(state->zmq_colthread_recvsock, sockname) < 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to bind to colthread zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + if (zmq_setsockopt(state->zmq_colthread_recvsock, ZMQ_LINGER, &zero, + sizeof(zero)) != 0) { + logger(LOG_INFO, "OpenLI: email processing thread %d failed to configure colthread zmq: %s", state->emailid, strerror(errno)); + goto haltemailworker; + } + + email_worker_main(state); + + do { + /* 
drain remaining email captures and free them */ + x = zmq_recv(state->zmq_colthread_recvsock, &recvd, sizeof(recvd), + ZMQ_DONTWAIT); + if (x > 0) { + trace_destroy_packet(recvd.data.pkt); + } + } while (x > 0); + +haltemailworker: + logger(LOG_INFO, "OpenLI: halting email processing thread %d", + state->emailid); + /* free all state for intercepts and active sessions */ + clear_email_user_intercept_list(state->alltargets); + free_all_emailintercepts(&(state->allintercepts)); + free_all_email_sessions(state); + + /* close all ZMQs */ + zmq_close(state->zmq_ii_sock); + + if (state->topoll) { + free(state->topoll); + } + + zmq_close(state->zmq_ingest_recvsock); + zmq_close(state->zmq_colthread_recvsock); + + clear_zmqsocks(state->zmq_pubsocks, state->tracker_threads); + clear_zmqsocks(state->zmq_fwdsocks, state->fwd_threads); + + /* All timeouts should be freed when we release the active sessions, + * but just in case there are any left floating around... + */ + HASH_ITER(hh, state->timeouts, syncev, tmp) { + HASH_DELETE(hh, state->timeouts, syncev); + free(syncev); + } + + pthread_exit(NULL); +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : + diff --git a/src/collector/email_worker.h b/src/collector/email_worker.h new file mode 100644 index 00000000..7e75a939 --- /dev/null +++ b/src/collector/email_worker.h @@ -0,0 +1,202 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * + */ + + +#ifndef OPENLI_EMAIL_WORKER_H_ +#define OPENLI_EMAIL_WORKER_H_ + +#include +#include + +#include "intercept.h" +#include "collector_base.h" + +typedef enum { + OPENLI_EMAIL_TYPE_UNKNOWN = 0, + OPENLI_EMAIL_TYPE_SMTP = 1, + OPENLI_EMAIL_TYPE_POP3 = 2, + OPENLI_EMAIL_TYPE_IMAP = 3, +} openli_email_type_t; + +enum { + OPENLI_EMAIL_DIRECTION_UNKNOWN, + OPENLI_EMAIL_DIRECTION_OUTBOUND, + OPENLI_EMAIL_DIRECTION_INBOUND +}; + +typedef enum { + OPENLI_IMAP_STATE_INIT = 0, + OPENLI_IMAP_STATE_SESSION_OVER, + OPENLI_IMAP_STATE_SERVER_READY, + OPENLI_IMAP_STATE_PRE_AUTH, + OPENLI_IMAP_STATE_AUTH_STARTED, + OPENLI_IMAP_STATE_AUTHENTICATING, + OPENLI_IMAP_STATE_AUTH_REPLY, + OPENLI_IMAP_STATE_AUTHENTICATED, + OPENLI_IMAP_STATE_APPENDING, + OPENLI_IMAP_STATE_IDLING, + OPENLI_IMAP_STATE_LOGOUT, + OPENLI_IMAP_STATE_IGNORING, +} openli_imap_status_t; + +typedef enum { + OPENLI_SMTP_STATE_INIT = 0, + OPENLI_SMTP_STATE_EHLO, + OPENLI_SMTP_STATE_EHLO_RESPONSE, + OPENLI_SMTP_STATE_EHLO_OVER, + OPENLI_SMTP_STATE_MAIL_FROM, + OPENLI_SMTP_STATE_MAIL_FROM_REPLY, + OPENLI_SMTP_STATE_MAIL_FROM_OVER, + OPENLI_SMTP_STATE_RCPT_TO, + OPENLI_SMTP_STATE_RCPT_TO_REPLY, + OPENLI_SMTP_STATE_RCPT_TO_OVER, + OPENLI_SMTP_STATE_DATA_INIT_REPLY, + OPENLI_SMTP_STATE_DATA_CONTENT, + OPENLI_SMTP_STATE_DATA_FINAL_REPLY, + OPENLI_SMTP_STATE_DATA_OVER, + OPENLI_SMTP_STATE_RESET, + OPENLI_SMTP_STATE_QUIT, + OPENLI_SMTP_STATE_QUIT_REPLY +} openli_smtp_status_t; + +typedef struct openli_email_timeouts { + uint16_t smtp; + uint16_t imap; + uint16_t pop3; +} openli_email_timeouts_t; + +typedef struct openli_email_captured { + + openli_email_type_t type; + char *session_id; + char *target_id; + char *remote_ip; + char *remote_port; + char *host_ip; + char *host_port; + char *datasource; + uint8_t direction; + + uint64_t timestamp; + uint32_t mail_id; + uint32_t msg_length; + char *content; + uint8_t own_content; + +} openli_email_captured_t; + +typedef struct openli_email_worker { + + void *zmq_ctxt; + zmq_pollitem_t *topoll; + int topoll_size; + pthread_t threadid; + int emailid; + int tracker_threads; + int fwd_threads; + + void *zmq_ii_sock; /* ZMQ for receiving instructions from sync thread */ + void **zmq_pubsocks; /* ZMQs for publishing to seqtracker threads */ + void **zmq_fwdsocks; /* ZMQs for publishing to forwarding threads */ + void *zmq_ingest_recvsock; /* ZMQ for receiving from the ingestor */ + void *zmq_colthread_recvsock; /* ZMQ for receiving from collector threads */ + + sync_epoll_t *timeouts; + + emailintercept_t *allintercepts; + email_user_intercept_list_t *alltargets; + + emailsession_t *activesessions; + + pthread_mutex_t *stats_mutex; + collector_stats_t *stats; + + openli_email_timeouts_t *timeout_thresholds; + uint8_t *mask_imap_creds; + uint8_t *mask_pop3_creds; + pthread_rwlock_t *glob_config_mutex; + +} openli_email_worker_t; + +void *start_email_worker_thread(void *arg); +void free_captured_email(openli_email_captured_t *cap); + +void free_smtp_session_state(emailsession_t *sess, void *smtpstate); +int update_smtp_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t *cap); +void free_imap_session_state(emailsession_t *sess, void *smtpstate); +int update_imap_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t *cap); +void free_pop3_session_state(emailsession_t *sess, void *smtpstate); +int update_pop3_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t 
*cap); + +void add_email_participant(emailsession_t *sess, char *address, int issender); +void clear_email_participant_list(emailsession_t *sess); +void clear_email_sender(emailsession_t *sess); + +int extract_email_sender_from_body(openli_email_worker_t *state, + emailsession_t *sess, char *bodycontent, char **extracted); + +void replace_email_session_serveraddr(emailsession_t *sess, + char *server_ip, char *server_port); +void replace_email_session_clientaddr(emailsession_t *sess, + char *client_ip, char *client_port); + +/* Defined in emailiri.c */ +int generate_email_partial_download_success_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_partial_download_failure_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_download_success_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_download_failure_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_login_success_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_login_failure_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_upload_success_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_upload_failure_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_send_iri(openli_email_worker_t *state, + emailsession_t *sess); +int generate_email_logoff_iri(openli_email_worker_t *state, + emailsession_t *sess); + +/* Defined in emailcc.c */ +int generate_email_cc_from_smtp_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp); +int generate_email_cc_from_imap_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp, uint8_t dir); +int generate_email_cc_from_pop3_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp, uint8_t dir); +#endif +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailcc.c b/src/collector/emailcc.c new file mode 100644 index 00000000..fb68199e --- /dev/null +++ b/src/collector/emailcc.c @@ -0,0 +1,195 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * + */ + +#include + +#include "email_worker.h" +#include "intercept.h" +#include "util.h" +#include "logger.h" +#include "etsili_core.h" + +static inline email_user_intercept_list_t *is_address_interceptable( + openli_email_worker_t *state, char *emailaddr) { + + email_user_intercept_list_t *active = NULL; + + HASH_FIND(hh, state->alltargets, emailaddr, strlen(emailaddr), active); + return active; +} + +static openli_export_recv_t *create_emailcc_job(char *liid, + emailsession_t *sess, uint32_t destid, uint64_t timestamp, + uint8_t *content, int content_len, uint8_t format, uint8_t dir) { + + openli_export_recv_t *msg = NULL; + + msg = (openli_export_recv_t *)calloc(1, sizeof(openli_export_recv_t)); + if (msg == NULL) { + return msg; + } + msg->type = OPENLI_EXPORT_EMAILCC; + msg->destid = destid; + msg->ts.tv_sec = (time_t)(timestamp / 1000.0); + msg->ts.tv_usec = ((time_t)(timestamp % 1000)) * 1000; + + msg->data.emailcc.format = format; + msg->data.emailcc.dir = dir; + msg->data.emailcc.liid = strdup(liid); + msg->data.emailcc.cin = sess->cin; + msg->data.emailcc.cc_content_len = content_len; + + msg->data.emailcc.cc_content = (uint8_t *)malloc(content_len); + memcpy(msg->data.emailcc.cc_content, content, content_len); + + return msg; +} + +static void create_emailccs_for_intercept_list(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint8_t format, email_user_intercept_list_t *active, + uint64_t timestamp, uint8_t dir) { + + openli_export_recv_t *ccjob = NULL; + email_intercept_ref_t *ref, *tmp; + + HASH_ITER(hh, active->intlist, ref, tmp) { + + if (ref->em->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } + + if (timestamp < ref->em->common.tostart_time * 1000) { + continue; + } + if (ref->em->common.toend_time > 0 && + timestamp > ref->em->common.toend_time * 1000) { + continue; + } + ccjob = create_emailcc_job(ref->em->common.liid, sess, + ref->em->common.destid, timestamp, content, content_len, + format, dir); + if (ccjob == NULL) { + continue; + } + pthread_mutex_lock(state->stats_mutex); + state->stats->emailcc_created ++; + pthread_mutex_unlock(state->stats_mutex); + publish_openli_msg( + state->zmq_pubsocks[ref->em->common.seqtrackerid], ccjob); + + } +} + +int generate_email_cc_from_smtp_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp) { + + email_user_intercept_list_t *active = NULL; + email_participant_t *recip, *tmp; + + if (sess->sender.emailaddr) { + active = is_address_interceptable(state, sess->sender.emailaddr); + } + + if (active) { + create_emailccs_for_intercept_list(state, sess, content, content_len, + ETSILI_EMAIL_CC_FORMAT_APP, active, timestamp, + ETSI_DIR_FROM_TARGET); + } + + HASH_ITER(hh, sess->participants, recip, tmp) { + if (sess->sender.emailaddr != NULL && + strcmp(recip->emailaddr, sess->sender.emailaddr) == 0) { + continue; + } + + active = is_address_interceptable(state, recip->emailaddr); + if (!active) { + continue; + } + + create_emailccs_for_intercept_list(state, sess, content, content_len, + ETSILI_EMAIL_CC_FORMAT_APP, active, timestamp, + ETSI_DIR_TO_TARGET); + } + + return 0; +} + +int generate_email_cc_from_pop3_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp, uint8_t etsidir) { + + email_user_intercept_list_t *active = NULL; + email_participant_t *recip, *tmp; + + /* POP3 is purely a mail receiving protocol so sender should be + * 
irrelevant. + */ + + HASH_ITER(hh, sess->participants, recip, tmp) { + active = is_address_interceptable(state, recip->emailaddr); + if (!active) { + continue; + } + + create_emailccs_for_intercept_list(state, sess, content, content_len, + ETSILI_EMAIL_CC_FORMAT_APP, active, timestamp, + etsidir); + } + + return 0; +} + + +int generate_email_cc_from_imap_payload(openli_email_worker_t *state, + emailsession_t *sess, uint8_t *content, int content_len, + uint64_t timestamp, uint8_t etsidir) { + + email_user_intercept_list_t *active = NULL; + email_participant_t *recip, *tmp; + + /* IMAP is purely a mail receiving protocol so sender should be + * irrelevant. + */ + + HASH_ITER(hh, sess->participants, recip, tmp) { + active = is_address_interceptable(state, recip->emailaddr); + if (!active) { + continue; + } + + create_emailccs_for_intercept_list(state, sess, content, content_len, + ETSILI_EMAIL_CC_FORMAT_APP, active, timestamp, + etsidir); + } + + return 0; +} + + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailiri.c b/src/collector/emailiri.c new file mode 100644 index 00000000..b9a2ca0c --- /dev/null +++ b/src/collector/emailiri.c @@ -0,0 +1,499 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * + */ + +#include + +#include "util.h" +#include "logger.h" +#include "collector_base.h" +#include "collector_publish.h" +#include "email_worker.h" +#include "netcomms.h" +#include "intercept.h" +#include "etsili_core.h" +#include "emailiri.h" + +static inline email_user_intercept_list_t *is_address_interceptable( + openli_email_worker_t *state, char *emailaddr) { + + email_user_intercept_list_t *active = NULL; + + HASH_FIND(hh, state->alltargets, emailaddr, strlen(emailaddr), active); + return active; +} + +void free_email_iri_content(etsili_email_iri_content_t *content) { + + int i; + + if (content->recipients) { + for (i = 0; i < content->recipient_count; i++) { + free(content->recipients[i]); + } + free(content->recipients); + } + + if (content->sender) { + free(content->sender); + } + if (content->clientaddr) { + free(content->clientaddr); + } + if (content->serveraddr) { + free(content->serveraddr); + } + if (content->messageid) { + free(content->messageid); + } + +} + +static openli_export_recv_t *create_emailiri_job(char *liid, + emailsession_t *sess, uint8_t iritype, uint8_t emailev, + uint8_t status, uint32_t destid, uint64_t timestamp, + const char *onlyrecipient) { + + openli_export_recv_t *msg = NULL; + etsili_email_iri_content_t *content; + int i; + email_participant_t *recip, *tmp; + + msg = (openli_export_recv_t *)calloc(1, sizeof(openli_export_recv_t)); + if (msg == NULL) { + return msg; + } + + msg->type = OPENLI_EXPORT_EMAILIRI; + msg->destid = destid; + msg->ts.tv_sec = (time_t)(timestamp / 1000.0); + msg->ts.tv_usec = ((time_t)(timestamp % 1000)) * 1000; + + content = &(msg->data.emailiri.content); + + msg->data.emailiri.customparams = NULL; + msg->data.emailiri.liid = strdup(liid); + msg->data.emailiri.cin = sess->cin; + msg->data.emailiri.iritype = iritype; + content->eventtype = emailev; + if (sess->serveraddr) { + content->serveraddr = calloc(1, sizeof(struct sockaddr_storage)); + memcpy(content->serveraddr, sess->serveraddr, + sizeof(struct sockaddr_storage)); + } else { + content->serveraddr = NULL; + } + + if (sess->clientaddr) { + content->clientaddr = calloc(1, sizeof(struct sockaddr_storage)); + memcpy(content->clientaddr, sess->clientaddr, + sizeof(struct sockaddr_storage)); + } else { + content->clientaddr = NULL; + } + content->server_octets = sess->server_octets; + content->client_octets = sess->client_octets; + content->protocol = sess->protocol; + if (sess->sender.emailaddr) { + content->sender = strdup(sess->sender.emailaddr); + } else { + content->sender = NULL; + } + + /* TODO maybe we need a config option to include ALL recipients + * regardless of whether they were intercept targets? 
+ */ + if (onlyrecipient) { + content->recipient_count = 1; + content->recipients = calloc(1, sizeof(char *)); + content->recipients[0] = strdup(onlyrecipient); + } else { + content->recipient_count = HASH_CNT(hh, sess->participants); + content->recipients = calloc(content->recipient_count, + sizeof(char *)); + i = 0; + HASH_ITER(hh, sess->participants, recip, tmp) { + content->recipients[i] = strdup(recip->emailaddr); + i++; + } + } + + content->status = status; + content->messageid = NULL; + + return msg; + +} + +static void create_emailiris_for_intercept_list(openli_email_worker_t *state, + emailsession_t *sess, uint8_t iri_type, uint8_t email_ev, + uint8_t status, email_user_intercept_list_t *active, uint64_t ts, + const char *onlyrecipient) { + + openli_export_recv_t *irijob = NULL; + email_intercept_ref_t *ref, *tmp; + + HASH_ITER(hh, active->intlist, ref, tmp) { + if (ref->em->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + continue; + } + + if (ts < ref->em->common.tostart_time * 1000) { + continue; + } + + if (ref->em->common.toend_time > 0 && + ts > ref->em->common.toend_time * 1000) { + continue; + } + + irijob = create_emailiri_job(ref->em->common.liid, sess, + iri_type, email_ev, status, ref->em->common.destid, ts, + onlyrecipient); + if (irijob == NULL) { + continue; + } + pthread_mutex_lock(state->stats_mutex); + state->stats->emailiri_created ++; + pthread_mutex_unlock(state->stats_mutex); + publish_openli_msg( + state->zmq_pubsocks[ref->em->common.seqtrackerid], irijob); + } + +} + +static inline int generate_iris_for_participants(openli_email_worker_t *state, + emailsession_t *sess, uint8_t email_ev, uint8_t iri_type, + uint8_t status, uint64_t timestamp) { + + email_user_intercept_list_t *active = NULL; + email_participant_t *recip, *tmp; + + if (sess->sender.emailaddr) { + active = is_address_interceptable(state, sess->sender.emailaddr); + } + if (active) { + create_emailiris_for_intercept_list(state, sess, iri_type, + email_ev, status, active, timestamp, NULL); + } + + HASH_ITER(hh, sess->participants, recip, tmp) { + if (sess->sender.emailaddr && strcmp(recip->emailaddr, + sess->sender.emailaddr) == 0) { + continue; + } + + active = is_address_interceptable(state, recip->emailaddr); + if (!active) { + continue; + } + + create_emailiris_for_intercept_list(state, sess, iri_type, + email_ev, status, active, timestamp, recip->emailaddr); + } + + return 0; +} + +static int generate_email_login_iri(openli_email_worker_t *state, + emailsession_t *sess, uint8_t success) { + + uint8_t email_ev; + uint8_t iri_type; + uint8_t status; + + if (success) { + email_ev = ETSILI_EMAIL_EVENT_LOGON; + iri_type = ETSILI_IRI_BEGIN; + status = ETSILI_EMAIL_STATUS_SUCCESS; + } else { + email_ev = ETSILI_EMAIL_EVENT_LOGON_FAILURE; + iri_type = ETSILI_IRI_REPORT; + status = ETSILI_EMAIL_STATUS_FAILED; + } + + return generate_iris_for_participants(state, sess, email_ev, iri_type, + status, sess->login_time); +} + +int generate_email_send_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, ETSILI_EMAIL_EVENT_SEND, + ETSILI_IRI_CONTINUE, ETSILI_EMAIL_STATUS_SUCCESS, sess->event_time); + +} + +int generate_email_partial_download_success_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_PARTIAL_DOWNLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_SUCCESS, sess->event_time); + +} + +int 
generate_email_partial_download_failure_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_PARTIAL_DOWNLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_FAILED, sess->event_time); +} + +int generate_email_upload_success_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_UPLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_SUCCESS, sess->event_time); + +} + +int generate_email_upload_failure_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_UPLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_FAILED, sess->event_time); +} + +int generate_email_download_success_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_DOWNLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_SUCCESS, sess->event_time); + +} + +int generate_email_download_failure_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_DOWNLOAD, + ETSILI_IRI_REPORT, ETSILI_EMAIL_STATUS_FAILED, sess->event_time); +} + +int generate_email_logoff_iri(openli_email_worker_t *state, + emailsession_t *sess) { + + return generate_iris_for_participants(state, sess, + ETSILI_EMAIL_EVENT_LOGOFF, ETSILI_IRI_END, + ETSILI_EMAIL_STATUS_SUCCESS, sess->event_time); + +} + +int generate_email_login_success_iri(openli_email_worker_t *state, + emailsession_t *sess) { + return generate_email_login_iri(state, sess, 1); +} + +int generate_email_login_failure_iri(openli_email_worker_t *state, + emailsession_t *sess) { + return generate_email_login_iri(state, sess, 0); +} + +static inline void emailiri_free_recipients( + etsili_email_recipients_t *recipients) { + + int i; + for (i = 0; i < recipients->count; i++) { + free(recipients->addresses[i]); + } + free(recipients->addresses); +} + +static inline void emailiri_populate_recipients( + etsili_email_recipients_t *recipients, + uint32_t count, char **reciplist) { + + int i; + + recipients->count = count; + recipients->addresses = calloc(count, sizeof(char *)); + for (i = 0; i < count; i++) { + recipients->addresses[i] = reciplist[i]; + reciplist[i] = NULL; + } + +} + +void free_emailiri_parameters(etsili_generic_t *params) { + + etsili_email_recipients_t *recipients = NULL; + etsili_generic_t *oldp, *tmp; + + HASH_ITER(hh, params, oldp, tmp) { + HASH_DELETE(hh, params, oldp); + if (oldp->itemnum == EMAILIRI_CONTENTS_RECIPIENTS) { + recipients = (etsili_email_recipients_t *)oldp->itemptr; + emailiri_free_recipients(recipients); + } + + release_etsili_generic(oldp); + } +} + +void prepare_emailiri_parameters(etsili_generic_freelist_t *freegenerics, + openli_emailiri_job_t *job, etsili_generic_t **params_p) { + + etsili_generic_t *np, *params = *params_p; + etsili_email_recipients_t recipients; + etsili_ipaddress_t encip; + uint32_t port; + + memset(&recipients, 0, sizeof(recipients)); + + if (job->content.recipient_count > 0) { + emailiri_populate_recipients(&recipients, job->content.recipient_count, + job->content.recipients); + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_RECIPIENTS, + sizeof(etsili_email_recipients_t), (uint8_t *)(&recipients)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } + + np = create_etsili_generic(freegenerics, 
EMAILIRI_CONTENTS_TOTAL_RECIPIENTS, + sizeof(job->content.recipient_count), + (uint8_t *)&(job->content.recipient_count)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_EVENT_TYPE, + sizeof(job->content.eventtype), + (uint8_t *)&(job->content.eventtype)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_PROTOCOL_ID, + sizeof(job->content.protocol), + (uint8_t *)&(job->content.protocol)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_STATUS, + sizeof(job->content.status), + (uint8_t *)&(job->content.status)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + if (job->content.sender) { + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_SENDER, + strlen(job->content.sender), (uint8_t *)(job->content.sender)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } + + if (job->content.messageid) { + np = create_etsili_generic(freegenerics, EMAILIRI_CONTENTS_MESSAGE_ID, + strlen(job->content.messageid), + (uint8_t *)(job->content.messageid)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_SERVER_OCTETS_SENT, + sizeof(job->content.server_octets), + (uint8_t *)&(job->content.server_octets)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_CLIENT_OCTETS_SENT, + sizeof(job->content.client_octets), + (uint8_t *)&(job->content.client_octets)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + if (job->content.serveraddr && + job->content.serveraddr->ss_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *) + (job->content.serveraddr); + port = ntohs(in->sin_port); + + etsili_create_ipaddress_v4( + (uint32_t *)(&(in->sin_addr.s_addr)), 32, + ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, &encip); + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_SERVER_ADDRESS, + sizeof(etsili_ipaddress_t), (uint8_t *)(&encip)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_SERVER_PORT, sizeof(port), (uint8_t *)&port); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + } else if (job->content.serveraddr && + job->content.serveraddr->ss_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) + (job->content.serveraddr); + port = ntohs(in6->sin6_port); + + etsili_create_ipaddress_v6( + (uint8_t *)(&(in6->sin6_addr.s6_addr)), 128, + ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, &encip); + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_SERVER_ADDRESS, + sizeof(etsili_ipaddress_t), (uint8_t *)(&encip)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_SERVER_PORT, sizeof(port), (uint8_t *)&port); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } + + if (job->content.clientaddr && + job->content.clientaddr->ss_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *) + (job->content.clientaddr); + port = ntohs(in->sin_port); + + etsili_create_ipaddress_v4( + (uint32_t *)(&(in->sin_addr.s_addr)), 32, + 
ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, &encip); + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_CLIENT_ADDRESS, + sizeof(etsili_ipaddress_t), (uint8_t *)(&encip)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_CLIENT_PORT, sizeof(port), (uint8_t *)&port); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } else if (job->content.clientaddr && + job->content.clientaddr->ss_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) + (job->content.clientaddr); + port = ntohs(in6->sin6_port); + + etsili_create_ipaddress_v6( + (uint8_t *)(&(in6->sin6_addr.s6_addr)), 128, + ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, &encip); + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_CLIENT_ADDRESS, + sizeof(etsili_ipaddress_t), (uint8_t *)(&encip)); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + + np = create_etsili_generic(freegenerics, + EMAILIRI_CONTENTS_CLIENT_PORT, sizeof(port), (uint8_t *)&port); + HASH_ADD_KEYPTR(hh, params, &(np->itemnum), sizeof(np->itemnum), np); + } + + *params_p = params; +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailiri.h b/src/collector/emailiri.h new file mode 100644 index 00000000..e67f6506 --- /dev/null +++ b/src/collector/emailiri.h @@ -0,0 +1,60 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
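The four AF_INET / AF_INET6 branches in prepare_emailiri_parameters() repeat the same sockaddr-to-ETSI conversion for the server and client endpoints. Purely as a sketch of how that pattern could be factored out, using only calls that already appear above and assuming the same headers (example_encode_endpoint is hypothetical, not part of this patch):

    static void example_encode_endpoint(struct sockaddr_storage *addr,
            etsili_ipaddress_t *encip, uint32_t *port) {

        if (addr->ss_family == AF_INET) {
            struct sockaddr_in *in = (struct sockaddr_in *)addr;
            *port = ntohs(in->sin_port);
            etsili_create_ipaddress_v4((uint32_t *)(&(in->sin_addr.s_addr)),
                    32, ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, encip);
        } else if (addr->ss_family == AF_INET6) {
            struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
            *port = ntohs(in6->sin6_port);
            etsili_create_ipaddress_v6((uint8_t *)(&(in6->sin6_addr.s6_addr)),
                    128, ETSILI_IPADDRESS_ASSIGNED_UNKNOWN, encip);
        }
    }

The caller would then only differ in which EMAILIRI_CONTENTS_*_ADDRESS and EMAILIRI_CONTENTS_*_PORT item numbers it attaches the results to.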
+ * + * + */ + +#ifndef OPENLI_EMAILIRI_H_ +#define OPENLI_EMAILIRI_H_ + +#include "etsili_core.h" +#include "collector_publish.h" + +enum { + EMAILIRI_CONTENTS_EVENT_TYPE = 1, + EMAILIRI_CONTENTS_CLIENT_ADDRESS = 2, + EMAILIRI_CONTENTS_SERVER_ADDRESS = 3, + EMAILIRI_CONTENTS_CLIENT_PORT = 4, + EMAILIRI_CONTENTS_SERVER_PORT = 5, + EMAILIRI_CONTENTS_SERVER_OCTETS_SENT = 6, + EMAILIRI_CONTENTS_CLIENT_OCTETS_SENT = 7, + EMAILIRI_CONTENTS_PROTOCOL_ID = 8, + EMAILIRI_CONTENTS_SENDER = 9, + EMAILIRI_CONTENTS_RECIPIENTS = 10, + EMAILIRI_CONTENTS_STATUS = 11, + EMAILIRI_CONTENTS_TOTAL_RECIPIENTS = 12, + EMAILIRI_CONTENTS_MESSAGE_ID = 13, + EMAILIRI_CONTENTS_NATIONAL_PARAMETER = 14, + EMAILIRI_CONTENTS_NATIONAL_ASN1_PARAMETERS = 15, + EMAILIRI_CONTENTS_AAA_INFORMATION = 16, + EMAILIRI_CONTENTS_SENDER_VALIDITY = 17, +}; + +void free_email_iri_content(etsili_email_iri_content_t *content); +void free_emailiri_parameters(etsili_generic_t *params); +void prepare_emailiri_parameters(etsili_generic_freelist_t *freegenerics, + openli_emailiri_job_t *job, etsili_generic_t **params_p); + +#endif + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailprotocols/imap.c b/src/collector/emailprotocols/imap.c new file mode 100644 index 00000000..94fb052d --- /dev/null +++ b/src/collector/emailprotocols/imap.c @@ -0,0 +1,1750 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
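The item numbers above key the per-IRI parameter hashmap that prepare_emailiri_parameters() builds and free_emailiri_parameters() tears down. A minimal sketch of the expected calling sequence on the encoding side, assuming an openli_export_recv_t *msg of type OPENLI_EXPORT_EMAILIRI and an existing etsili_generic_freelist_t *freelist (local variable names are illustrative):

    etsili_generic_t *params = NULL;

    prepare_emailiri_parameters(freelist, &(msg->data.emailiri), &params);
    /* ... encode the params hashmap into the ETSI EmailIRI record ... */
    free_emailiri_parameters(params);

Note that free_emailiri_parameters() also releases the recipient strings that emailiri_populate_recipients() moved out of the job (the job's own pointers are NULLed during the move), so ownership of those strings transfers to the params hashmap.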
+ * + * + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +#include "email_worker.h" +#include "logger.h" + +enum { + OPENLI_IMAP_COMMAND_NONE = 0, + OPENLI_IMAP_COMMAND_SERVREADY, + OPENLI_IMAP_COMMAND_REPLY, + OPENLI_IMAP_COMMAND_REPLY_ONGOING, + OPENLI_IMAP_COMMAND_BYE, + OPENLI_IMAP_COMMAND_GENERIC, + OPENLI_IMAP_COMMAND_PREAUTH, + OPENLI_IMAP_COMMAND_AUTH, + OPENLI_IMAP_COMMAND_LOGOUT, + OPENLI_IMAP_COMMAND_LOGIN, + OPENLI_IMAP_COMMAND_IDLE, + OPENLI_IMAP_COMMAND_APPEND, + OPENLI_IMAP_COMMAND_ID, +}; + +enum { + OPENLI_IMAP_AUTH_NONE = 0, + OPENLI_IMAP_AUTH_PLAIN, + OPENLI_IMAP_AUTH_GSSAPI, + OPENLI_IMAP_AUTH_LOGIN, + OPENLI_IMAP_AUTH_OTHER +}; + +typedef struct imap_cc_index { + + int cc_start; + int cc_end; + uint8_t dir; + +} imap_cc_index_t; + +typedef struct imap_comm { + uint8_t *commbuffer; + int commbufsize; + int commbufused; + + char *imap_command; + char *tag; + char *imap_reply; + + imap_cc_index_t *ccs; + int cc_used; + int cc_alloc; + + int reply_start; + int reply_end; +} imap_command_t; + +typedef struct imapsession { + + uint8_t *contbuffer; + int contbufsize; + int contbufused; + int contbufread; + + imap_command_t *commands; + int commands_size; + + char *auth_tag; + char *mailbox; + char *mail_sender; + + int reply_start; + int next_comm_start; + uint8_t next_command_type; + char *next_comm_tag; + char *next_command_name; + + int append_command_index; + int idle_command_index; + int auth_command_index; + int auth_token_index; + int auth_read_from; + int auth_type; + +} imap_session_t; + +static void init_imap_command(imap_command_t *comm) { + comm->commbuffer = calloc(4096, sizeof(uint8_t)); + comm->commbufsize = 4096; + comm->commbufused = 0; + comm->tag = NULL; + comm->imap_reply = NULL; + comm->imap_command = NULL; + + comm->reply_start = 0; + comm->reply_end = 0; + + comm->ccs = calloc(8, sizeof(imap_cc_index_t)); + comm->cc_used = 0; + comm->cc_alloc = 8; +}; + +static inline int extend_command_buffer(imap_command_t *comm, int required) { + while (comm->commbufsize - comm->commbufused <= required + 1) { + comm->commbuffer = realloc(comm->commbuffer, comm->commbufsize + 4096); + if (comm->commbuffer == NULL) { + return -1; + } + comm->commbufsize += 4096; + } + return 0; +} + +static void add_cc_to_imap_command(imap_command_t *comm, int start_ind, + int end_ind, uint8_t dir) { + + /* dir 1 == from client (COMMAND), dir 0 == from server (RESPONSE) */ + if (comm->cc_alloc == comm->cc_used) { + comm->ccs = realloc(comm->ccs, + (comm->cc_alloc + 8) * sizeof(imap_cc_index_t)); + comm->cc_alloc += 8; + } + + comm->ccs[comm->cc_used].cc_start = start_ind; + comm->ccs[comm->cc_used].cc_end = end_ind; + comm->ccs[comm->cc_used].dir = dir; + + comm->cc_used ++; + +} + +static int complete_imap_append(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess, imap_command_t *comm) { + + if (imapsess->mailbox == NULL) { + return 1; + } + + if (strcmp(comm->imap_reply, "OK") == 0) { + generate_email_upload_success_iri(state, sess); + } else { + generate_email_upload_failure_iri(state, sess); + } + + return 1; + +} + +static int extract_imap_email_sender(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess, imap_command_t *comm) { + + int r = 0; + char *extracted = NULL; + char *safecopy; + int copylen; + char *search = (char *)(comm->commbuffer + comm->reply_start); + char *end = (char *)(comm->commbuffer + comm->reply_end); + + copylen = (end - search) + 1; + safecopy = calloc(sizeof(char), 
copylen); + memcpy(safecopy, search, (end - search)); + + r = extract_email_sender_from_body(state, sess, safecopy, &extracted); + + if (r == 0 || extracted == NULL) { + free(safecopy); + return r; + } + + imapsess->mail_sender = extracted; + add_email_participant(sess, imapsess->mail_sender, 1); + free(safecopy); + + return r; +} + +static int complete_imap_fetch(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess, imap_command_t *comm) { + + /* TODO Figure out what is actually being fetched so we can decide if this + * is a full or partial download? + */ + + if (imapsess->mailbox == NULL) { + return 1; + } + + /* For now, every example I've seen for IMAP is classed as a partial + * download and the ETSI standards are not specific on what would + * qualify for a complete download in IMAP (maybe a "RFC822" fetch? + * or BODY[]? ) + */ + + if (strcmp(comm->imap_reply, "OK") == 0) { + extract_imap_email_sender(state, sess, imapsess, comm); + generate_email_partial_download_success_iri(state, sess); + } else { + generate_email_partial_download_failure_iri(state, sess); + } + + if (imapsess->mail_sender) { + clear_email_sender(sess); + /* the memory is freed inside clear_email_sender()... */ + imapsess->mail_sender = NULL; + } + + return 1; + +} + +static int complete_imap_authentication(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess) { + + imap_command_t *comm; + + comm = &(imapsess->commands[imapsess->auth_command_index]); + + if (strcmp(comm->imap_reply, "OK") == 0) { + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATED; + /* generate login success iri */ + + generate_email_login_success_iri(state, sess); + } else { + sess->currstate = OPENLI_IMAP_STATE_PRE_AUTH; + + /* generate login failure iri */ + generate_email_login_failure_iri(state, sess); + } + + return 1; +} + +static int generate_ccs_from_imap_command(openli_email_worker_t *state, + emailsession_t *sess, imap_command_t *comm, uint64_t timestamp) { + + int i, len; + uint8_t dir; + + for (i = 0; i < comm->cc_used; i++) { + len = comm->ccs[i].cc_end - comm->ccs[i].cc_start; + + if (comm->ccs[i].dir == 1) { + dir = ETSI_DIR_FROM_TARGET; + } else { + dir = ETSI_DIR_TO_TARGET; + } + + generate_email_cc_from_imap_payload(state, sess, + comm->commbuffer + comm->ccs[i].cc_start, len, timestamp, dir); + } + + return 1; +} + +static int update_saved_login_command(imap_session_t *sess, int pwordindex, + const char *sesskey) { + + int replacelen; + imap_command_t *comm = NULL; + uint8_t *ptr; + const char *replacement = "XXX\r\n"; + + if (sess->auth_command_index == -1) { + logger(LOG_INFO, "OpenLI: %s missing IMAP auth command index?", sesskey); + return -1; + } + comm = &(sess->commands[sess->auth_command_index]); + + if (strcmp(comm->tag, sess->auth_tag) != 0) { + logger(LOG_INFO, "OpenLI: %s IMAP login command tags are mismatched? 
%s vs %s", sesskey, sess->auth_tag, comm->tag); + return -1; + } + + if (strcmp(comm->imap_command, "LOGIN") != 0) { + logger(LOG_INFO, "OpenLI: %s unexpected type for saved IMAP login command: %d", sesskey, comm->imap_command); + return -1; + } + + if (pwordindex >= comm->commbufused) { + logger(LOG_INFO, "OpenLI: cannot find original password token for IMAP login command %s, session %s\n", sess->auth_tag, sesskey); + return -1; + } + ptr = comm->commbuffer + pwordindex; + + replacelen = strlen(replacement); + memcpy(ptr, replacement, replacelen); + ptr += replacelen; + + comm->commbufused = ptr - comm->commbuffer; + comm->reply_start = comm->commbufused; + memset(ptr, 0, comm->commbufsize - comm->commbufused); + + comm->ccs[comm->cc_used - 1].cc_end = comm->commbufused; + return 1; + +} + +static int update_saved_auth_command(imap_session_t *sess, char *replace, + const char *origtoken, const char *sesskey) { + + int replacelen; + imap_command_t *comm = NULL; + char *ptr; + + if (sess->auth_command_index == -1) { + logger(LOG_INFO, "OpenLI: %s missing IMAP auth command index?", sesskey); + return -1; + } + comm = &(sess->commands[sess->auth_command_index]); + + if (strcmp(comm->tag, sess->auth_tag) != 0) { + logger(LOG_INFO, "OpenLI: %s IMAP auth command tags are mismatched? %s vs %s", sesskey, sess->auth_tag, comm->tag); + return -1; + } + + if (strcasecmp(comm->imap_command, "AUTHENTICATE") != 0) { + logger(LOG_INFO, "OpenLI: %s unexpected type for saved IMAP auth command: %s", sesskey, comm->imap_command); + return -1; + } + + ptr = strstr((const char *)comm->commbuffer, origtoken); + if (!ptr) { + logger(LOG_INFO, "OpenLI: cannot find original auth token for IMAP auth command %s, session %s\n", sess->auth_tag, sesskey); + return -1; + } + + replacelen = strlen(replace); + memcpy(ptr, replace, replacelen); + ptr += replacelen; + + comm->commbufused = ((uint8_t *)ptr - comm->commbuffer); + comm->reply_start = comm->commbufused; + memset(ptr, 0, comm->commbufsize - comm->commbufused); + + comm->ccs[comm->cc_used - 1].cc_end = comm->commbufused; + + return 1; + +} + +static int save_imap_command(imap_session_t *sess, char *sesskey) { + + int i, index; + int comm_start; + + imap_command_t *comm = NULL; + + for (i = 0; i < sess->commands_size; i++) { + if (sess->commands[i].imap_command == NULL) { + comm = &(sess->commands[i]); + index = i; + break; + } + } + + if (comm == NULL) { + sess->commands = realloc(sess->commands, + (sess->commands_size + 5) * sizeof(imap_command_t)); + for (i = sess->commands_size; i < sess->commands_size + 5; i++) { + init_imap_command(&(sess->commands[i])); + } + comm = &(sess->commands[sess->commands_size]); + index = sess->commands_size; + sess->commands_size += 5; + } + + if (extend_command_buffer(comm, sess->contbufread - sess->next_comm_start) + < 0) { + return -1; + } + + comm_start = comm->commbufused; + memcpy(comm->commbuffer + comm->commbufused, + sess->contbuffer + sess->next_comm_start, + sess->contbufread - sess->next_comm_start); + comm->commbufused += (sess->contbufread - sess->next_comm_start); + + comm->commbuffer[comm->commbufused] = '\0'; + + add_cc_to_imap_command(comm, comm_start, comm->commbufused, 1); + + comm->reply_start = comm->commbufused; + comm->reply_end = 0; + comm->imap_command = sess->next_command_name; + comm->tag = sess->next_comm_tag; + + + sess->next_comm_tag = NULL; + sess->next_command_name = NULL; + + return index; +} + +static void mask_plainauth_creds(char *mailbox, char *reencoded, int buflen) { + + char input[2048]; + 
char *ptr; + base64_encodestate e; + int spaces, toencode, cnt; + + /* reencode authtoken with replaced username and password */ + base64_init_encodestate(&e); + snprintf(input, 2048, "%s XXX XXX", mailbox); + toencode = strlen(input); + ptr = input; + spaces = 0; + + while(spaces < 2) { + if (*ptr == '\0') { + break; + } + + if (*ptr == ' ') { + *ptr = '\0'; + spaces ++; + } + ptr ++; + } + + /* TODO try not to walk off the end of reencoded -- very unlikely, given + * that we have 2048 bytes of space but you never know... + */ + ptr = reencoded; + cnt = base64_encode_block(input, toencode, ptr, &e); + + ptr += cnt; + cnt = base64_encode_blockend(ptr, &e); + + ptr += cnt; + + /* libb64 likes to add a newline to the end of its encodings, so make + * sure we strip it if one is present. + */ + if (*(ptr - 1) == '\n') { + ptr--; + } + + *ptr = '\r'; ptr++; + *ptr = '\n'; ptr++; + *ptr = '\0'; ptr++; + +} + +static int get_auth_type(char *authmsg, imap_session_t *imapsess, + const char *sesskey) { + + char *saveptr; + char *tag = NULL; + char *comm = NULL; + char *authtype = NULL; + char *lineend = NULL; + + lineend = strstr(authmsg, "\r\n"); + if (lineend == NULL) { + return 0; + } + + tag = strtok_r(authmsg, " ", &saveptr); + if (!tag) { + logger(LOG_INFO, "OpenLI: unable to derive tag from IMAP AUTHENTICATE command"); + return -1; + } + + comm = strtok_r(NULL, " ", &saveptr); + if (!comm) { + logger(LOG_INFO, "OpenLI: unable to derive command from IMAP AUTHENTICATE command"); + return -1; + } + + authtype = strtok_r(NULL, " \r\n", &saveptr); + + if (!authtype) { + logger(LOG_INFO, "OpenLI: unable to derive authentication type from IMAP AUTHENTICATE command"); + return -1; + } + + if (strcasecmp(authtype, "PLAIN") == 0) { + imapsess->auth_type = OPENLI_IMAP_AUTH_PLAIN; + imapsess->auth_read_from += (5 + (authtype - authmsg)); + + if (lineend == authtype + 5) { + imapsess->auth_read_from += 2; + } else { + imapsess->auth_read_from += 1; + } + } else if (strcasecmp(authtype, "LOGIN") == 0) { + imapsess->auth_type = OPENLI_IMAP_AUTH_LOGIN; + imapsess->auth_read_from += (5 + (authtype - authmsg)); + + if (lineend == authtype + 5) { + imapsess->auth_read_from += 2; + } else { + imapsess->auth_read_from += 1; + } + } else if (strcasecmp(authtype, "GSSAPI") == 0) { + imapsess->auth_type = OPENLI_IMAP_AUTH_GSSAPI; + imapsess->auth_read_from += (6 + (authtype - authmsg)); + + if (lineend == authtype + 6) { + imapsess->auth_read_from += 2; + } else { + imapsess->auth_read_from += 1; + } + + } else { + logger(LOG_INFO, "OpenLI: unsupported IMAP authentication type '%s' -- will not be able to derive mailbox owner for session %s", + authtype, sesskey); + return -1; + } + + + return 1; +} + +static int decode_login_command(emailsession_t *sess, + imap_session_t *imapsess) { + + char *loginmsg; + int msglen; + char *lineend = NULL; + char *saveptr; + char *tag = NULL; + char *comm = NULL; + char *username = NULL; + char *pword = NULL; + + msglen = imapsess->contbufread - imapsess->auth_read_from; + loginmsg = calloc(msglen + 1, sizeof(uint8_t)); + + memcpy(loginmsg, imapsess->contbuffer + imapsess->auth_read_from, + msglen); + + lineend = strstr(loginmsg, "\r\n"); + if (lineend == NULL) { + return 0; + } + + tag = strtok_r(loginmsg, " ", &saveptr); + if (!tag) { + logger(LOG_INFO, "OpenLI: unable to derive tag from IMAP LOGIN command"); + goto loginparsefail; + } + + comm = strtok_r(NULL, " ", &saveptr); + if (!comm) { + logger(LOG_INFO, "OpenLI: unable to derive command from IMAP LOGIN command"); + goto 
loginparsefail; + } + + username = strtok_r(NULL, " ", &saveptr); + + if (!username) { + logger(LOG_INFO, "OpenLI: unable to derive username from IMAP LOGIN command"); + return -1; + } + + pword = strtok_r(NULL, " \r\n", &saveptr); + + if (!pword) { + logger(LOG_INFO, "OpenLI: unable to derive password from IMAP LOGIN command"); + return -1; + } + + if (*username == '"') { + /* mailbox is enclosed in quotes that we need to strip */ + char *endquote = strchrnul(username + 1, '"'); + imapsess->mailbox = strndup(username + 1, endquote - (username + 1)); + } else { + imapsess->mailbox = strdup(username); + } + + add_email_participant(sess, imapsess->mailbox, 0); + + /* replace password with masked credentials */ + if (sess->mask_credentials) { + update_saved_login_command(imapsess, pword - loginmsg, sess->key); + } + free(loginmsg); + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + + sess->currstate = OPENLI_IMAP_STATE_AUTH_REPLY; + return 1; + +loginparsefail: + sess->currstate = OPENLI_IMAP_STATE_IGNORING; + free(loginmsg); + return -1; + +} + +static int decode_plain_auth_content(char *authmsg, imap_session_t *imapsess, + emailsession_t *sess) { + + char decoded[2048]; + char reencoded[2048]; + char *ptr; + int cnt, r; + char *crlf; + base64_decodestate s; + + if (*authmsg == '\0') { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATING; + return 0; + } + + crlf = strstr(authmsg, "\r\n"); + if (crlf == NULL) { + return 0; + } + + /* auth plain can be split across two messages with a + * "+" from the server in between :( */ + + if (*authmsg == '+') { + /* Client has not yet sent the auth token, so this line is + * the server indicating that it is waiting for the token. + * Skip the "+" line and remain in auth command state until + * the token arrives. + */ + + imapsess->auth_read_from += ((crlf - authmsg) + 2); + sess->server_octets += ((crlf - authmsg) + 2); + return 0; + } + + base64_init_decodestate(&s); + cnt = base64_decode_block(authmsg, strlen(authmsg), decoded, &s); + if (cnt == 0) { + return 0; + } + decoded[cnt] = '\0'; + + if (decoded[0] == '\0') { + ptr = decoded + 1; + } else { + ptr = decoded; + } + /* username and password are also inside 'decoded', each term is + * separated by null bytes (e.g. 
\0 \0 ) + */ + imapsess->mailbox = strdup(ptr); + + /* add "mailbox" as a recipient for this session */ + add_email_participant(sess, imapsess->mailbox, 0); + + /* replace encoded credentials, if requested by the user */ + if (sess->mask_credentials) { + mask_plainauth_creds(imapsess->mailbox, reencoded, 2048); + } + + /* replace saved imap command with re-encoded auth token */ + r = update_saved_auth_command(imapsess, reencoded, authmsg, sess->key); + if (r < 0) { + return r; + } + + sess->client_octets += strlen(reencoded); + + sess->currstate = OPENLI_IMAP_STATE_AUTH_REPLY; + return 1; +} + +static inline char *clone_authentication_message(imap_session_t *imapsess) { + + char *authmsg; + int msglen; + + msglen = imapsess->contbufread - imapsess->auth_read_from; + authmsg = calloc(msglen + 1, sizeof(uint8_t)); + + memcpy(authmsg, imapsess->contbuffer + imapsess->auth_read_from, + msglen); + return authmsg; +} + +static int decode_authentication_command(emailsession_t *sess, + imap_session_t *imapsess) { + + char *authmsg; + int msglen, r; + + while (1) { + /* There's no readable content in the buffer */ + if (imapsess->auth_read_from >= imapsess->contbufused) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + return 0; + } + + msglen = imapsess->contbufread - imapsess->auth_read_from; + authmsg = clone_authentication_message(imapsess); + + if (imapsess->auth_type == OPENLI_IMAP_AUTH_NONE) { + r = get_auth_type(authmsg, imapsess, sess->key); + if (r > 0) { + sess->client_octets += (msglen - + (imapsess->contbufread - imapsess->auth_read_from)); + } + free(authmsg); + if (r < 0) { + sess->currstate = OPENLI_IMAP_STATE_IGNORING; + } + if (r <= 0) { + break; + } + continue; + } + + if (imapsess->auth_type == OPENLI_IMAP_AUTH_PLAIN) { + r = decode_plain_auth_content(authmsg, imapsess, sess); + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + free(authmsg); + return r; + } else if (imapsess->auth_type == OPENLI_IMAP_AUTH_LOGIN) { + /* Let read_imap_while_auth_state() parse all future + * content until we exit the AUTHENTICATING state + */ + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + free(authmsg); + return 1; + } else if (imapsess->auth_type == OPENLI_IMAP_AUTH_GSSAPI) { + /* Let read_imap_while_auth_state() parse all future + * content until we exit the AUTHENTICATING state + */ + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATING; + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + free(authmsg); + return 1; + } else { + free(authmsg); + return -1; + } + } + + return 1; +} + +static int save_imap_reply(imap_session_t *sess, char *sesskey, + imap_command_t **comm) { + + int i; + int comm_start; + + *comm = NULL; + + for (i = 0; i < sess->commands_size; i++) { + if (sess->commands[i].tag == NULL) { + continue; + } + if (strcmp(sess->commands[i].tag, sess->next_comm_tag) == 0) { + (*comm) = &(sess->commands[i]); + break; + } + } + + if (*comm == NULL) { + logger(LOG_INFO, "OpenLI: %s unable to match IMAP reply (%s, %s) to any existing commands?", sesskey, sess->next_comm_tag, sess->next_command_name); + free(sess->next_comm_tag); + free(sess->next_command_name); + sess->next_comm_tag = NULL; + sess->next_command_name = NULL; + return 0; + } + + if (extend_command_buffer(*comm, sess->contbufread - 
sess->reply_start) + < 0) { + return -1; + } + + comm_start = (*comm)->commbufused; + memcpy((*comm)->commbuffer + (*comm)->commbufused, + sess->contbuffer + sess->reply_start, + sess->contbufread - sess->reply_start); + (*comm)->commbufused += (sess->contbufread - sess->reply_start); + + add_cc_to_imap_command((*comm), comm_start, (*comm)->commbufused, 0); + + (*comm)->commbuffer[(*comm)->commbufused] = '\0'; + (*comm)->reply_end = (*comm)->commbufused; + (*comm)->imap_reply = sess->next_command_name; + + free(sess->next_comm_tag); + sess->next_comm_tag = NULL; + sess->next_command_name = NULL; + + return 1; +} + +static void reset_imap_saved_command(imap_command_t *comm) { + + comm->commbufused = 0; + comm->reply_start = 0; + comm->reply_end = 0; + comm->cc_used = 0; + + if (comm->tag) { + free(comm->tag); + comm->tag = NULL; + } + if (comm->imap_command) { + free(comm->imap_command); + comm->imap_command = NULL; + } + if (comm->imap_reply) { + free(comm->imap_reply); + comm->imap_reply = NULL; + } +} + +void free_imap_session_state(emailsession_t *sess, void *imapstate) { + imap_session_t *imapsess; + int i; + + if (imapstate == NULL) { + return; + } + imapsess = (imap_session_t *)imapstate; + + for (i = 0; i < imapsess->commands_size; i++) { + if (imapsess->commands[i].commbuffer) { + free(imapsess->commands[i].commbuffer); + } + if (imapsess->commands[i].tag) { + free(imapsess->commands[i].tag); + } + if (imapsess->commands[i].imap_command) { + free(imapsess->commands[i].imap_command); + } + if (imapsess->commands[i].imap_reply) { + free(imapsess->commands[i].imap_reply); + } + if (imapsess->commands[i].ccs) { + free(imapsess->commands[i].ccs); + } + } + + if (imapsess->next_comm_tag) { + free(imapsess->next_comm_tag); + } + if (imapsess->next_command_name) { + free(imapsess->next_command_name); + } + + if (imapsess->auth_tag) { + free(imapsess->auth_tag); + } + + /* Don't free 'mailbox' or 'mail_sender', as these are owned by the + * participant list for the overall email session. + */ + + free(imapsess->commands); + free(imapsess->contbuffer); + free(imapsess); +} + +static int append_content_to_imap_buffer(imap_session_t *imapsess, + openli_email_captured_t *cap) { + + /* +1 to account for a null terminator */ + while (imapsess->contbufsize - imapsess->contbufused <= + cap->msg_length + 1) { + imapsess->contbuffer = realloc(imapsess->contbuffer, + imapsess->contbufsize + 4096); + if (imapsess->contbuffer == NULL) { + return -1; + } + imapsess->contbufsize += 4096; + } + + memcpy(imapsess->contbuffer + imapsess->contbufused, + cap->content, cap->msg_length); + imapsess->contbufused += cap->msg_length; + imapsess->contbuffer[imapsess->contbufused] = '\0'; + + return 0; +} + +#define ADVANCE_ID_PTR \ + ptr = strchr(ptr, '"'); \ + if (ptr == NULL) { \ + break; \ + } \ + ptr ++; \ + if (*ptr == '\r' || *ptr == '\0') { \ + break; \ + } + + +static int parse_id_command(emailsession_t *sess, imap_session_t *imapsess) { + char *ptr; + char *comm_str = (char *)(imapsess->contbuffer + imapsess->next_comm_start); + + char *field_start, *field_end, *val_start, *val_end; + char field_str[2048]; + char val_str[2048]; + + char *server_ip, *server_port, *client_ip, *client_port; + int ret = 0; + + ptr = strchr(comm_str, '('); + if (!ptr) { + return 0; + } + + /* ID commands can contain custom field that specify the "real" server + * and client IPs and ports for an IMAP session, i.e. in cases where + * the IMAP session has been delivered to our collector via a proxy + * IMAP server. 
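For reference, a proxy that supports this typically announces the real endpoints with an ID command along the lines of the following constructed example (field/value quoting as per RFC 2971; the addresses are documentation-range placeholders):

    a1 ID ("x-originating-ip" "192.0.2.45" "x-originating-port" "50312" "x-connected-ip" "198.51.100.7" "x-connected-port" "993")

parse_id_command() walks the quoted field/value pairs and, when both members of a pair are present, feeds the x-originating-* values to replace_email_session_clientaddr() and the x-connected-* values to replace_email_session_serveraddr().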
+ * + * In that situation, we want to replace the server and client addresses + * that we saved from the original packet captures with the addresses + * described in the ID content. + */ + + ptr ++; + field_start = field_end = val_start = val_end = NULL; + server_ip = client_ip = server_port = client_port = NULL; + + while (1) { + if (*ptr == ')' || *ptr == '\r' || *ptr == '\0') { + break; + } + + ADVANCE_ID_PTR + field_start = ptr; + + ADVANCE_ID_PTR + field_end = ptr - 1; + + if (strncmp(ptr, " NIL", 4) == 0) { + val_start = ptr + 1; + val_end = ptr + 4; + ptr += 4; + } else { + ADVANCE_ID_PTR + val_start = ptr; + ADVANCE_ID_PTR + val_end = ptr - 1; + } + + memset(field_str, 0, 2048); + memcpy(field_str, field_start, field_end - field_start); + memset(val_str, 0, 2048); + memcpy(val_str, val_start, val_end - val_start); + field_start = field_end = val_start = val_end = NULL; + + if (strcmp(field_str, "x-originating-ip") == 0) { + client_ip = strdup(val_str); + } else if (strcmp(field_str, "x-originating-port") == 0) { + client_port = strdup(val_str); + } else if (strcmp(field_str, "x-connected-ip") == 0) { + server_ip = strdup(val_str); + } else if (strcmp(field_str, "x-connected-port") == 0) { + server_port = strdup(val_str); + } + + } + + if (field_start || field_end || val_start || val_end) { + ret = 0; + } else { + ret = 1; + } + + if (server_ip && server_port) { + replace_email_session_serveraddr(sess, server_ip, server_port); + } + + if (client_ip && client_port) { + replace_email_session_clientaddr(sess, client_ip, client_port); + } + + if (server_ip) { free(server_ip); } + if (client_ip) { free(client_ip); } + if (server_port) { free(server_port); } + if (client_port) { free(client_port); } + + return ret; +} + +static int find_next_crlf(imap_session_t *sess, int start_index) { + int rem; + uint8_t *found; + uint8_t *openparent = NULL; + uint8_t *closeparent = NULL; + + int nests = 0; + + rem = sess->contbufused - start_index; + + while (1) { + openparent = (uint8_t *)memmem(sess->contbuffer + start_index, rem, + "(", 1); + found = (uint8_t *)memmem(sess->contbuffer + start_index, rem, "\r\n", + 2); + closeparent = (uint8_t *)memmem(sess->contbuffer + start_index, rem, + ")", 1); + + if (openparent == NULL && closeparent == NULL && nests == 0) { + break; + } + + if (closeparent == NULL && nests > 0) { + return 0; + } + + if ((openparent == NULL || found < openparent) && nests == 0) { + break; + } + + if (openparent == NULL || closeparent < openparent) { + nests -= 1; + start_index = (closeparent - sess->contbuffer) + 1; + } else { + nests += 1; + start_index = (openparent - sess->contbuffer) + 1; + } + + rem = sess->contbufused - start_index; + } + + if (found) { + sess->contbufread = (found - sess->contbuffer) + 2; + return 1; + } + return 0; +} + +static int find_command_end(emailsession_t *sess, imap_session_t *imapsess) { + int r, ind; + + r = find_next_crlf(imapsess, imapsess->next_comm_start); + if (r == 0) { + return 0; + } + + ind = save_imap_command(imapsess, sess->key); + if (ind < 0) { + return ind; + } + + if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_AUTH) { + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATING; + imapsess->auth_command_index = ind; + + r = decode_authentication_command(sess, imapsess); + return r; + /* Don't count client octets just yet, since we could be rewriting + * the auth tokens shortly... 
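A concrete illustration of why find_next_crlf() tracks parenthesis nesting before accepting a CRLF: an untagged FETCH response such as

    * 12 FETCH (FLAGS (\Seen) BODY[HEADER] {64}
    ...64 bytes of header content, which may itself contain CRLFs...
    )

only finishes at the CRLF that follows the closing ')', so any CRLF seen while at least one '(' is still unbalanced must be skipped rather than treated as the end of the reply.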
+ */ + + } else if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_LOGIN) { + + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATING; + imapsess->auth_command_index = ind; + + return decode_login_command(sess, imapsess); + + } else { + sess->client_octets += (imapsess->contbufread - imapsess->next_comm_start); + + } + + /* if command was ID, update session endpoint details using + * command content */ + + if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_ID) { + parse_id_command(sess, imapsess); + } + + if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_LOGOUT) { + sess->currstate = OPENLI_IMAP_STATE_LOGOUT; + } else if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_IDLE) { + sess->currstate = OPENLI_IMAP_STATE_IDLING; + imapsess->idle_command_index = ind; + } else if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_APPEND) { + sess->currstate = OPENLI_IMAP_STATE_APPENDING; + imapsess->append_command_index = ind; + } + + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + + return 1; +} + +static int find_reply_end(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess, uint64_t timestamp) { + int r; + imap_command_t *comm; + + r = find_next_crlf(imapsess, imapsess->next_comm_start); + if (r == 0) { + return 0; + } + sess->server_octets += (imapsess->contbufread - imapsess->next_comm_start); + + if ((r = save_imap_reply(imapsess, sess->key, &comm)) < 0) { + return r; + } + + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + imapsess->reply_start = 0; + + if (comm == NULL) { + return r; + } + + if (comm->imap_command == NULL) { + reset_imap_saved_command(comm); + return r; + } + + if (strcasecmp(comm->imap_command, "LOGOUT") == 0) { + sess->currstate = OPENLI_IMAP_STATE_SESSION_OVER; + sess->event_time = timestamp; + generate_email_logoff_iri(state, sess); + generate_ccs_from_imap_command(state, sess, comm, timestamp); + return 0; + } else if (strcasecmp(comm->imap_command, "AUTHENTICATE") == 0 || + strcasecmp(comm->imap_command, "LOGIN") == 0) { + sess->login_time = timestamp; + complete_imap_authentication(state, sess, imapsess); + } else if (strcasecmp(comm->imap_command, "FETCH") == 0 || + strcasecmp(comm->imap_command, "UID FETCH") == 0) { + + sess->event_time = timestamp; + complete_imap_fetch(state, sess, imapsess, comm); + + } else if (strcasecmp(comm->imap_command, "APPEND") == 0) { + sess->event_time = timestamp; + complete_imap_append(state, sess, imapsess, comm); + } + + generate_ccs_from_imap_command(state, sess, comm, timestamp); + reset_imap_saved_command(comm); + return r; +} + +static int find_partial_reply_end(emailsession_t *sess, + imap_session_t *imapsess) { + int r; + + r = find_next_crlf(imapsess, imapsess->next_comm_start); + if (r == 0) { + return 0; + } + sess->server_octets += (imapsess->contbufread - imapsess->next_comm_start); + + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->next_comm_start = 0; + + return 1; +} + + +static int find_server_ready_end(imap_session_t *imapsess) { + + int r; + + r = find_next_crlf(imapsess, imapsess->next_comm_start); + if (r == 0) { + return 0; + } + + return 1; +} + +static int find_server_ready(imap_session_t *imapsess) { + + uint8_t *found = NULL; + + if (imapsess->contbufused - imapsess->contbufread < 5) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(imapsess->contbuffer + imapsess->contbufread), + "* OK "); + if (found != NULL) { + 
imapsess->next_comm_start = (found - imapsess->contbuffer); + imapsess->next_command_type = OPENLI_IMAP_COMMAND_SERVREADY; + return 1; + } + return 0; +} + +static inline int is_tagged_reply(char *msgcontent, char *searchtag) { + + char reply_cmp[2048]; + + snprintf(reply_cmp, 2048, "%s OK ", searchtag); + if (strncmp(msgcontent, reply_cmp, strlen(reply_cmp)) == 0) { + return 1; + } + snprintf(reply_cmp, 2048, "%s NO ", searchtag); + if (strncmp(msgcontent, reply_cmp, strlen(reply_cmp)) == 0) { + return 1; + } + snprintf(reply_cmp, 2048, "%s BAD ", searchtag); + if (strncmp(msgcontent, reply_cmp, strlen(reply_cmp)) == 0) { + return 1; + } + return 0; +} + +static int read_imap_while_appending_state(emailsession_t *sess, + imap_session_t *imapsess) { + + char *msgstart, *firstchar; + char *crlf = NULL; + char *appendtag; + imap_command_t *comm; + int comm_start, pluslen, cc_len; + int cc_dir = 1; + + /* XXX need some more test cases for APPEND */ + + /* We have a loop here because we want to try and keep all of + * the appended content that is in the same observed packet/message + * in a single CC -- this loop allows us to do that easily without + * having to maintain state outside of the scope of this function. + */ + cc_len = 0; + comm = &(imapsess->commands[imapsess->append_command_index]); + appendtag = comm->tag; + comm_start = comm->commbufused; + firstchar = (char *)(imapsess->contbuffer + imapsess->contbufread); + + while (imapsess->contbufread < imapsess->contbufused) { + msgstart = (char *)(imapsess->contbuffer + imapsess->contbufread); + /* First step, find the next \r\n so we're only working with a + * complete message */ + crlf = strstr(msgstart, "\r\n"); + if (crlf == NULL) { + return 0; + } + + pluslen = (crlf - msgstart) + 2; + + /* Is this the server reply to the APPEND command? */ + /* If yes, rewind to the start of the reply tag so our normal + * processing can be applied when we return... + */ + /* Ideally, we would use the byte count from the APPEND command to + * keep track of when the append is over, but that is an + * annoying amount of parsing to deal with... + */ + if (is_tagged_reply(msgstart, appendtag)) { + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATED; + return 1; + } + + if (extend_command_buffer(comm, pluslen) < 0) { + return -1; + } + memcpy(comm->commbuffer + comm->commbufused, msgstart, pluslen); + comm->commbufused += pluslen; + comm->commbuffer[comm->commbufused] = '\0'; + + /* Does this begin with a '+'? This is from the server */ + if (*firstchar == '+' && msgstart == firstchar) { + sess->server_octets += pluslen; + cc_dir = 0; + } else { + /* Otherwise, this is message content from the client */ + sess->client_octets += pluslen; + } + + /* Advance read pointer to the next line */ + cc_len += pluslen; + imapsess->contbufread += pluslen; + } + + if (cc_len > 0) { + add_cc_to_imap_command(comm, comm_start, comm_start + cc_len, cc_dir); + } + + return 0; +} + +static int read_imap_while_auth_state(emailsession_t *sess, + imap_session_t *imapsess) { + + /* Our goal here is to just consume any unconventional exchanges + * between client and server that might be occurring during + * authentication (e.g. challenges, responses for GSSAPI, etc.). 
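As a constructed example of the kind of exchange this function has to consume, an AUTHENTICATE LOGIN dialogue looks like this (the base64 payloads decode to "Username:", "testuser", "Password:" and "secret" respectively):

    C: a01 AUTHENTICATE LOGIN
    S: + VXNlcm5hbWU6
    C: dGVzdHVzZXI=
    S: + UGFzc3dvcmQ6
    C: c2VjcmV0
    S: a01 OK LOGIN authentication successful

Each "+ " continuation line is appended to the saved AUTHENTICATE command as a server-to-client CC and each client line as a client-to-server CC, until the tagged reply ("a01 OK ...") is seen and normal reply processing resumes.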
+ */ + + char *msgstart = (char *)(imapsess->contbuffer + imapsess->contbufread); + char *tmp = NULL, *crlf = NULL; + char *authtag; + imap_command_t *comm; + int comm_start, pluslen; + + /* XXX need some test cases for AUTHENTICATE */ + + if (imapsess->contbufread >= imapsess->contbufused) { + return 0; + } + + /* First step, find the next \r\n so we're only working with a + * complete message */ + crlf = strstr(msgstart, "\r\n"); + if (crlf == NULL) { + return 0; + } + + pluslen = (crlf - msgstart) + 2; + tmp = calloc((crlf - msgstart) + 1, sizeof(char)); + memcpy(tmp, msgstart, crlf - msgstart); + + comm = &(imapsess->commands[imapsess->auth_command_index]); + authtag = comm->tag; + + /* Is this the server reply to the AUTH command? */ + /* If yes, rewind to the start of the reply tag so our normal + * processing can be applied when we return... + */ + if (is_tagged_reply(tmp, authtag)) { + free(tmp); + if (imapsess->auth_type == OPENLI_IMAP_AUTH_PLAIN) { + int r = 0; + char *authmsg; + /* Bit wasteful to be constantly strduping here XXX */ + while (r == 0) { + authmsg = clone_authentication_message(imapsess); + if (*authmsg == '\0') { + /* This is bad, we somehow decoded the whole plain + * auth content and didn't find what we were looking + * for... + */ + logger(LOG_INFO, "OpenLI: failed to decode plain auth content for IMAP session: %s", sess->key); + r = 1; + } else { + r = decode_plain_auth_content(authmsg, imapsess, sess); + } + free(authmsg); + } + return r; + } + sess->currstate = OPENLI_IMAP_STATE_AUTH_REPLY; + return 1; + } + + if (extend_command_buffer(comm, pluslen) < 0) { + return -1; + } + comm_start = comm->commbufused; + memcpy(comm->commbuffer + comm->commbufused, msgstart, pluslen); + comm->commbufused += pluslen; + comm->commbuffer[comm->commbufused] = '\0'; + + /* We'll update the byte counts for plain auth later on when we decode the + * entire auth message + */ + /* Does this begin with a '+'? This is from the server */ + if (*tmp == '+') { + if (imapsess->auth_type != OPENLI_IMAP_AUTH_PLAIN) { + sess->server_octets += pluslen; + } + add_cc_to_imap_command(comm, comm_start, comm_start + pluslen, 0); + } else { + /* Otherwise, this is message content from the client */ + if (imapsess->auth_type != OPENLI_IMAP_AUTH_PLAIN) { + sess->client_octets += pluslen; + } + add_cc_to_imap_command(comm, comm_start, comm_start + pluslen, 1); + } + + /* Advance read pointer to the next line */ + imapsess->contbufread += pluslen; + free(tmp); + return 1; +} + +static int read_imap_while_idle_state(emailsession_t *sess, + imap_session_t *imapsess) { + + uint8_t *msgstart = imapsess->contbuffer + imapsess->contbufread; + imap_command_t *comm; + uint8_t *found = NULL; + int idle_server_length = 0; + int comm_start; + + comm = &(imapsess->commands[imapsess->idle_command_index]); + + /* check for "+ " -- server response to the idle command*/ + + if (imapsess->reply_start == 0) { + found = (uint8_t *)strstr((const char *)msgstart, "+ "); + if (!found) { + return 0; + } + + imapsess->reply_start = found - imapsess->contbuffer; + } + + /* all untagged messages are updates from the server + * add them to our reply */ + + /* check for "DONE\r\n" -- client message to end idling state */ + /* make sure we add everything from reply_start to the start + * of "DONE" as a separate server->client CC, then add the + * "DONE" as a client->server CC. 
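For context, a typical IDLE exchange (RFC 2177) that this state handles looks like:

    C: a002 IDLE
    S: + idling
    S: * 4 EXISTS
    S: * 1 RECENT
    C: DONE
    S: a002 OK IDLE terminated

Everything from the "+ idling" continuation up to (but not including) "DONE" is recorded against the saved IDLE command as a single server-to-client CC, the six bytes of "DONE\r\n" become a client-to-server CC, and the tagged "a002 OK ..." reply is then handled by the ordinary reply path once the session drops back to the AUTHENTICATED state.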
+ */ + found = (uint8_t *)strstr((const char *)msgstart, "\r\nDONE\r\n"); + if (!found) { + return 0; + } + + idle_server_length = (found + 2 - imapsess->contbuffer) - + imapsess->reply_start; + + imapsess->contbufread = (found - imapsess->contbuffer) + 8; + + if (extend_command_buffer(comm, idle_server_length + 6) < 0) { + return -1; + } + + comm_start = comm->commbufused; + memcpy(comm->commbuffer + comm->commbufused, + imapsess->contbuffer + imapsess->reply_start, + idle_server_length + 6); + comm->commbufused += (idle_server_length + 6); + comm->commbuffer[comm->commbufused] = '\0'; + + add_cc_to_imap_command(comm, comm_start, + comm_start + idle_server_length, 0); + add_cc_to_imap_command(comm, comm_start + idle_server_length, + comm_start + idle_server_length + 6, 1); + + sess->server_octets += idle_server_length; + sess->client_octets += 6; + + imapsess->reply_start = 0; + sess->currstate = OPENLI_IMAP_STATE_AUTHENTICATED; + + return 1; +} + +static char *get_uid_command(char *command, uint8_t *buffer) { + + char *new_comm = NULL; + int old_len = strlen(command); + uint8_t *nextspace, *crlf; + + /* XXX requires testing with a pcap containing UID commands! */ + + /* The next character in our buffer should be a space, but if it + * isn't (i.e. it is a \r), then just return the command as is so we + * can try to handle this weirdness nicely. + */ + if (*buffer != ' ') { + return command; + } + + nextspace = (uint8_t *)strchr((const char *)(buffer + 1), ' '); + crlf = (uint8_t *)strstr((const char *)(buffer + 1), "\r\n"); + + if (!nextspace && !crlf) { + return command; + } + + if (nextspace == NULL || crlf < nextspace) { + nextspace = crlf; + } + + new_comm = calloc(old_len + (nextspace - buffer) + 1, sizeof(char)); + memcpy(new_comm, command, old_len); + memcpy(new_comm + old_len, buffer, (nextspace - buffer)); + + free(command); + return new_comm; +} + +static int find_next_imap_message(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess) { + + char *tag; + char *comm_resp; + uint8_t *spacefound = NULL; + uint8_t *spacefound2 = NULL; + uint8_t *crlffound = NULL; + uint8_t *msgstart = imapsess->contbuffer + imapsess->contbufread; + + + if (sess->currstate == OPENLI_IMAP_STATE_AUTHENTICATING) { + /* Handle various auth response behaviours, as per RFC9051 */ + return read_imap_while_auth_state(sess, imapsess); + } + + if (sess->currstate == OPENLI_IMAP_STATE_IDLING) { + return read_imap_while_idle_state(sess, imapsess); + } + + if (sess->currstate == OPENLI_IMAP_STATE_APPENDING) { + return read_imap_while_appending_state(sess, imapsess); + } + + spacefound = (uint8_t *)strchr((const char *)msgstart, ' '); + if (!spacefound) { + return 0; + } + + tag = calloc((spacefound - msgstart) + 1, sizeof(char *)); + memcpy(tag, msgstart, spacefound - msgstart); + tag[spacefound - msgstart] = '\0'; + + /* Most commands are " \r\n", but some + * have no extra context and are just " \r\n". + * Therefore if we see a \r\n BEFORE the next space, we want to + * treat that as our string boundary. 
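A few examples of how lines are tokenised at this point (the placeholders stripped from the comment above were presumably "<tag> <command> <arguments>" versus "<tag> <command>"):

    a03 LOGOUT              -> tag "a03", command "LOGOUT"; no arguments, so the CRLF is the boundary
    a04 UID FETCH 1:* FLAGS -> tag "a04", command "UID", later expanded to "UID FETCH" by get_uid_command()
    * OK Dovecot ready.     -> tag "*", an untagged server message
    a04 OK Fetch completed. -> tag "a04", command "OK", i.e. the tagged reply that completes command a04

Only the first two originate from the client; the '*' and tagged OK/NO/BAD forms are server responses and are steered into the reply-handling paths below.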
+ */ + spacefound2 = (uint8_t *)strchr((const char *)(spacefound + 1), ' '); + crlffound = (uint8_t *)strstr((const char *)(spacefound + 1), "\r\n"); + + if (!spacefound2 && !crlffound) { + free(tag); + return 0; + } + + if (spacefound2 == NULL || (crlffound != NULL && crlffound < spacefound2)) { + spacefound2 = crlffound; + } + + comm_resp = calloc((spacefound2 - spacefound), sizeof(char *)); + memcpy(comm_resp, spacefound + 1, (spacefound2 - spacefound) - 1); + comm_resp[spacefound2 - spacefound - 1] = '\0'; + + if (strcmp(tag, "*") == 0) { + if (strcasecmp(comm_resp, "BYE") == 0 && + sess->currstate != OPENLI_IMAP_STATE_LOGOUT) { + + /* server is doing an immediate shutdown */ + + /* TODO dump CCs for any incomplete commands (including the + * sudden BYE)? + */ + sess->currstate = OPENLI_IMAP_STATE_SESSION_OVER; + generate_email_logoff_iri(state, sess); + free(tag); + free(comm_resp); + return 0; + + } else if (strcasecmp(comm_resp, "PREAUTH") == 0) { + //imapsess->next_command_type = OPENLI_IMAP_COMMAND_PREAUTH; + } else { + /* a partial reply to a command, more to come... */ + imapsess->next_command_type = OPENLI_IMAP_COMMAND_REPLY_ONGOING; + free(comm_resp); + comm_resp = NULL; + + if (imapsess->reply_start == 0) { + imapsess->reply_start = msgstart - imapsess->contbuffer; + } + } + } else if (strcasecmp(comm_resp, "OK") == 0 || + strcasecmp(comm_resp, "NO") == 0 || + strcasecmp(comm_resp, "BAD") == 0) { + + /* this is a reply that completes the response to a command */ + imapsess->next_command_type = OPENLI_IMAP_COMMAND_REPLY; + if (imapsess->reply_start == 0) { + imapsess->reply_start = msgstart - imapsess->contbuffer; + } + } else if (strcasecmp(comm_resp, "ID") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_ID; + } else if (strcasecmp(comm_resp, "UID") == 0) { + comm_resp = get_uid_command(comm_resp, spacefound2); + imapsess->next_command_type = OPENLI_IMAP_COMMAND_GENERIC; + } else if (strcasecmp(comm_resp, "IDLE") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_IDLE; + } else if (strcasecmp(comm_resp, "APPEND") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_APPEND; + } else if (strcasecmp(comm_resp, "LOGOUT") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_LOGOUT; + } else if (strcasecmp(comm_resp, "LOGIN") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_LOGIN; + imapsess->auth_read_from = msgstart - imapsess->contbuffer; + if (imapsess->auth_tag) { + free(imapsess->auth_tag); + } + imapsess->auth_tag = strdup(tag); + sess->currstate = OPENLI_IMAP_STATE_AUTH_STARTED; + } else if (strcasecmp(comm_resp, "AUTHENTICATE") == 0) { + imapsess->next_command_type = OPENLI_IMAP_COMMAND_AUTH; + if (imapsess->auth_tag) { + free(imapsess->auth_tag); + } + imapsess->auth_tag = strdup(tag); + imapsess->auth_read_from = msgstart - imapsess->contbuffer; + sess->currstate = OPENLI_IMAP_STATE_AUTH_STARTED; + } else { + /* just a regular IMAP command that requires no special treatment */ + imapsess->next_command_type = OPENLI_IMAP_COMMAND_GENERIC; + } + + if (imapsess->next_comm_tag) { + free(imapsess->next_comm_tag); + } + imapsess->next_comm_tag = tag; + + if (imapsess->next_command_name) { + free(imapsess->next_command_name); + } + imapsess->next_command_name = comm_resp; + imapsess->next_comm_start = msgstart - imapsess->contbuffer; + + return 1; +} + +static int process_next_imap_state(openli_email_worker_t *state, + emailsession_t *sess, imap_session_t *imapsess, uint64_t timestamp) { + + int r; + + if (sess->currstate == 
OPENLI_IMAP_STATE_INIT) { + r = find_server_ready(imapsess); + if (r == 1) { + sess->currstate = OPENLI_IMAP_STATE_SERVER_READY; + } + } + + if (sess->currstate == OPENLI_IMAP_STATE_SERVER_READY) { + r = find_server_ready_end(imapsess); + if (r == 1) { + sess->currstate = OPENLI_IMAP_STATE_PRE_AUTH; + sess->server_octets += + (imapsess->contbufread - imapsess->next_comm_start); + imapsess->next_comm_start = 0; + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + } + return r; + } + + if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_NONE) { + r = find_next_imap_message(state, sess, imapsess); + return r; + } else if (imapsess->next_command_type == OPENLI_IMAP_COMMAND_REPLY) { + r = find_reply_end(state, sess, imapsess, timestamp); + + return r; + } else if (imapsess->next_command_type == + OPENLI_IMAP_COMMAND_REPLY_ONGOING) { + r = find_partial_reply_end(sess, imapsess); + return r; + } else { + r = find_command_end(sess, imapsess); + return r; + } + + return 0; +} + +int update_imap_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t *cap) { + + imap_session_t *imapsess; + int r, i; + + if (sess->proto_state == NULL) { + imapsess = calloc(1, sizeof(imap_session_t)); + imapsess->contbuffer = calloc(1024, sizeof(uint8_t)); + imapsess->contbufused = 0; + imapsess->contbufread = 0; + imapsess->contbufsize = 1024; + imapsess->commands = calloc(5, sizeof(imap_command_t)); + imapsess->commands_size = 5; + imapsess->next_command_type = OPENLI_IMAP_COMMAND_NONE; + imapsess->idle_command_index = -1; + imapsess->auth_command_index = -1; + + for (i = 0; i < imapsess->commands_size; i++) { + init_imap_command(&(imapsess->commands[i])); + } + + sess->proto_state = (void *)imapsess; + } else { + imapsess = (imap_session_t *)sess->proto_state; + } + + if (sess->currstate == OPENLI_IMAP_STATE_IGNORING) { + return 0; + } + + if (append_content_to_imap_buffer(imapsess, cap) < 0) { + logger(LOG_INFO, "OpenLI: Failed to append IMAP message content to session buffer for %s", sess->key); + return -1; + } + + while (1) { + if ((r = process_next_imap_state(state, sess, imapsess, + cap->timestamp)) <= 0) { + break; + } + if (sess->currstate == OPENLI_IMAP_STATE_IGNORING) { + break; + } + } + + if (sess->currstate == OPENLI_IMAP_STATE_SESSION_OVER) { + return 1; + } + + return 0; +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailprotocols/pop3.c b/src/collector/emailprotocols/pop3.c new file mode 100644 index 00000000..e1c4523f --- /dev/null +++ b/src/collector/emailprotocols/pop3.c @@ -0,0 +1,859 @@ +/* + * + * Copyright (c) 2018-2023 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * + */ + +#define _GNU_SOURCE +#include +#include +#include + +#include "email_worker.h" +#include "logger.h" + + +enum { + OPENLI_POP3_STATE_START, + OPENLI_POP3_STATE_SERVER_REPLY, + OPENLI_POP3_STATE_XCLIENT_SEEN, + OPENLI_POP3_STATE_WAITING_COMMAND, + OPENLI_POP3_STATE_WAITING_SERVER, + OPENLI_POP3_STATE_MULTI_CONTENT, + OPENLI_POP3_STATE_IGNORING, + OPENLI_POP3_STATE_OVER, + OPENLI_POP3_STATE_AUTH, + OPENLI_POP3_STATE_AUTH_SERVER_CONTENT, + OPENLI_POP3_STATE_AUTH_CLIENT_CONTENT, + OPENLI_POP3_STATE_CONSUME_SERVER, + OPENLI_POP3_STATE_CONSUME_CLIENT, + +}; + +enum { + OPENLI_POP3_INIT, + OPENLI_POP3_PREAUTH, + OPENLI_POP3_POSTAUTH, + OPENLI_POP3_POSTQUIT, +}; + +enum { + OPENLI_POP3_COMMAND_NONE, + OPENLI_POP3_COMMAND_USER, + OPENLI_POP3_COMMAND_PASS, + OPENLI_POP3_COMMAND_APOP, + OPENLI_POP3_COMMAND_RETR, + OPENLI_POP3_COMMAND_TOP, + OPENLI_POP3_COMMAND_AUTH, + OPENLI_POP3_COMMAND_QUIT, + OPENLI_POP3_COMMAND_XCLIENT, + OPENLI_POP3_COMMAND_OTHER_MULTI, + OPENLI_POP3_COMMAND_OTHER_SINGLE, + OPENLI_POP3_COMMAND_UNKNOWN, +}; + +enum { + OPENLI_POP3_SERV_OK, + OPENLI_POP3_SERV_ERR, + OPENLI_POP3_SERV_AUTH, +}; + +typedef struct pop3session { + + uint8_t *contbuffer; + int contbufsize; + int contbufused; + int contbufread; + + int auth_state; + int last_command_type; + int server_indicator; + + int command_start; + int command_end; + int reply_start; + + char *mailbox; + char *mail_sender; + char *password_content; + + char *client_ip; + char *client_port; + int seen_xclient; + int seen_xclient_reply; + +} pop3_session_t; + +/* this is basically a direct copy of the imap version -- maybe we could + * just have one version of this in utils.c somewhere? XXX + */ +static int append_content_to_pop3_buffer(pop3_session_t *pop3sess, + openli_email_captured_t *cap) { + + /* +1 to account for a null terminator */ + while (pop3sess->contbufsize - pop3sess->contbufused <= + cap->msg_length + 1) { + pop3sess->contbuffer = realloc(pop3sess->contbuffer, + pop3sess->contbufsize + 4096); + if (pop3sess->contbuffer == NULL) { + return -1; + } + pop3sess->contbufsize += 4096; + } + + memcpy(pop3sess->contbuffer + pop3sess->contbufused, + cap->content, cap->msg_length); + pop3sess->contbufused += cap->msg_length; + pop3sess->contbuffer[pop3sess->contbufused] = '\0'; + + return 0; +} + +static int decode_login_username_command(emailsession_t *sess, + pop3_session_t *pop3sess) { + + char *usermsg; + int msglen; + char *username; + + // strip \r\n from end of command + msglen = pop3sess->command_end - pop3sess->command_start - 2; + usermsg = calloc(msglen + 1, sizeof(char)); + + memcpy(usermsg, pop3sess->contbuffer + pop3sess->command_start, + msglen); + + username = strchr(usermsg, ' '); + if (username == NULL) { + logger(LOG_INFO, "OpenLI: unable to parse POP3 USER command -- no space found in command (\"%s\")", usermsg); + free(usermsg); + return -1; + } + + username += 1; + pop3sess->mailbox = strdup(username); + add_email_participant(sess, pop3sess->mailbox, 0); + free(usermsg); + return 1; +} + +static int decode_login_apop_command(emailsession_t *sess, + pop3_session_t *pop3sess) { + + char *usermsg; + int msglen; + char *username; + char *username_end; + + // strip \r\n from end of command + msglen = pop3sess->command_end - pop3sess->command_start - 2; + usermsg = calloc(msglen + 1, sizeof(char)); + + memcpy(usermsg, pop3sess->contbuffer + pop3sess->command_start, + msglen); + + username = strchr(usermsg, ' '); + if (username == NULL) { + logger(LOG_INFO, "OpenLI: unable to parse POP3 APOP command 
-- no space found in command (\"%s\")", usermsg); + return -1; + } + + username += 1; + + + username_end = strchr(username, ' '); + if (username_end == NULL) { + logger(LOG_INFO, "OpenLI: unable to parse POP3 APOP command -- not enough terms in command (\"%s\")", usermsg); + return -1; + } + + pop3sess->mailbox = strndup(username, username_end - username); + add_email_participant(sess, pop3sess->mailbox, 0); + return 1; +} + +static int save_pop3_password(emailsession_t *sess, pop3_session_t *pop3sess) { + + if (sess->mask_credentials) { + /* Replace the password with 'XXX' */ + pop3sess->password_content = strdup("PASS XXX\r\n"); + } else { + int msglen = pop3sess->command_end - pop3sess->command_start; + pop3sess->password_content = calloc(msglen + 1, sizeof(char)); + + memcpy(pop3sess->password_content, + pop3sess->contbuffer + pop3sess->command_start, msglen); + } + + return 1; +} + +static int find_next_crlf(pop3_session_t *pop3sess, int start_index) { + int rem; + uint8_t *found; + + rem = pop3sess->contbufused - start_index; + + found = (uint8_t *)memmem(pop3sess->contbuffer + start_index, rem, + "\r\n", 2); + + if (found) { + pop3sess->contbufread = (found - pop3sess->contbuffer) + 2; + return 1; + } + return 0; +} + +static int find_multi_end(pop3_session_t *pop3sess, int start_index) { + int rem; + uint8_t *found; + + rem = pop3sess->contbufused - start_index; + + found = (uint8_t *)memmem(pop3sess->contbuffer + start_index, rem, + "\r\n.\r\n", 5); + + if (found) { + pop3sess->contbufread = (found - pop3sess->contbuffer) + 5; + return 1; + } + return 0; +} + +static int parse_xclient_content(emailsession_t *sess, + pop3_session_t *pop3sess) { + + + char *xcontent = (char *)pop3sess->contbuffer + pop3sess->command_start; + int xcontlen = (pop3sess->command_end - pop3sess->command_start) - 2; + char xcopy[2048]; + int ret = 0; + char *key, *value, *next; + + char *ptr = xcopy; + + memcpy(xcopy, xcontent, xcontlen); + xcopy[xcontlen] = '\0'; + + /* The real client IP and port will be contained in the XCLIENT + * message payload -- we don't know the "real" server for sure, + * though so let's just use the server that received the XCLIENT + * message instead */ + + while (ptr) { + if (*ptr == '\r' || *ptr == '\n' || *ptr == '\0') { + break; + } + next = strchr(ptr, ' '); + key = ptr; + + value = strchr(ptr, '='); + if (value == NULL) { + ret = -1; + break; + } + + if (next != NULL && next < value) { + if (key == xcopy) { + /* this is the XCLIENT command itself */ + ptr = next + 1; + continue; + } + return -1; + } + + *value = '\0'; + value ++; + + if (next) { + *next = '\0'; + next ++; + } + ptr = next; + + if (strcmp(key, "ADDR") == 0) { + if (pop3sess->client_ip) { + free(pop3sess->client_ip); + } + pop3sess->client_ip = strdup(value); + } + + if (strcmp(key, "PORT") == 0) { + if (pop3sess->client_port) { + free(pop3sess->client_port); + } + pop3sess->client_port = strdup(value); + } + replace_email_session_clientaddr(sess, pop3sess->client_ip, + pop3sess->client_port); + } + + return ret; + +} + +static int parse_pop3_command(pop3_session_t *pop3sess) { + + int comm_size = (pop3sess->contbufread - pop3sess->command_start) - 2; + char comm_copy[1024]; + + if (comm_size >= 1024) { + comm_size = 1023; + } + + memcpy(comm_copy, pop3sess->contbuffer + pop3sess->command_start, comm_size); + comm_copy[comm_size] = '\0'; + + if (strncmp(comm_copy, "CAPA", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_MULTI; + } + else if (strncmp(comm_copy, "XCLIENT ", 8) == 0) { + 
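+        /* XCLIENT is inserted by an upstream proxy to convey the original
+         * client address and port; the ADDR/PORT payload is parsed later in
+         * parse_xclient_content().
+         */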
pop3sess->last_command_type = OPENLI_POP3_COMMAND_XCLIENT; + } + else if (strncmp(comm_copy, "USER ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_USER; + } + else if (strncmp(comm_copy, "PASS ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_PASS; + } + else if (strncmp(comm_copy, "RETR ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_RETR; + } + else if (strncmp(comm_copy, "TOP ", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_TOP; + } + else if (strncmp(comm_copy, "LIST", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_MULTI; + } + else if (strncmp(comm_copy, "APOP ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_APOP; + } + else if (strncmp(comm_copy, "NOOP", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_SINGLE; + } + else if (strncmp(comm_copy, "RSET", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_SINGLE; + } + else if (strncmp(comm_copy, "STAT", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_SINGLE; + } + else if (strncmp(comm_copy, "AUTH ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_AUTH; + } + else if (strncmp(comm_copy, "UIDL", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_MULTI; + } + else if (strncmp(comm_copy, "DELE ", 5) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_SINGLE; + } + else if (strncmp(comm_copy, "QUIT", 4) == 0) { + pop3sess->last_command_type = OPENLI_POP3_COMMAND_QUIT; + pop3sess->auth_state = OPENLI_POP3_POSTQUIT; + } else { + /* Unknown command -- let's guess a single line reply and pray */ + pop3sess->last_command_type = OPENLI_POP3_COMMAND_OTHER_SINGLE; + } + + return 0; +} + +static int process_server_indicator(emailsession_t *sess, + pop3_session_t *pop3sess, int isauth) { + int rem = pop3sess->contbufused - pop3sess->reply_start; + uint8_t *found; + + if (isauth) { + if (rem < 2) { + return -1; + } + + if (memcmp(pop3sess->contbuffer + pop3sess->reply_start, + "+ ", 2) == 0) { + pop3sess->server_indicator = OPENLI_POP3_SERV_AUTH; + return 1; + } + } + + + if (rem < 3) { + return -1; + } + + found = (uint8_t *)memmem(pop3sess->contbuffer + pop3sess->reply_start, + rem, "+OK", 3); + if (found) { + pop3sess->server_indicator = OPENLI_POP3_SERV_OK; + return 1; + } + + if (rem < 4) { + return -1; + } + + found = (uint8_t *)memmem(pop3sess->contbuffer + pop3sess->reply_start, + rem, "-ERR", 4); + if (found) { + pop3sess->server_indicator = OPENLI_POP3_SERV_ERR; + return 1; + } + + if (pop3sess->last_command_type != OPENLI_POP3_COMMAND_XCLIENT) { + logger(LOG_INFO, "OpenLI: unexpected server reply observed for POP3 session '%s'", sess->key); + } + return 0; +} + +static int is_single_line_response(pop3_session_t *pop3sess) { + + switch (pop3sess->last_command_type) { + case OPENLI_POP3_COMMAND_RETR: + case OPENLI_POP3_COMMAND_TOP: + case OPENLI_POP3_COMMAND_OTHER_MULTI: + return 0; + } + + return 1; +} + +static int handle_xclient_seen_state(emailsession_t *sess, + pop3_session_t *pop3sess) { + + int r; + /* We might get a server reply, or the proxy might just + * carry on and forward the next client command -- who knows? + */ + r = process_server_indicator(sess, pop3sess, 0); + if (r < 0) { + return 0; + } + if (r == 1) { + sess->currstate = OPENLI_POP3_STATE_SERVER_REPLY; + } else { + /* Must be a command instead? 
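+         * If so, treat the buffered bytes as the start of that command and
+         * reparse from here.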
*/ + sess->currstate = OPENLI_POP3_STATE_WAITING_COMMAND; + pop3sess->command_start = pop3sess->reply_start; + return 1; + } + + return 0; +} + +static int extract_pop3_email_sender(openli_email_worker_t *state, + emailsession_t *sess, pop3_session_t *pop3sess) { + + int r; + char *extracted = NULL; + char *safecopy; + int copylen; + char *search = (char *)(pop3sess->contbuffer + pop3sess->reply_start); + char *end = (char *)(pop3sess->contbuffer + pop3sess->contbufread); + + copylen = (end - search) + 1; + safecopy = calloc(sizeof(char), copylen); + memcpy(safecopy, search, (end - search)); + + r = extract_email_sender_from_body(state, sess, safecopy, &extracted); + + if (r == 0 || extracted == NULL) { + free(safecopy); + return r; + } + + pop3sess->mail_sender = extracted; + add_email_participant(sess, pop3sess->mail_sender, 1); + free(safecopy); + + return r; +} + +static int handle_multi_reply_state(openli_email_worker_t *state, + emailsession_t *sess, pop3_session_t *pop3sess, uint64_t timestamp) { + + int r; + + if ((r = find_multi_end(pop3sess, pop3sess->reply_start)) <= 0) { + return r; + } + + sess->server_octets += (pop3sess->contbufread - pop3sess->reply_start); + + /* TODO command response is complete -- generate the CCs */ + if (pop3sess->server_indicator == OPENLI_POP3_SERV_OK) { + sess->event_time = timestamp; + /* if command was RETR, generate an email download IRI */ + /* if command was TOP, generate a partial download IRI */ + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_RETR) { + extract_pop3_email_sender(state, sess, pop3sess); + generate_email_download_success_iri(state, sess); + } else if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_TOP) { + extract_pop3_email_sender(state, sess, pop3sess); + generate_email_partial_download_success_iri(state, sess); + } + + /* free the sender so we don't include it in future IRIs where + * it is not relevant (e.g. logoff) + */ + if (pop3sess->mail_sender) { + clear_email_sender(sess); + /* the memory is freed inside clear_email_sender()... 
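+         * so just NULL the pointer here rather than freeing it again.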
*/ + pop3sess->mail_sender = NULL; + } + + } + generate_email_cc_from_pop3_payload(state, sess, + pop3sess->contbuffer + pop3sess->command_start, + pop3sess->command_end - pop3sess->command_start, + timestamp, ETSI_DIR_FROM_TARGET); + generate_email_cc_from_pop3_payload(state, sess, + pop3sess->contbuffer + pop3sess->reply_start, + pop3sess->contbufread - pop3sess->reply_start, + timestamp, ETSI_DIR_TO_TARGET); + pop3sess->command_start = pop3sess->contbufread; + sess->currstate = OPENLI_POP3_STATE_WAITING_COMMAND; + + return 1; +} + +static int handle_client_command(emailsession_t *sess, + pop3_session_t *pop3sess) { + + int r; + + if ((r = find_next_crlf(pop3sess, pop3sess->command_start)) <= 0) { + return r; + } + + if (parse_pop3_command(pop3sess) < 0) { + return -1; + } + + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_PASS && + sess->mask_credentials) { + sess->client_octets += 10; + } else { + sess->client_octets += (pop3sess->contbufread - pop3sess->command_start); + } + pop3sess->command_end = pop3sess->contbufread; + pop3sess->reply_start = pop3sess->contbufread; + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_XCLIENT) + { + sess->currstate = OPENLI_POP3_STATE_XCLIENT_SEEN; + pop3sess->seen_xclient = 1; + + if (parse_xclient_content(sess, pop3sess) < 0) { + return -1; + } + } else if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_AUTH) { + sess->currstate = OPENLI_POP3_STATE_AUTH; + } else { + sess->currstate = OPENLI_POP3_STATE_WAITING_SERVER; + } + + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_USER) { + if (decode_login_username_command(sess, pop3sess) < 0) { + return -1; + } + } + + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_APOP) { + if (decode_login_apop_command(sess, pop3sess) < 0) { + return -1; + } + } + + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_PASS) { + if (save_pop3_password(sess, pop3sess) < 0) { + return -1; + } + } + + return 1; +} + + +static int handle_server_reply_state(openli_email_worker_t *state, + emailsession_t *sess, pop3_session_t *pop3sess, uint64_t timestamp) { + + int r = 1; + + if ((r = find_next_crlf(pop3sess, pop3sess->reply_start)) <= 0) { + return r; + } + + /* Server reply line is complete */ + + sess->server_octets += (pop3sess->contbufread - pop3sess->reply_start); + + if (pop3sess->auth_state == OPENLI_POP3_POSTQUIT) { + sess->currstate = OPENLI_POP3_STATE_OVER; + sess->event_time = timestamp; + /* generate email logoff IRI */ + generate_email_logoff_iri(state, sess); + } + /* If our last command is one that will produce multi-line responses, + * then we need to keep parsing lines until we see a line with just + * a full stop + */ + else if (is_single_line_response(pop3sess)) { + if (pop3sess->seen_xclient && !pop3sess->seen_xclient_reply) { + /* This is the first reply since we saw XCLIENT */ + pop3sess->seen_xclient_reply = 1; + if (pop3sess->last_command_type != OPENLI_POP3_COMMAND_XCLIENT) { + /* We saw another command before we saw this reply, so we + * now need to wait for the reply to that subsequent + * command + */ + sess->currstate = OPENLI_POP3_STATE_WAITING_SERVER; + pop3sess->reply_start = pop3sess->contbufread; + return 1; + } + + /* Otherwise, the XCLIENT reply was the first thing we + * saw after the XCLIENT command, so we can carry on + * normally and expect a client command next. 
+ */ + } + + sess->currstate = OPENLI_POP3_STATE_WAITING_COMMAND; + + } else { + sess->currstate = OPENLI_POP3_STATE_MULTI_CONTENT; + return 1; + } + + if (pop3sess->last_command_type == OPENLI_POP3_COMMAND_PASS || + pop3sess->last_command_type == + OPENLI_POP3_COMMAND_APOP || + pop3sess->last_command_type == OPENLI_POP3_COMMAND_AUTH) { + + /* This is the reply for a login attempt, so we'll need to + * publish an IRI + */ + if (pop3sess->server_indicator == OPENLI_POP3_SERV_OK) { + pop3sess->auth_state = OPENLI_POP3_POSTAUTH; + sess->login_time = timestamp; + generate_email_login_success_iri(state, sess); + } else { + generate_email_login_failure_iri(state, sess); + } + } + + /* TODO command response is complete -- generate the CCs */ + if (pop3sess->last_command_type != OPENLI_POP3_COMMAND_NONE && + pop3sess->last_command_type != OPENLI_POP3_COMMAND_XCLIENT) { + + if (pop3sess->last_command_type != OPENLI_POP3_COMMAND_PASS) { + generate_email_cc_from_pop3_payload(state, sess, + pop3sess->contbuffer + pop3sess->command_start, + pop3sess->command_end - pop3sess->command_start, + timestamp, ETSI_DIR_FROM_TARGET); + } else { + generate_email_cc_from_pop3_payload(state, sess, + (uint8_t *)pop3sess->password_content, + strlen(pop3sess->password_content), + timestamp, ETSI_DIR_FROM_TARGET); + } + + + generate_email_cc_from_pop3_payload(state, sess, + pop3sess->contbuffer + pop3sess->reply_start, + pop3sess->contbufread - pop3sess->reply_start, + timestamp, ETSI_DIR_TO_TARGET); + + } + + pop3sess->command_start = pop3sess->contbufread; + return r; +} + +static int process_next_pop3_line(openli_email_worker_t *state, + emailsession_t *sess, pop3_session_t *pop3sess, uint64_t timestamp) { + + int r; + + switch(sess->currstate) { + case OPENLI_POP3_STATE_START: + pop3sess->reply_start = pop3sess->contbufread; + + // fall through + case OPENLI_POP3_STATE_WAITING_SERVER: + r = process_server_indicator(sess, pop3sess, 0); + if (r < 0) { + return 0; + } + if (r == 0) { + sess->currstate = OPENLI_POP3_STATE_CONSUME_SERVER; + } else { + sess->currstate = OPENLI_POP3_STATE_SERVER_REPLY; + } + + return 1; + + case OPENLI_POP3_STATE_XCLIENT_SEEN: + r = handle_xclient_seen_state(sess, pop3sess); + return r; + + case OPENLI_POP3_STATE_AUTH: + r = process_server_indicator(sess, pop3sess, 1); + if (r < 0) { + return 0; + } + if (r == 0) { + logger(LOG_INFO, "OpenLI: POP3 session '%s' has bogus authentication exchange -- ignoring for our sanity", sess->key); + sess->currstate = OPENLI_POP3_STATE_IGNORING; + return -1; + } + if (pop3sess->server_indicator == OPENLI_POP3_SERV_AUTH) { + sess->currstate = OPENLI_POP3_STATE_AUTH_SERVER_CONTENT; + } else { + sess->currstate = OPENLI_POP3_STATE_SERVER_REPLY; + } + return 1; + + case OPENLI_POP3_STATE_AUTH_SERVER_CONTENT: + /* TODO figure out a way to do CCs properly in this state */ + if ((r = find_next_crlf(pop3sess, pop3sess->reply_start)) == 1) { + sess->currstate = OPENLI_POP3_STATE_AUTH_CLIENT_CONTENT; + pop3sess->reply_start = pop3sess->contbufread; + return 1; + } + break; + + case OPENLI_POP3_STATE_AUTH_CLIENT_CONTENT: + /* TODO figure out a way to do CCs properly in this state */ + if ((r = find_next_crlf(pop3sess, pop3sess->reply_start)) == 1) { + sess->currstate = OPENLI_POP3_STATE_AUTH; + pop3sess->reply_start = pop3sess->contbufread; + return 1; + } + break; + + case OPENLI_POP3_STATE_CONSUME_SERVER: + /* Let's hope we never have to use this case... 
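+             * It only exists to resynchronise by skipping an unrecognised
+             * server line up to the next CRLF.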
*/ + if ((r = find_next_crlf(pop3sess, pop3sess->reply_start)) == 1) { + sess->currstate = OPENLI_POP3_STATE_WAITING_SERVER; + pop3sess->reply_start = pop3sess->contbufread; + return 1; + } + break; + + case OPENLI_POP3_STATE_SERVER_REPLY: + r = handle_server_reply_state(state, sess, pop3sess, timestamp); + return r; + + case OPENLI_POP3_STATE_MULTI_CONTENT: + r = handle_multi_reply_state(state, sess, pop3sess, timestamp); + return r; + + case OPENLI_POP3_STATE_WAITING_COMMAND: + r = handle_client_command(sess, pop3sess); + return r; + + case OPENLI_POP3_STATE_IGNORING: + case OPENLI_POP3_STATE_OVER: + return 0; + } + + + return 0; +} + +void free_pop3_session_state(emailsession_t *sess, void *pop3state) { + pop3_session_t *pop3sess; + + if (pop3state == NULL) { + return; + } + + pop3sess = (pop3_session_t *)pop3state; + if (pop3sess->client_ip) { + free(pop3sess->client_ip); + } + if (pop3sess->client_port) { + free(pop3sess->client_port); + } + if (pop3sess->password_content) { + free(pop3sess->password_content); + } + + /* Don't free 'mailbox' or 'mail_sender', as these are owned by the + * participant list for the overall email session. + */ + + + free(pop3sess->contbuffer); + free(pop3sess); + +} + +int update_pop3_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t *cap) { + + pop3_session_t *pop3sess; + int r; + + if (sess->proto_state == NULL) { + pop3sess = calloc(1, sizeof(pop3_session_t)); + pop3sess->contbuffer = calloc(2048, sizeof(uint8_t)); + pop3sess->contbufused = 0; + pop3sess->contbufread = 0; + pop3sess->contbufsize = 2048; + pop3sess->auth_state = OPENLI_POP3_INIT; + pop3sess->last_command_type = OPENLI_POP3_COMMAND_NONE; + + pop3sess->client_port = strdup(cap->remote_port); + pop3sess->client_ip = strdup(cap->remote_ip); + + sess->currstate = OPENLI_POP3_STATE_START; + + sess->proto_state = (void *)pop3sess; + } else { + pop3sess = (pop3_session_t *)sess->proto_state; + } + + if (sess->currstate == OPENLI_POP3_STATE_IGNORING) { + return 0; + } + + if (append_content_to_pop3_buffer(pop3sess, cap) < 0) { + logger(LOG_INFO, "OpenLI: Failed to append POP3 message content to session buffer for %s", sess->key); + return -1; + } + + while (1) { + if ((r = process_next_pop3_line(state, sess, pop3sess, + cap->timestamp)) <= 0) { + break; + } + if (sess->currstate == OPENLI_POP3_STATE_IGNORING) { + break; + } + } + + if (sess->currstate == OPENLI_POP3_STATE_OVER) { + return 1; + } + return 0; +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/emailprotocols/smtp.c b/src/collector/emailprotocols/smtp.c new file mode 100644 index 00000000..ac9028e3 --- /dev/null +++ b/src/collector/emailprotocols/smtp.c @@ -0,0 +1,720 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ +#define _GNU_SOURCE +#include +#include +#include + +#include "email_worker.h" +#include "logger.h" + +typedef struct smtpsession { + char *messageid; + + uint8_t *contbuffer; + int contbufsize; + int contbufused; + int contbufread; + int reply_start; + + uint8_t saved_state; + + uint16_t ehlo_reply_code; + uint16_t mailfrom_reply_code; + uint16_t rcptto_reply_code; + uint16_t data_reply_code; + uint16_t data_final_reply_code; + int ehlo_start; + int ehlo_reply_end; + int mailfrom_start; + int rcptto_start; + int data_start; + int data_end; +} smtp_session_t; + +void free_smtp_session_state(emailsession_t *sess, void *smtpstate) { + + smtp_session_t *smtpsess; + if (smtpstate == NULL) { + return; + } + smtpsess = (smtp_session_t *)smtpstate; + free(smtpsess->contbuffer); + free(smtpsess); + +} + +static int append_content_to_smtp_buffer(smtp_session_t *smtpsess, + openli_email_captured_t *cap) { + + /* "16" is just a bit of extra buffer space to account for + * special cases where we need to insert missing "DATA" commands + * into the application data stream. + */ + while (smtpsess->contbufsize - smtpsess->contbufused <= + cap->msg_length + 16) { + smtpsess->contbuffer = realloc(smtpsess->contbuffer, + smtpsess->contbufsize + 4096); + if (smtpsess->contbuffer == NULL) { + return -1; + } + + smtpsess->contbufsize += 4096; + } + + /* Special case -- some ingested data sources skip the DATA + * command, so we're going to try and squeeze that in ourselves + * whenever we see content beginning with the "354 " response. + */ + if (smtpsess->data_start == 0 && + memcmp(cap->content, (const void *)"354 ", 4) == 0) { + memcpy(smtpsess->contbuffer + smtpsess->contbufused, + "DATA\r\n", 6); + smtpsess->contbufused += 6; + } + + memcpy(smtpsess->contbuffer + smtpsess->contbufused, + cap->content, cap->msg_length); + smtpsess->contbufused += cap->msg_length; + smtpsess->contbuffer[smtpsess->contbufused] = '\0'; + + return 0; +} + +static int extract_smtp_participant(emailsession_t *sess, + smtp_session_t *smtpstate, int contoffset, int contend) { + + char *addr, *addrstart, *addrend; + const char *search = (const char *)(smtpstate->contbuffer + contoffset); + + addrstart = strchr(search, '<'); + if (addrstart == NULL) { + return -1; + } + + addrend = strchr(search, '>'); + if (addrend == NULL) { + return -1; + } + + if (addrstart >= (char *)(smtpstate->contbuffer + contend)) { + return -1; + } + + if (addrend >= (char *)(smtpstate->contbuffer + contend)) { + return -1; + } + + + addr = strndup(addrstart + 1, addrend - addrstart - 1); + + add_email_participant(sess, addr, + (sess->currstate == OPENLI_SMTP_STATE_MAIL_FROM_OVER)); + return 1; + +} + +static int find_next_crlf(smtp_session_t *sess, int start_index) { + + int rem; + uint8_t *found; + + rem = sess->contbufused - start_index; + + found = (uint8_t *)memmem(sess->contbuffer + start_index, rem, "\r\n", 2); + + if (found) { + sess->contbufread = (found - sess->contbuffer) + 2; + return 1; + } + return 0; +} + +static int find_smtp_reply_code(smtp_session_t *sess, uint16_t *storage) { + + int res; + regex_t lastreply; + regmatch_t pmatch[1]; + const char *search; + + if (regcomp(&lastreply, "[[:digit:]][[:digit:]][[:digit:]] ", 0) != 0) { + return -1; + } + + search = (const char *)(sess->contbuffer + sess->contbufread); + + res = regexec(&lastreply, search, 1, 
pmatch, 0); + if (res != 0) { + regfree(&lastreply); + return 0; + } + + if (storage) { + (*storage) = strtoul(search + pmatch[0].rm_so, NULL, 10); + } + regfree(&lastreply); + return find_next_crlf(sess, sess->contbufread + pmatch[0].rm_so); +} + +static int find_ehlo_end(smtp_session_t *sess) { + return find_next_crlf(sess, sess->ehlo_start); +} + +static int find_mail_from_end(smtp_session_t *sess) { + return find_next_crlf(sess, sess->mailfrom_start); +} + +static int find_rcpt_to_end(smtp_session_t *sess) { + return find_next_crlf(sess, sess->rcptto_start); +} + +static int find_data_init_reply_code(smtp_session_t *sess) { + return find_smtp_reply_code(sess, &(sess->data_reply_code)); +} + +static int find_data_final_reply_code(smtp_session_t *sess) { + return find_smtp_reply_code(sess, &(sess->data_final_reply_code)); +} + +static int find_reset_reply_code(smtp_session_t *sess) { + return find_smtp_reply_code(sess, NULL); +} + +static int find_quit_reply_code(smtp_session_t *sess) { + return find_smtp_reply_code(sess, NULL); +} + +static int find_ehlo_response_end(smtp_session_t *sess) { + return find_smtp_reply_code(sess, &(sess->ehlo_reply_code)); +} + +static int find_mail_from_reply_end(smtp_session_t *sess) { + return find_smtp_reply_code(sess, &(sess->mailfrom_reply_code)); +} + +static int find_rcpt_to_reply_end(smtp_session_t *sess) { + return find_smtp_reply_code(sess, &(sess->rcptto_reply_code)); +} + +static int find_data_start(smtp_session_t *sess) { + uint8_t *found = NULL; + if (sess->contbufused - sess->contbufread < 6) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(sess->contbuffer + sess->contbufread), + "DATA\r\n"); + if (found == NULL) { + return 0; + } + + /* Skip past "DATA\r\n" automatically */ + sess->data_start = (found - sess->contbuffer); + sess->contbufread = sess->data_start + 6; + return 1; +} + +static int find_reset_command(smtp_session_t *sess) { + uint8_t *found = NULL; + if (sess->contbufused - sess->contbufread < 6) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(sess->contbuffer + sess->contbufread), + "RSET\r\n"); + if (found == NULL) { + return 0; + } + + /* Skip past "RSET\r\n" automatically */ + sess->contbufread = (found - sess->contbuffer); + sess->contbufread += 6; + return 1; +} + +static int find_quit_command(smtp_session_t *sess) { + uint8_t *found = NULL; + if (sess->contbufused - sess->contbufread < 6) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(sess->contbuffer + sess->contbufread), + "QUIT\r\n"); + if (found == NULL) { + return 0; + } + + /* Skip past "QUIT\r\n" automatically */ + sess->contbufread = (found - sess->contbuffer); + sess->contbufread += 6; + return 1; +} + +static int find_mail_from(smtp_session_t *sess) { + uint8_t *found = NULL; + if (sess->contbufused - sess->contbufread < 10) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(sess->contbuffer + sess->contbufread), + "MAIL FROM:"); + if (found != NULL) { + sess->mailfrom_start = (found - sess->contbuffer); + return 1; + } + + return 0; +} + +static int find_rcpt_to(smtp_session_t *sess) { + uint8_t *found = NULL; + if (sess->contbufused - sess->contbufread < 8) { + return 0; + } + + found = (uint8_t *)strcasestr( + (const char *)(sess->contbuffer + sess->contbufread), + "RCPT TO:"); + if (found != NULL) { + sess->rcptto_start = (found - sess->contbuffer); + return 1; + } + + return 0; +} + +static int find_data_content_ending(smtp_session_t *sess) { + const char *search = (const 
char *)(sess->contbuffer + sess->contbufread); + uint8_t *found = NULL; + + /* An "empty" mail message is ".\r\n" -- edge case, but let's try to + * handle it regardless. + */ + if (strncmp(search, ".\r\n", 3) == 0) { + sess->contbufread += 3; + return 1; + } + + found = (uint8_t *)strstr(search, "\r\n.\r\n"); + if (found != NULL) { + sess->contbufread = (found - sess->contbuffer) + 5; + sess->data_end = sess->contbufread; + return 1; + } + + return 0; +} + + +static int find_ehlo_start(emailsession_t *mailsess, smtp_session_t *sess) { + uint8_t *found = NULL; + const char *search; + + if (sess->contbufused - sess->contbufread < 5) { + return 0; + } + search = (const char *)(sess->contbuffer + sess->contbufread); + + found = (uint8_t *)strcasestr(search, "EHLO "); + + /* In theory, we can have multiple EHLOs (e.g. when STARTTLS is used), + * so don't reset the EHLO start pointer if we haven't transitioned past + * the EHLO OVER state. + */ + if (found != NULL) { + if (mailsess->currstate != OPENLI_SMTP_STATE_EHLO_OVER) { + sess->ehlo_start = (found - sess->contbuffer); + } + return 1; + } + + found = (uint8_t *)strcasestr(search, "HELO "); + if (found != NULL) { + if (mailsess->currstate != OPENLI_SMTP_STATE_EHLO_OVER) { + sess->ehlo_start = (found - sess->contbuffer); + } + return 1; + } + + return 0; +} + +static int process_next_smtp_state(openli_email_worker_t *state, + emailsession_t *sess, smtp_session_t *smtpsess, uint64_t timestamp) { + int r; + + /* TODO consider adding state parsing for AUTH, STARTTLS, VRFY, EXPN + * and any other SMTP commands that exist -- it will only really + * matter for octet counting reasons and I doubt the LEAs care that + * much, but something to bear in mind... + */ + + if (sess->currstate != OPENLI_SMTP_STATE_DATA_CONTENT) { + if ((r = find_quit_command(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_QUIT; + sess->client_octets += 6; + smtpsess->reply_start = smtpsess->contbufread; + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate != OPENLI_SMTP_STATE_DATA_CONTENT) { + if ((r = find_reset_command(smtpsess)) == 1) { + smtpsess->saved_state = sess->currstate; + sess->currstate = OPENLI_SMTP_STATE_RESET; + sess->client_octets += 6; + smtpsess->reply_start = smtpsess->contbufread; + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_INIT || + sess->currstate == OPENLI_SMTP_STATE_EHLO_OVER) { + if ((r = find_ehlo_start(sess, smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_EHLO; + sess->server_octets += + (smtpsess->ehlo_start - smtpsess->contbufread); + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_EHLO) { + if ((r = find_ehlo_end(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_EHLO_RESPONSE; + sess->client_octets += + (smtpsess->contbufread - smtpsess->ehlo_start); + smtpsess->reply_start = smtpsess->contbufread; + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_EHLO_RESPONSE) { + if ((r = find_ehlo_response_end(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_EHLO_OVER; + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + + return 1; + } else if (r < 0) { + return r; + } + + } + + if (sess->currstate == OPENLI_SMTP_STATE_EHLO_OVER) { + if ((r = find_mail_from(smtpsess)) == 1) { + smtpsess->ehlo_reply_end = smtpsess->mailfrom_start; + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM; + return 1; + } else if (r < 0) { + return r; 
+ } + } + + if (sess->currstate == OPENLI_SMTP_STATE_MAIL_FROM) { + if ((r = find_mail_from_end(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM_REPLY; + smtpsess->reply_start = smtpsess->contbufread; + sess->client_octets += + (smtpsess->contbufread - smtpsess->mailfrom_start); + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_MAIL_FROM_REPLY) { + if ((r = find_mail_from_reply_end(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + if (smtpsess->mailfrom_reply_code == 250) { + char *saved_sender = NULL; + int skip_login_iri = 0; + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM_OVER; + + if (sess->login_sent && sess->sender.emailaddr) { + saved_sender = strdup(sess->sender.emailaddr); + } + + /* extract latest sender info from mail from content */ + if (extract_smtp_participant(sess, smtpsess, + smtpsess->mailfrom_start, smtpsess->contbufread) < 0) { + if (saved_sender) { + free(saved_sender); + } + return -1; + } + + if (sess->login_sent) { + /* If we have sent a login IRI and the MAIL FROM + * address has now changed, send a logoff IRI to indicate + * that this session is no longer being used by the + * previous address (remember, the new address may + * not be a target so we cannot rely on a login event + * IRI for the new address being seen by the LEA. + */ + if (strcmp(saved_sender, sess->sender.emailaddr) != 0) { + sess->event_time = timestamp; + generate_email_logoff_iri(state, sess); + } else { + skip_login_iri = 1; + } + } + if (saved_sender) { + free(saved_sender); + } + clear_email_participant_list(sess); + + /* send email login event IRI (and CC?) if any of the + participants match a known target. + */ + sess->login_time = timestamp; + if (smtpsess->ehlo_reply_code >= 200 && + smtpsess->ehlo_reply_code < 300) { + if (!skip_login_iri) { + generate_email_login_success_iri(state, sess); + sess->login_sent = 1; + } + } else { + generate_email_login_failure_iri(state, sess); + } + } else { + sess->currstate = OPENLI_SMTP_STATE_EHLO_OVER; + } + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_MAIL_FROM_OVER) { + if ((r = find_rcpt_to(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO; + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_RCPT_TO) { + if ((r = find_rcpt_to_end(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO_REPLY; + smtpsess->reply_start = smtpsess->contbufread; + sess->client_octets += + (smtpsess->contbufread - smtpsess->rcptto_start); + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_RCPT_TO_REPLY) { + if ((r = find_rcpt_to_reply_end(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + if (smtpsess->rcptto_reply_code == 250) { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO_OVER; + + /* extract recipient info from rcpt to content */ + if (extract_smtp_participant(sess, smtpsess, + smtpsess->rcptto_start, smtpsess->contbufread) < 0) { + return -1; + } + } else { + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM_OVER; + } + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_RCPT_TO_OVER) { + if ((r = find_rcpt_to(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO; + /* Need to restart the loop to handle RCPT_TO state again */ + return 1; + } else if ((r = find_data_start(smtpsess)) == 1) { 
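+            /* find_data_start() has already consumed the "DATA\r\n" line, so
+             * the next thing we expect from the server is the 354 reply.
+             */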
+ + sess->currstate = OPENLI_SMTP_STATE_DATA_INIT_REPLY; + sess->client_octets += 6; + smtpsess->reply_start = smtpsess->contbufread; + return 1; + } else if ((r = find_mail_from(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM; + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_DATA_INIT_REPLY) { + if ((r = find_data_init_reply_code(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + if (smtpsess->data_reply_code == 354) { + sess->currstate = OPENLI_SMTP_STATE_DATA_CONTENT; + } else { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO_OVER; + } + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_DATA_CONTENT) { + if ((r = find_data_content_ending(smtpsess)) == 1) { + sess->currstate = OPENLI_SMTP_STATE_DATA_FINAL_REPLY; + smtpsess->reply_start = smtpsess->contbufread; + sess->client_octets += + (smtpsess->contbufread - smtpsess->data_start); + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_DATA_FINAL_REPLY) { + if ((r = find_data_final_reply_code(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + if (smtpsess->data_final_reply_code == 250) { + sess->currstate = OPENLI_SMTP_STATE_DATA_OVER; + sess->event_time = timestamp; + /* generate email send CC and IRI */ + generate_email_send_iri(state, sess); + generate_email_cc_from_smtp_payload(state, sess, + smtpsess->contbuffer + smtpsess->data_start, + smtpsess->contbufread - smtpsess->data_start, + timestamp); + } else { + sess->currstate = OPENLI_SMTP_STATE_RCPT_TO_OVER; + } + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_DATA_OVER) { + if ((r = find_mail_from(smtpsess)) == 1) { + /* client is re-using the session to send another email? 
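+             * SMTP allows multiple mail transactions on one connection, so
+             * drop back to MAIL FROM handling.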
*/ + sess->currstate = OPENLI_SMTP_STATE_MAIL_FROM; + return 1; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_RESET) { + if ((r = find_reset_reply_code(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + if (smtpsess->saved_state == OPENLI_SMTP_STATE_INIT || + smtpsess->saved_state == OPENLI_SMTP_STATE_EHLO_OVER || + smtpsess->saved_state == OPENLI_SMTP_STATE_DATA_OVER) { + sess->currstate = smtpsess->saved_state; + smtpsess->saved_state = OPENLI_SMTP_STATE_INIT; + } else { + sess->currstate = OPENLI_SMTP_STATE_EHLO_OVER; + smtpsess->saved_state = OPENLI_SMTP_STATE_INIT; + + smtpsess->mailfrom_start = 0; + smtpsess->rcptto_start = 0; + smtpsess->data_start = 0; + smtpsess->data_end = 0; + } + + return 1; + } else if (r < 0) { + return r; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_QUIT) { + if ((r = find_quit_reply_code(smtpsess)) == 1) { + sess->server_octets += + (smtpsess->contbufread - smtpsess->reply_start); + sess->currstate = OPENLI_SMTP_STATE_QUIT_REPLY; + sess->event_time = timestamp; + generate_email_logoff_iri(state, sess); + return 0; + } else if (r < 0) { + return r; + } + } + + return 0; +} + +int update_smtp_session_by_ingestion(openli_email_worker_t *state, + emailsession_t *sess, openli_email_captured_t *cap) { + smtp_session_t *smtpsess; + int r; + + if (sess->proto_state == NULL) { + smtpsess = calloc(1, sizeof(smtp_session_t)); + smtpsess->messageid = NULL; + smtpsess->contbuffer = calloc(1024, sizeof(uint8_t)); + smtpsess->contbufused = 0; + smtpsess->contbufread = 0; + smtpsess->contbufsize = 1024; + sess->proto_state = (void *)smtpsess; + } else { + smtpsess = (smtp_session_t *)sess->proto_state; + } + + if (append_content_to_smtp_buffer(smtpsess, cap) < 0) { + logger(LOG_INFO, "OpenLI: Failed to append SMTP message content to session buffer for %s", sess->key); + return -1; + } + + while (1) { + if ((r = process_next_smtp_state(state, sess, smtpsess, + cap->timestamp)) <= 0) { + break; + } + } + + if (sess->currstate == OPENLI_SMTP_STATE_QUIT_REPLY) { + return 1; + } + + return 0; +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/collector/encoder_worker.c b/src/collector/encoder_worker.c index 53748cf2..031f9faa 100644 --- a/src/collector/encoder_worker.c +++ b/src/collector/encoder_worker.c @@ -32,6 +32,7 @@ #include "ipcc.h" #include "ipmmiri.h" #include "umtsiri.h" +#include "emailiri.h" #include "collector_base.h" #include "logger.h" #include "etsili_core.h" @@ -263,13 +264,23 @@ void destroy_encoder_worker(openli_encoder_t *enc) { static int encode_rawip(openli_encoder_t *enc, openli_encoding_job_t *job, openli_encoded_result_t *res) { + uint16_t liidlen, l; + + liidlen = strlen(job->liid); + l = htons(liidlen); + memset(res, 0, sizeof(openli_encoded_result_t)); res->msgbody = calloc(1, sizeof(wandder_encoded_result_t)); res->msgbody->encoder = NULL; - res->msgbody->encoded = NULL; - res->msgbody->len = job->origreq->data.rawip.ipclen; - res->msgbody->alloced = 0; + res->msgbody->encoded = malloc(liidlen + sizeof(uint16_t)); + + memcpy(res->msgbody->encoded, &l, sizeof(uint16_t)); + memcpy(res->msgbody->encoded + sizeof(uint16_t), job->liid, liidlen); + + res->msgbody->len = job->origreq->data.rawip.ipclen + + (liidlen + sizeof(uint16_t)); + res->msgbody->alloced = liidlen + sizeof(uint16_t); res->msgbody->next = NULL; res->ipcontents = job->origreq->data.rawip.ipcontent; @@ -498,6 +509,47 @@ static int encode_templated_ipmmcc(openli_encoder_t *enc, return 1; } +static int 
encode_templated_emailiri(openli_encoder_t *enc, + openli_encoding_job_t *job, encoded_header_template_t *hdr_tplate, + openli_encoded_result_t *res) { + + wandder_encoded_result_t *body = NULL; + openli_emailiri_job_t *irijob = + (openli_emailiri_job_t *)&(job->origreq->data.emailiri); + + /* create custom params from job "contents" */ + prepare_emailiri_parameters(enc->freegenerics, irijob, + &(irijob->customparams)); + + reset_wandder_encoder(enc->encoder); + body = encode_emailiri_body(enc->encoder, job->preencoded, irijob->iritype, + &(irijob->customparams)); + if (body == NULL || body->len == 0 || body->encoded == NULL) { + logger(LOG_INFO, "OpenLI: failed to encode ETSI Email IRI body"); + if (body) { + wandder_release_encoded_result(enc->encoder, body); + } + return -1; + } + + if (create_encoded_message_body(res, hdr_tplate, body->encoded, body->len, + job->liid, + job->preencoded[OPENLI_PREENCODE_LIID].vallen) < 0) { + wandder_release_encoded_result(enc->encoder, body); + return -1; + } + + res->ipcontents = NULL; + res->ipclen = 0; + res->header.intercepttype = htons(OPENLI_PROTO_ETSI_IRI); + + wandder_release_encoded_result(enc->encoder, body); + free_emailiri_parameters(irijob->customparams); + + /* Success */ + return 1; +} + static int encode_templated_umtsiri(openli_encoder_t *enc, openli_encoding_job_t *job, encoded_header_template_t *hdr_tplate, openli_encoded_result_t *res) { @@ -659,6 +711,69 @@ static int encode_templated_umtscc(openli_encoder_t *enc, } +static int encode_templated_emailcc(openli_encoder_t *enc, + openli_encoding_job_t *job, encoded_header_template_t *hdr_tplate, + openli_encoded_result_t *res) { + + uint32_t key = 0; + encoded_global_template_t *emailcc_tplate = NULL; + openli_emailcc_job_t *emailccjob; + uint8_t is_new = 0; + + emailccjob = (openli_emailcc_job_t *)&(job->origreq->data.emailcc); + + if (emailccjob->format == ETSILI_EMAIL_CC_FORMAT_IP && + emailccjob->dir == ETSI_DIR_FROM_TARGET) { + key = (TEMPLATE_TYPE_EMAILCC_IP_DIRFROM << 16) + + emailccjob->cc_content_len; + } else if (emailccjob->format == ETSILI_EMAIL_CC_FORMAT_APP && + emailccjob->dir == ETSI_DIR_FROM_TARGET) { + key = (TEMPLATE_TYPE_EMAILCC_APP_DIRFROM << 16) + + emailccjob->cc_content_len; + } else if (emailccjob->format == ETSILI_EMAIL_CC_FORMAT_IP && + emailccjob->dir == ETSI_DIR_TO_TARGET) { + key = (TEMPLATE_TYPE_EMAILCC_IP_DIRTO << 16) + + emailccjob->cc_content_len; + } else if (emailccjob->format == ETSILI_EMAIL_CC_FORMAT_APP && + emailccjob->dir == ETSI_DIR_TO_TARGET) { + key = (TEMPLATE_TYPE_EMAILCC_APP_DIRTO << 16) + + emailccjob->cc_content_len; + } else { + logger(LOG_INFO, "Unexpected format + direction for EmailCC: %u %u", + emailccjob->format, emailccjob->dir); + return -1; + } + + emailcc_tplate = lookup_global_template(enc, key, &is_new); + + if (is_new) { + if (etsili_create_emailcc_template(enc->encoder, job->preencoded, + emailccjob->format, emailccjob->dir, + emailccjob->cc_content_len, emailcc_tplate) < 0) { + logger(LOG_INFO, "OpenLI: Failed to create EmailCC template?"); + return -1; + } + } + /* We have very specific templates for each observed packet size, so + * this will not require updating */ + + if (create_encoded_message_body(res, hdr_tplate, + emailcc_tplate->cc_content.cc_wrap, + emailcc_tplate->cc_content.cc_wrap_len, + job->liid, + job->preencoded[OPENLI_PREENCODE_LIID].vallen) < 0) { + return -1; + } + + /* Set ipcontents in the result */ + res->ipcontents = (uint8_t *)emailccjob->cc_content; + res->ipclen = emailccjob->cc_content_len; + 
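+    /* The raw CC payload is handed to the output stage via ipcontents and
+     * ipclen, rather than being copied into the encoded template body.
+     */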
res->header.intercepttype = htons(OPENLI_PROTO_ETSI_CC); + + /* Success */ + return 1; +} + static int encode_templated_ipcc(openli_encoder_t *enc, openli_encoding_job_t *job, encoded_header_template_t *hdr_tplate, openli_encoded_result_t *res) { @@ -794,6 +909,12 @@ static int encode_etsi(openli_encoder_t *enc, openli_encoding_job_t *job, case OPENLI_EXPORT_UMTSIRI: ret = encode_templated_umtsiri(enc, job, hdr_tplate, res); break; + case OPENLI_EXPORT_EMAILIRI: + ret = encode_templated_emailiri(enc, job, hdr_tplate, res); + break; + case OPENLI_EXPORT_EMAILCC: + ret = encode_templated_emailcc(enc, job, hdr_tplate, res); + break; default: ret = 0; } diff --git a/src/collector/encoder_worker.h b/src/collector/encoder_worker.h index 1a43b586..b8051e2e 100644 --- a/src/collector/encoder_worker.h +++ b/src/collector/encoder_worker.h @@ -51,6 +51,10 @@ enum { TEMPLATE_TYPE_UMTSCC_DIRTO, TEMPLATE_TYPE_UMTSCC_DIROTHER, + TEMPLATE_TYPE_EMAILCC_IP_DIRTO, + TEMPLATE_TYPE_EMAILCC_APP_DIRTO, + TEMPLATE_TYPE_EMAILCC_IP_DIRFROM, + TEMPLATE_TYPE_EMAILCC_APP_DIRFROM, }; typedef struct saved_encoding_templates { diff --git a/src/collector/ipcc.c b/src/collector/ipcc.c index 93afb1a4..2ab5dc7f 100644 --- a/src/collector/ipcc.c +++ b/src/collector/ipcc.c @@ -129,6 +129,11 @@ static inline int lookup_static_ranges(struct sockaddr *cmp, "OpenLI: matched an IP range for intercept %s but this is not present in activestaticintercepts", sliid->key); } else { + if (matchsess->common.tomediate == + OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } + if (tv->tv_sec < matchsess->common.tostart_time) { continue; } @@ -180,6 +185,11 @@ static void singlev6_conn_contents(struct sockaddr_in6 *cmp, sliid->key); } else { HASH_ITER(hh, tgt->intercepts, sess, tmp) { + if (sess->common.tomediate == + OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } + if (tv->tv_sec < sess->common.tostart_time) { continue; } @@ -261,6 +271,9 @@ int ipv4_comm_contents(libtrace_packet_t *pkt, packet_info_t *pinfo, if (tgt) { HASH_ITER(hh, tgt->intercepts, sess, tmp) { + if (sess->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } if (pinfo->tv.tv_sec < sess->common.tostart_time) { continue; } @@ -288,6 +301,9 @@ int ipv4_comm_contents(libtrace_packet_t *pkt, packet_info_t *pinfo, if (tgt) { HASH_ITER(hh, tgt->intercepts, sess, tmp) { + if (sess->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } if (pinfo->tv.tv_sec < sess->common.tostart_time) { continue; } diff --git a/src/collector/ipmmcc.c b/src/collector/ipmmcc.c index 1bad303d..6530b89e 100644 --- a/src/collector/ipmmcc.c +++ b/src/collector/ipmmcc.c @@ -121,6 +121,10 @@ static inline int generic_mm_comm_contents(int family, libtrace_packet_t *pkt, continue; } + if (rtp->common.tomediate == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + continue; + } + if (rtp->targetaddr == NULL || rtp->otheraddr == NULL) { continue; } diff --git a/src/configparser.c b/src/configparser.c index 83ef0870..1635297c 100644 --- a/src/configparser.c +++ b/src/configparser.c @@ -175,6 +175,145 @@ static int parse_input_config(collector_global_t *glob, yaml_document_t *doc, return 0; } +static int parse_email_ingest_config(collector_global_t *glob, + yaml_document_t *doc, yaml_node_t *optmap) { + + yaml_node_pair_t *pair; + yaml_node_t *key, *value; + + for (pair = optmap->data.mapping.pairs.start; + pair < optmap->data.mapping.pairs.top; pair ++) { + + key = yaml_document_get_node(doc, pair->key); + value = yaml_document_get_node(doc, pair->value); + + if (key->type == 
YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "enabled") == 0) { + glob->emailconf.enabled = + check_onoff((char *)value->data.scalar.value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "requiretls") == 0) { + glob->emailconf.tlsrequired = + check_onoff((char *)value->data.scalar.value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "authpassword") == 0) { + glob->emailconf.authrequired = true; + SET_CONFIG_STRING_OPTION(glob->emailconf.authpassword, value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "listenaddress") == 0) { + SET_CONFIG_STRING_OPTION(glob->emailconf.listenaddr, value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "listenport") == 0) { + SET_CONFIG_STRING_OPTION(glob->emailconf.listenport, value); + } + } + + return 0; +} + +static int parse_email_timeouts_config(collector_global_t *glob, + yaml_document_t *doc, yaml_node_t *inputs) { + + yaml_node_item_t *item; + + for (item = inputs->data.sequence.items.start; + item != inputs->data.sequence.items.top; item ++) { + yaml_node_t *node = yaml_document_get_node(doc, *item); + yaml_node_pair_t *pair; + + for (pair = node->data.mapping.pairs.start; + pair < node->data.mapping.pairs.top; pair ++) { + yaml_node_t *key, *value; + + key = yaml_document_get_node(doc, pair->key); + value = yaml_document_get_node(doc, pair->value); + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE) { + if (strcasecmp((char *)key->data.scalar.value, "smtp") == 0) { + glob->email_timeouts.smtp = strtoul( + (char *)value->data.scalar.value, NULL, 10); + } + else if (strcasecmp((char *)key->data.scalar.value, + "imap") == 0) { + glob->email_timeouts.imap = strtoul( + (char *)value->data.scalar.value, NULL, 10); + } + else if (strcasecmp((char *)key->data.scalar.value, + "pop3") == 0) { + glob->email_timeouts.pop3 = strtoul( + (char *)value->data.scalar.value, NULL, 10); + } else { + logger(LOG_INFO, "OpenLI: unexpected email protocol '%s' in 'emailsessiontimeouts' configuration", (char *)key->data.scalar.value); + } + } + } + } + return 0; +} + +static void parse_email_targets(email_target_t **targets, yaml_document_t *doc, + yaml_node_t *tgtconf) { + + yaml_node_item_t *item; + + for (item = tgtconf->data.sequence.items.start; + item != tgtconf->data.sequence.items.top; item ++) { + yaml_node_t *node = yaml_document_get_node(doc, *item); + yaml_node_pair_t *pair; + + email_target_t *newtgt, *found; + + newtgt = (email_target_t *)calloc(1, sizeof(email_target_t)); + newtgt->awaitingconfirm = 1; + + for (pair = node->data.mapping.pairs.start; + pair < node->data.mapping.pairs.top; pair ++) { + yaml_node_t *key, *value; + key = yaml_document_get_node(doc, pair->key); + value = yaml_document_get_node(doc, pair->value); + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "address") == 0) { + SET_CONFIG_STRING_OPTION(newtgt->address, value); + } + } + + if (!newtgt->address) { + logger(LOG_INFO, + "OpenLI: a Email target requires an address, skipping."); + free(newtgt); + continue; + } + + HASH_FIND(hh, *targets, newtgt->address, strlen(newtgt->address), + found); + if (found) { + free(newtgt->address); + 
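+            /* a target with this address already exists, so drop the
+             * duplicate */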
free(newtgt); + continue; + } + + HASH_ADD_KEYPTR(hh, *targets, newtgt->address, strlen(newtgt->address), + newtgt); + } + +} + static void parse_sip_targets(libtrace_list_t *targets, yaml_document_t *doc, yaml_node_t *tgtconf) { @@ -507,6 +646,132 @@ static int parse_agency_list(prov_intercept_conf_t *state, yaml_document_t *doc, return 0; } +static void parse_intercept_common_fields(intercept_common_t *common, + yaml_node_t *key, yaml_node_t *value) { + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "liid") == 0) { + SET_CONFIG_STRING_OPTION(common->liid, value); + common->liid_len = strlen(common->liid); + } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, + "authcountrycode") == 0) { + SET_CONFIG_STRING_OPTION(common->authcc, value); + common->authcc_len = strlen(common->authcc); + } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, + "deliverycountrycode") == 0) { + SET_CONFIG_STRING_OPTION(common->delivcc, value); + common->delivcc_len = strlen(common->delivcc); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "mediator") == 0 + && common->destid == 0) { + common->destid = strtoul( + (char *)value->data.scalar.value, NULL, 10); + if (common->destid == 0) { + logger(LOG_INFO, "OpenLI: 0 is not a valid value for the 'mediator' config option."); + } + } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "starttime") == 0) { + common->tostart_time = strtoul( + (char *)value->data.scalar.value, NULL, 10); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "endtime") == 0) { + common->toend_time = strtoul( + (char *)value->data.scalar.value, NULL, 10); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "agencyid") == 0) { + SET_CONFIG_STRING_OPTION(common->targetagency, value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "outputhandovers") == 0) { + + if (strcasecmp((char *)value->data.scalar.value, "irionly") == 0) { + common->tomediate = OPENLI_INTERCEPT_OUTPUTS_IRIONLY; + } else if (strcasecmp((char *)value->data.scalar.value, "cconly") == 0) { + common->tomediate = OPENLI_INTERCEPT_OUTPUTS_CCONLY; + } else { + common->tomediate = OPENLI_INTERCEPT_OUTPUTS_ALL; + } + } +} + +static int parse_emailintercept_list(emailintercept_t **mailints, + yaml_document_t *doc, yaml_node_t *inputs) { + + yaml_node_item_t *item; + + for (item = inputs->data.sequence.items.start; + item != inputs->data.sequence.items.top; item ++) { + yaml_node_t *node = yaml_document_get_node(doc, *item); + yaml_node_pair_t *pair; + emailintercept_t *newcept; + unsigned int tgtcount = 0; + + newcept = (emailintercept_t *)calloc(1, sizeof(emailintercept_t)); + newcept->common.liid = NULL; + newcept->common.authcc = NULL; + newcept->common.delivcc = NULL; + newcept->common.destid = 0; + newcept->common.targetagency = NULL; + newcept->common.tostart_time = 0; + newcept->common.toend_time = 0; + newcept->common.tomediate = OPENLI_INTERCEPT_OUTPUTS_ALL; + newcept->common.hi1_seqno = 0; + newcept->awaitingconfirm = 1; + newcept->targets = NULL; + + for (pair = 
node->data.mapping.pairs.start; + pair < node->data.mapping.pairs.top; pair ++) { + yaml_node_t *key, *value; + + key = yaml_document_get_node(doc, pair->key); + value = yaml_document_get_node(doc, pair->value); + + parse_intercept_common_fields(&(newcept->common), key, value); + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, "targets") == 0) { + + parse_email_targets(&(newcept->targets), doc, value); + } + } + + tgtcount = HASH_CNT(hh, newcept->targets); + if (newcept->common.liid != NULL && newcept->common.authcc != NULL && + newcept->common.delivcc != NULL && + tgtcount > 0 && + newcept->common.destid > 0 && + newcept->common.targetagency != NULL) { + HASH_ADD_KEYPTR(hh_liid, *mailints, newcept->common.liid, + newcept->common.liid_len, newcept); + } else { + logger(LOG_INFO, "OpenLI: Email Intercept configuration was incomplete -- skipping."); + } + } + + return 0; +} + static int parse_voipintercept_list(voipintercept_t **voipints, yaml_document_t *doc, yaml_node_t *inputs) { @@ -539,6 +804,7 @@ static int parse_voipintercept_list(voipintercept_t **voipints, newcept->options = 0; newcept->common.tostart_time = 0; newcept->common.toend_time = 0; + newcept->common.tomediate = OPENLI_INTERCEPT_OUTPUTS_ALL; /* Mappings describe the parameters for each intercept */ for (pair = node->data.mapping.pairs.start; @@ -548,27 +814,7 @@ static int parse_voipintercept_list(voipintercept_t **voipints, key = yaml_document_get_node(doc, pair->key); value = yaml_document_get_node(doc, pair->value); - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "liid") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.liid, value); - newcept->common.liid_len = strlen(newcept->common.liid); - } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, - "authcountrycode") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.authcc, value); - newcept->common.authcc_len = strlen(newcept->common.authcc); - } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, - "deliverycountrycode") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.delivcc, value); - newcept->common.delivcc_len = strlen(newcept->common.delivcc); - } - + parse_intercept_common_fields(&(newcept->common), key, value); if (key->type == YAML_SCALAR_NODE && value->type == YAML_SEQUENCE_NODE && strcmp((char *)key->data.scalar.value, "siptargets") == 0) { @@ -576,36 +822,6 @@ static int parse_voipintercept_list(voipintercept_t **voipints, parse_sip_targets(newcept->targets, doc, value); } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "mediator") == 0 - && newcept->common.destid == 0) { - newcept->common.destid = strtoul((char *)value->data.scalar.value, - NULL, 10); - if (newcept->common.destid == 0) { - logger(LOG_INFO, "OpenLI: 0 is not a valid value for the 'mediator' config option."); - } - } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "starttime") == 0) { - newcept->common.tostart_time = strtoul( - (char *)value->data.scalar.value, NULL, 10); - } - - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "endtime") == 0) { - newcept->common.toend_time = strtoul( - (char *)value->data.scalar.value, 
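To show how the new email intercept list is meant to be populated, here is a hedged example of a provisioner intercept entry built only from the keys handled by parse_intercept_common_fields() and parse_emailintercept_list() above; the emailintercepts key itself is registered in intercept_parser() further down, and the LIID, country codes, agency ID and target address are placeholders. An entry like this also satisfies the completeness check at the end of parse_emailintercept_list() (LIID, both country codes, a non-zero mediator ID, an agency and at least one target):

    emailintercepts:
        - liid: EM1000001
          authcountrycode: NZ
          deliverycountrycode: NZ
          mediator: 6001
          agencyid: example-lea
          outputhandovers: all
          starttime: 1661899200
          endtime: 1664491200
          targets:
            - address: target@example.org
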
NULL, 10); - } - - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "agencyid") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.targetagency, value); - } - } if (newcept->common.liid != NULL && newcept->common.authcc != NULL && @@ -653,11 +869,12 @@ static int parse_ipintercept_list(ipintercept_t **ipints, yaml_document_t *doc, newcept->common.authcc_len = 0; newcept->common.delivcc_len = 0; newcept->vendmirrorid = OPENLI_VENDOR_MIRROR_NONE; - newcept->accesstype = INTERNET_ACCESS_TYPE_UNDEFINED; + newcept->accesstype = INTERNET_ACCESS_TYPE_UNDEFINED; newcept->statics = NULL; newcept->options = 0; newcept->common.tostart_time = 0; newcept->common.toend_time = 0; + newcept->common.tomediate = OPENLI_INTERCEPT_OUTPUTS_ALL; /* Mappings describe the parameters for each intercept */ for (pair = node->data.mapping.pairs.start; @@ -667,20 +884,7 @@ static int parse_ipintercept_list(ipintercept_t **ipints, yaml_document_t *doc, key = yaml_document_get_node(doc, pair->key); value = yaml_document_get_node(doc, pair->value); - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "liid") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.liid, value); - newcept->common.liid_len = strlen(newcept->common.liid); - } - - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, - "authcountrycode") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.authcc, value); - newcept->common.authcc_len = strlen(newcept->common.authcc); - } + parse_intercept_common_fields(&(newcept->common), key, value); if (key->type == YAML_SCALAR_NODE && value->type == YAML_SEQUENCE_NODE && @@ -689,14 +893,6 @@ static int parse_ipintercept_list(ipintercept_t **ipints, yaml_document_t *doc, add_intercept_static_ips(&(newcept->statics), doc, value); } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, - "deliverycountrycode") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.delivcc, value); - newcept->common.delivcc_len = strlen(newcept->common.delivcc); - } - if (key->type == YAML_SCALAR_NODE && value->type == YAML_SCALAR_NODE && strcmp((char *)key->data.scalar.value, "user") == 0) { @@ -733,21 +929,6 @@ static int parse_ipintercept_list(ipintercept_t **ipints, yaml_document_t *doc, } } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "mediator") == 0 - && newcept->common.destid == 0) { - newcept->common.destid = strtoul((char *)value->data.scalar.value, - NULL, 10); - if (newcept->common.destid == 0) { - logger(LOG_INFO, "OpenLI: 0 is not a valid value for the 'mediator' config option."); - } - } - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "agencyid") == 0) { - SET_CONFIG_STRING_OPTION(newcept->common.targetagency, value); - } if (key->type == YAML_SCALAR_NODE && value->type == YAML_SCALAR_NODE && @@ -764,21 +945,6 @@ static int parse_ipintercept_list(ipintercept_t **ipints, yaml_document_t *doc, } } - - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char *)key->data.scalar.value, "starttime") == 0) { - newcept->common.tostart_time = strtoul( - (char *)value->data.scalar.value, NULL, 10); - } - - if (key->type == YAML_SCALAR_NODE && - value->type == YAML_SCALAR_NODE && - strcmp((char 
*)key->data.scalar.value, "endtime") == 0) { - newcept->common.toend_time = strtoul( - (char *)value->data.scalar.value, NULL, 10); - } - } if (newcept->common.liid != NULL && newcept->common.authcc != NULL && @@ -940,6 +1106,15 @@ static int global_parser(void *arg, yaml_document_t *doc, SET_CONFIG_STRING_OPTION(glob->sipdebugfile, value); } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_MAPPING_NODE && + strcmp((char *)key->data.scalar.value, "emailingest") == 0) { + if (parse_email_ingest_config(glob, doc, value) == -1) { + return -1; + } + } + if (key->type == YAML_SCALAR_NODE && value->type == YAML_SEQUENCE_NODE && strcmp((char *)key->data.scalar.value, "alumirrors") == 0) { @@ -1050,6 +1225,23 @@ static int global_parser(void *arg, yaml_document_t *doc, glob->trust_sip_from = check_onoff((char *)value->data.scalar.value); } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcasecmp((char *)key->data.scalar.value, + "maskimapcreds") == 0) { + + glob->mask_imap_creds = check_onoff((char *)value->data.scalar.value); + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, + "emailsessiontimeouts") == 0) { + if (parse_email_timeouts_config(glob, doc, value) == -1) { + return -1; + } + } + if (key->type == YAML_SCALAR_NODE && value->type == YAML_SCALAR_NODE && strcmp((char *)key->data.scalar.value, "RMQname") == 0) { @@ -1226,6 +1418,12 @@ static int mediator_parser(void *arg, yaml_document_t *doc, SET_CONFIG_STRING_OPTION(state->RMQ_conf.pass, value); } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SCALAR_NODE && + strcmp((char *)key->data.scalar.value, "RMQlocalpass") == 0) { + SET_CONFIG_STRING_OPTION(state->RMQ_conf.internalpass, value); + } + if (key->type == YAML_SCALAR_NODE && value->type == YAML_SCALAR_NODE && strcmp((char *)key->data.scalar.value, "RMQhostname") == 0) { @@ -1284,6 +1482,15 @@ static int intercept_parser(void *arg, yaml_document_t *doc, } } + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, "emailintercepts") == 0) { + if (parse_emailintercept_list(&state->emailintercepts, doc, + value) == -1) { + return -1; + } + } + if (key->type == YAML_SCALAR_NODE && value->type == YAML_SEQUENCE_NODE && strcmp((char *)key->data.scalar.value, "agencies") == 0) { @@ -1326,6 +1533,33 @@ static int intercept_parser(void *arg, yaml_document_t *doc, return -1; } } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, "smtpservers") == 0) { + if (parse_core_server_list(&state->smtpservers, + OPENLI_CORE_SERVER_SMTP, doc, value) == -1) { + return -1; + } + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, "imapservers") == 0) { + if (parse_core_server_list(&state->imapservers, + OPENLI_CORE_SERVER_IMAP, doc, value) == -1) { + return -1; + } + } + + if (key->type == YAML_SCALAR_NODE && + value->type == YAML_SEQUENCE_NODE && + strcmp((char *)key->data.scalar.value, "pop3servers") == 0) { + if (parse_core_server_list(&state->pop3servers, + OPENLI_CORE_SERVER_POP3, doc, value) == -1) { + return -1; + } + } return 0; } diff --git a/src/coreserver.c b/src/coreserver.c index a92e4951..ddea646b 100644 --- a/src/coreserver.c +++ b/src/coreserver.c @@ -41,6 +41,12 @@ const char *coreserver_type_to_string(uint8_t cstype) { return "DHCP"; case 
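The provisioner changes above also add three core-server lists (smtpservers, imapservers, pop3servers) so collectors know which traffic to treat as email, alongside the collector-side maskimapcreds on/off option. A rough sketch of the server lists follows, assuming each entry uses the same ip/port fields as the existing core server lists consumed by parse_core_server_list() -- that helper is not part of this diff, so the field names are an assumption and the addresses are placeholders:

    smtpservers:
        - ip: 192.0.2.25
          port: 25
    imapservers:
        - ip: 192.0.2.143
          port: 143
    pop3servers:
        - ip: 192.0.2.110
          port: 110
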
OPENLI_CORE_SERVER_SIP: return "SIP"; + case OPENLI_CORE_SERVER_SMTP: + return "SMTP"; + case OPENLI_CORE_SERVER_IMAP: + return "IMAP"; + case OPENLI_CORE_SERVER_POP3: + return "POP3"; case OPENLI_CORE_SERVER_GTP: return "GTP"; case OPENLI_CORE_SERVER_ALUMIRROR: @@ -90,9 +96,11 @@ char *construct_coreserver_key(coreserver_t *cs) { } if (cs->portstr == NULL) { - snprintf(keyspace, 256, "%s-default", cs->ipstr); + snprintf(keyspace, 256, "%s-default-%s", cs->ipstr, + coreserver_type_to_string(cs->servertype)); } else { - snprintf(keyspace, 256, "%s-%s", cs->ipstr, cs->portstr); + snprintf(keyspace, 256, "%s-%s-%s", cs->ipstr, cs->portstr, + coreserver_type_to_string(cs->servertype)); } cs->serverkey = strdup(keyspace); return cs->serverkey; @@ -123,7 +131,7 @@ coreserver_t *match_packet_to_coreserver(coreserver_t *serverlist, cs->info = populate_addrinfo(cs->ipstr, cs->portstr, SOCK_DGRAM); if (!cs->info) { logger(LOG_INFO, - "Removing %s:%s from %s ALU source list due to getaddrinfo error", + "Removing %s:%s from %s core server list due to getaddrinfo error", cs->ipstr, cs->portstr, coreserver_type_to_string(cs->servertype)); HASH_DELETE(hh, serverlist, cs); diff --git a/src/coreserver.h b/src/coreserver.h index 42e48c67..e37d751d 100644 --- a/src/coreserver.h +++ b/src/coreserver.h @@ -43,6 +43,9 @@ enum { OPENLI_CORE_SERVER_SIP, OPENLI_CORE_SERVER_ALUMIRROR, OPENLI_CORE_SERVER_GTP, + OPENLI_CORE_SERVER_SMTP, + OPENLI_CORE_SERVER_IMAP, + OPENLI_CORE_SERVER_POP3, }; typedef struct packetinfo { diff --git a/src/etsili_core.c b/src/etsili_core.c index 4b7f43ea..c590ab77 100644 --- a/src/etsili_core.c +++ b/src/etsili_core.c @@ -31,12 +31,15 @@ #include "etsili_core.h" #include "collector/ipiri.h" #include "collector/umtsiri.h" +#include "collector/emailiri.h" #include "logger.h" uint8_t etsi_ipccoid[4] = {0x05, 0x03, 0x0a, 0x02}; uint8_t etsi_ipirioid[4] = {0x05, 0x03, 0x0a, 0x01}; uint8_t etsi_ipmmccoid[4] = {0x05, 0x05, 0x06, 0x02}; uint8_t etsi_ipmmirioid[4] = {0x05, 0x05, 0x06, 0x01}; +uint8_t etsi_emailirioid[4] = {0x05, 0x02, 0x0f, 0x01}; +uint8_t etsi_emailccoid[4] = {0x05, 0x02, 0x0f, 0x02}; uint8_t etsi_umtsirioid[9] = {0x00, 0x04, 0x00, 0x02, 0x02, 0x04, 0x01, 0x0f, 0x05}; uint8_t etsi_hi1operationoid[8] = {0x00, 0x04, 0x00, 0x02, 0x02, 0x00, 0x01, 0x06}; @@ -98,8 +101,12 @@ static inline void encode_hi1_notification_body(wandder_encoder_t *encoder, sizeof(tv)); wandder_encode_endseq(encoder); // End Timestamp - /* TODO? target-Information? */ - + /* target-Information? 
*/ + if (not_data->target_info) { + wandder_encode_next(encoder, WANDDER_TAG_OCTETSTRING, + WANDDER_CLASS_CONTEXT_PRIMITIVE, 6, not_data->target_info, + strlen(not_data->target_info)); + } wandder_encode_endseq(encoder); // End Notification } @@ -150,6 +157,37 @@ wandder_encoded_result_t *encode_umtscc_body(wandder_encoder_t *encoder, return wandder_encode_finish(encoder); } +static inline void encode_emailcc_body(wandder_encoder_t *encoder, + wandder_encode_job_t *precomputed, void *content, uint32_t len, + uint8_t format, uint8_t dir) { + + wandder_encode_job_t *jobarray[7]; + uint32_t format32 = format; + + jobarray[0] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_2]); + jobarray[1] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_1]); + jobarray[2] = &(precomputed[OPENLI_PREENCODE_USEQUENCE]); + + if (dir == 0) { + jobarray[3] = &(precomputed[OPENLI_PREENCODE_DIRFROM]); + } else if (dir == 1) { + jobarray[3] = &(precomputed[OPENLI_PREENCODE_DIRTO]); + } else { + jobarray[3] = &(precomputed[OPENLI_PREENCODE_DIRUNKNOWN]); + } + jobarray[4] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_2]); + jobarray[5] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_1]); + jobarray[6] = &(precomputed[OPENLI_PREENCODE_EMAILCCOID]); + wandder_encode_next_preencoded(encoder, jobarray, 7); + + wandder_encode_next(encoder, WANDDER_TAG_ENUM, + WANDDER_CLASS_CONTEXT_PRIMITIVE, 1, &format32, sizeof(format32)); + wandder_encode_next(encoder, WANDDER_TAG_IPPACKET, + WANDDER_CLASS_CONTEXT_PRIMITIVE, 2, content, len); + END_ENCODED_SEQUENCE(encoder, 5); + +} + static inline void encode_ipcc_body(wandder_encoder_t *encoder, wandder_encode_job_t *precomputed, void *ipcontent, uint32_t iplen, uint8_t dir) { @@ -358,6 +396,21 @@ static inline void encode_ipiri_id(wandder_encoder_t *encoder, wandder_encode_endseq(encoder); } +static inline void encode_email_recipients(wandder_encoder_t *encoder, + etsili_email_recipients_t *recipients) { + + int i; + + ENC_USEQUENCE(encoder); + for (i = 0; i < recipients->count; i++) { + wandder_encode_next(encoder, WANDDER_TAG_UTF8STR, + WANDDER_CLASS_CONTEXT_PRIMITIVE, 0, recipients->addresses[i], + strlen(recipients->addresses[i])); + } + + END_ENCODED_SEQUENCE(encoder, 1); +} + static inline void encode_other_targets(wandder_encoder_t *encoder, etsili_other_targets_t *others) { @@ -649,6 +702,82 @@ wandder_encoded_result_t *encode_umtsiri_body(wandder_encoder_t *encoder, } +wandder_encoded_result_t *encode_emailiri_body(wandder_encoder_t *encoder, + wandder_encode_job_t *precomputed, + etsili_iri_type_t iritype, etsili_generic_t **params) { + + etsili_generic_t *p, *tmp; + wandder_encode_job_t *jobarray[4]; + + jobarray[0] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_2]); + jobarray[1] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_0]); + jobarray[2] = &(precomputed[OPENLI_PREENCODE_USEQUENCE]); + wandder_encode_next_preencoded(encoder, jobarray, 3); + + wandder_encode_next(encoder, WANDDER_TAG_ENUM, + WANDDER_CLASS_CONTEXT_PRIMITIVE, 0, &iritype, + sizeof(iritype)); + + jobarray[0] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_2]); + jobarray[1] = &(precomputed[OPENLI_PREENCODE_CSEQUENCE_1]); + jobarray[2] = &(precomputed[OPENLI_PREENCODE_EMAILIRIOID]); + wandder_encode_next_preencoded(encoder, jobarray, 3); + + HASH_SRT(hh, *params, sort_etsili_generic); + + HASH_ITER(hh, *params, p, tmp) { + switch(p->itemnum) { + case EMAILIRI_CONTENTS_EVENT_TYPE: + case EMAILIRI_CONTENTS_PROTOCOL_ID: + case EMAILIRI_CONTENTS_STATUS: + case EMAILIRI_CONTENTS_SENDER_VALIDITY: + wandder_encode_next(encoder, 
WANDDER_TAG_ENUM, + WANDDER_CLASS_CONTEXT_PRIMITIVE, p->itemnum, + p->itemptr, p->itemlen); + break; + case EMAILIRI_CONTENTS_CLIENT_ADDRESS: + case EMAILIRI_CONTENTS_SERVER_ADDRESS: + ENC_CSEQUENCE(encoder, p->itemnum); + encode_ipaddress(encoder, (etsili_ipaddress_t *)(p->itemptr)); + END_ENCODED_SEQUENCE(encoder, 1); + break; + case EMAILIRI_CONTENTS_CLIENT_PORT: + case EMAILIRI_CONTENTS_SERVER_PORT: + case EMAILIRI_CONTENTS_SERVER_OCTETS_SENT: + case EMAILIRI_CONTENTS_CLIENT_OCTETS_SENT: + case EMAILIRI_CONTENTS_TOTAL_RECIPIENTS: + wandder_encode_next(encoder, WANDDER_TAG_INTEGER, + WANDDER_CLASS_CONTEXT_PRIMITIVE, p->itemnum, + p->itemptr, p->itemlen); + break; + case EMAILIRI_CONTENTS_SENDER: + wandder_encode_next(encoder, WANDDER_TAG_UTF8STR, + WANDDER_CLASS_CONTEXT_PRIMITIVE, p->itemnum, + p->itemptr, p->itemlen); + break; + case EMAILIRI_CONTENTS_MESSAGE_ID: + case EMAILIRI_CONTENTS_NATIONAL_PARAMETER: + wandder_encode_next(encoder, WANDDER_TAG_OCTETSTRING, + WANDDER_CLASS_CONTEXT_PRIMITIVE, p->itemnum, + p->itemptr, p->itemlen); + break; + case EMAILIRI_CONTENTS_RECIPIENTS: + ENC_CSEQUENCE(encoder, p->itemnum); + encode_email_recipients(encoder, + (etsili_email_recipients_t *)(p->itemptr)); + END_ENCODED_SEQUENCE(encoder, 1); + break; + + case EMAILIRI_CONTENTS_NATIONAL_ASN1_PARAMETERS: + case EMAILIRI_CONTENTS_AAA_INFORMATION: + /* TODO? */ + break; + } + } + END_ENCODED_SEQUENCE(encoder, 5); + return wandder_encode_finish(encoder); +} + wandder_encoded_result_t *encode_ipiri_body(wandder_encoder_t *encoder, wandder_encode_job_t *precomputed, etsili_iri_type_t iritype, etsili_generic_t **params) { @@ -1255,6 +1384,20 @@ void etsili_preencode_static_fields( p->encodeas = WANDDER_TAG_RELATIVEOID; wandder_encode_preencoded_value(p, etsi_ipirioid, sizeof(etsi_ipirioid)); + p = &(pendarray[OPENLI_PREENCODE_EMAILIRIOID]); + p->identclass = WANDDER_CLASS_CONTEXT_PRIMITIVE; + p->identifier = 0; + p->encodeas = WANDDER_TAG_RELATIVEOID; + wandder_encode_preencoded_value(p, etsi_emailirioid, + sizeof(etsi_emailirioid)); + + p = &(pendarray[OPENLI_PREENCODE_EMAILCCOID]); + p->identclass = WANDDER_CLASS_CONTEXT_PRIMITIVE; + p->identifier = 0; + p->encodeas = WANDDER_TAG_RELATIVEOID; + wandder_encode_preencoded_value(p, etsi_emailccoid, + sizeof(etsi_emailccoid)); + p = &(pendarray[OPENLI_PREENCODE_UMTSIRIOID]); p->identclass = WANDDER_CLASS_CONTEXT_PRIMITIVE; p->identifier = 0; @@ -1547,7 +1690,8 @@ int etsili_create_ipmmcc_template(wandder_encoder_t *encoder, enum { CC_TEMPLATE_TYPE_IPCC, - CC_TEMPLATE_TYPE_UMTSCC + CC_TEMPLATE_TYPE_UMTSCC, + CC_TEMPLATE_TYPE_EMAILCC, }; static int etsili_create_generic_cc_template(wandder_encoder_t *encoder, @@ -1620,6 +1764,49 @@ int etsili_create_umtscc_template(wandder_encoder_t *encoder, ipclen, tplate, CC_TEMPLATE_TYPE_UMTSCC); } +int etsili_create_emailcc_template(wandder_encoder_t *encoder, + wandder_encode_job_t *precomputed, uint8_t format, uint8_t dir, + uint16_t contentlen, encoded_global_template_t *tplate) { + + wandder_encoded_result_t *encres; + const char *funcname = "etsili_create_emailcc_template"; + + if (tplate == NULL) { + logger(LOG_INFO, "OpenLI: called %s with NULL template?", funcname); + return -1; + } + + if (encoder == NULL) { + logger(LOG_INFO, "OpenLI: called %s with NULL encoder?", funcname); + return -1; + } + + reset_wandder_encoder(encoder); + + encode_emailcc_body(encoder, precomputed, NULL, contentlen, format, dir); + encres = wandder_encode_finish(encoder); + + if (encres == NULL || encres->len == 0 || encres->encoded 
== NULL) { + logger(LOG_INFO, "OpenLI: failed to encode ETSI CC body in %s", + funcname); + if (encres) { + wandder_release_encoded_result(encoder, encres); + } + return -1; + } + + /* Copy the encoded header to the template */ + tplate->cc_content.cc_wrap = malloc(encres->len); + memcpy(tplate->cc_content.cc_wrap, encres->encoded, encres->len); + tplate->cc_content.cc_wrap_len = encres->len; + tplate->cc_content.content_size = contentlen; + tplate->cc_content.content_ptr = NULL; + + /* Release the encoded result -- the caller will use the templated copy */ + wandder_release_encoded_result(encoder, encres); + return 0; +} + int etsili_create_ipcc_template(wandder_encoder_t *encoder, wandder_encode_job_t *precomputed, uint8_t dir, uint16_t ipclen, encoded_global_template_t *tplate) { diff --git a/src/etsili_core.h b/src/etsili_core.h index ba715e4f..0651ac12 100644 --- a/src/etsili_core.h +++ b/src/etsili_core.h @@ -31,7 +31,7 @@ #include #include -#include "src/intercept.h" +#include "intercept.h" #define ENC_USEQUENCE(enc) wandder_encode_next(enc, WANDDER_TAG_SEQUENCE, \ WANDDER_CLASS_UNIVERSAL_CONSTRUCT, WANDDER_TAG_SEQUENCE, NULL, 0) @@ -82,6 +82,20 @@ typedef struct etsili_ipaddress { uint8_t *ipvalue; } etsili_ipaddress_t; +typedef struct etsili_email_iri { + uint32_t eventtype; + struct sockaddr_storage *serveraddr; + struct sockaddr_storage *clientaddr; + uint32_t server_octets; + uint32_t client_octets; + uint32_t protocol; + uint32_t recipient_count; + char *sender; + char **recipients; + uint32_t status; + char *messageid; +} etsili_email_iri_content_t; + typedef struct etsili_other_targets { uint8_t count; @@ -89,6 +103,11 @@ typedef struct etsili_other_targets { etsili_ipaddress_t *targets; } etsili_other_targets_t; +typedef struct etsili_email_recipients { + uint32_t count; + char **addresses; +} etsili_email_recipients_t; + typedef enum { ETSILI_IRI_NONE = 0, ETSILI_IRI_BEGIN = 1, @@ -107,6 +126,29 @@ enum { ETSILI_IPADDRESS_REP_TEXT = 2, }; +enum { + ETSILI_EMAIL_STATUS_UNKNOWN = 1, + ETSILI_EMAIL_STATUS_FAILED = 2, + ETSILI_EMAIL_STATUS_SUCCESS = 3 +}; + +enum { + ETSILI_EMAIL_CC_FORMAT_IP = 1, + ETSILI_EMAIL_CC_FORMAT_APP = 2, +}; + +enum { + ETSILI_EMAIL_EVENT_SEND = 1, + ETSILI_EMAIL_EVENT_RECEIVE = 2, + ETSILI_EMAIL_EVENT_DOWNLOAD = 3, + ETSILI_EMAIL_EVENT_LOGON_ATTEMPT = 4, + ETSILI_EMAIL_EVENT_LOGON = 5, + ETSILI_EMAIL_EVENT_LOGON_FAILURE = 6, + ETSILI_EMAIL_EVENT_LOGOFF = 7, + ETSILI_EMAIL_EVENT_PARTIAL_DOWNLOAD = 8, + ETSILI_EMAIL_EVENT_UPLOAD = 9, +}; + enum { ETSILI_IPADDRESS_ASSIGNED_STATIC = 1, ETSILI_IPADDRESS_ASSIGNED_DYNAMIC = 2, @@ -138,6 +180,8 @@ typedef enum { OPENLI_PREENCODE_IPCCOID, OPENLI_PREENCODE_IPIRIOID, OPENLI_PREENCODE_UMTSIRIOID, + OPENLI_PREENCODE_EMAILIRIOID, + OPENLI_PREENCODE_EMAILCCOID, OPENLI_PREENCODE_IPMMCCOID, OPENLI_PREENCODE_DIRFROM, OPENLI_PREENCODE_DIRTO, @@ -211,6 +255,10 @@ wandder_encoded_result_t *encode_umtsiri_body(wandder_encoder_t *encoder, wandder_encode_job_t *precomputed, etsili_iri_type_t iritype, etsili_generic_t *params); +wandder_encoded_result_t *encode_emailiri_body(wandder_encoder_t *encoder, + wandder_encode_job_t *precomputed, + etsili_iri_type_t iritype, etsili_generic_t **params); + wandder_encoded_result_t *encode_etsi_keepalive(wandder_encoder_t *encoder, wandder_etsipshdr_data_t *hdrdata, int64_t seqno); @@ -253,6 +301,9 @@ int etsili_update_header_template(encoded_header_template_t *tplate, int etsili_create_ipcc_template(wandder_encoder_t *encoder, wandder_encode_job_t *precomputed, uint8_t dir, uint16_t 
ipclen, encoded_global_template_t *tplate); +int etsili_create_emailcc_template(wandder_encoder_t *encoder, + wandder_encode_job_t *precomputed, uint8_t format, uint8_t dir, + uint16_t ipclen, encoded_global_template_t *tplate); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/export_buffer.c b/src/export_buffer.c index 94310ad1..6662bedf 100644 --- a/src/export_buffer.c +++ b/src/export_buffer.c @@ -59,6 +59,14 @@ uint64_t get_buffered_amount(export_buffer_t *buf) { return (buf->buftail - (buf->bufhead + buf->deadfront)); } +uint8_t *get_buffered_head(export_buffer_t *buf, uint64_t *rem) { + *rem = get_buffered_amount(buf); + if (*rem == 0) { + return NULL; + } + return (buf->bufhead + buf->deadfront); +} + void reset_export_buffer(export_buffer_t *buf) { buf->partialfront = 0; buf->partialrem = 0; @@ -408,4 +416,17 @@ int transmit_buffered_records_RMQ(export_buffer_t *buf, return sent; } +int advance_export_buffer_head(export_buffer_t *buf, uint64_t amount) { + + uint64_t rem = get_buffered_amount(buf); + + if (amount > rem) { + amount = rem; + } + + buf->deadfront += amount; + post_transmit(buf); + return 0; +} + // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/export_buffer.h b/src/export_buffer.h index 103b0efd..1fa96fe9 100644 --- a/src/export_buffer.h +++ b/src/export_buffer.h @@ -79,6 +79,8 @@ int transmit_buffered_records_RMQ(export_buffer_t *buf, amqp_bytes_t exchange, amqp_bytes_t routing_key, uint64_t bytelimit); int transmit_heartbeat(int fd, SSL *ssl); +int advance_export_buffer_head(export_buffer_t *buf, uint64_t amount); +uint8_t *get_buffered_head(export_buffer_t *buf, uint64_t *rem); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/intercept.c b/src/intercept.c index 5abe2a7e..ef3fac77 100644 --- a/src/intercept.c +++ b/src/intercept.c @@ -48,6 +48,7 @@ static inline void copy_intercept_common(intercept_common_t *src, dest->hi1_seqno = src->hi1_seqno; dest->tostart_time = src->tostart_time; dest->toend_time = src->toend_time; + dest->tomediate = src->tomediate; } int are_sip_identities_same(openli_sip_identity_t *a, @@ -72,6 +73,19 @@ int are_sip_identities_same(openli_sip_identity_t *a, return 0; } +void intercept_mediation_mode_as_string(intercept_outputs_t mode, + char *space, int spacelen) { + + if (mode == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + snprintf(space, spacelen, "IRI-Only"); + } else if (mode == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + snprintf(space, spacelen, "CC-Only"); + } else { + snprintf(space, spacelen, "Both"); + } + +} + sipregister_t *create_sipregister(voipintercept_t *vint, char *callid, uint32_t cin) { sipregister_t *newreg; @@ -86,6 +100,13 @@ sipregister_t *create_sipregister(voipintercept_t *vint, char *callid, return newreg; } +emailsession_t *create_emailsession(emailintercept_t *mailint, char *sessionid, + uint32_t cin) { + + /* TODO */ + return NULL; +} + rtpstreaminf_t *create_rtpstream(voipintercept_t *vint, uint32_t cin) { rtpstreaminf_t *newcin = NULL; @@ -201,6 +222,67 @@ static inline void free_intercept_common(intercept_common_t *cept) { } } +char *list_email_targets(emailintercept_t *m, int maxchars) { + + char *space = NULL; + int spaceused = 0; + int required = 0; + + email_target_t *tgt, *tmp; + + HASH_ITER(hh, m->targets, tgt, tmp) { + + if (!space) { + space = calloc(1, maxchars + 1); + } + + if (!tgt->address) { + continue; + } + + required = strlen(tgt->address); + if (spaceused > 0) { + required += 1; + } + + /* Only allowed a certain number of 
characters in the HI1 message, so + * stop here */ + if (required > maxchars - spaceused) { + break; + } + + if (spaceused > 0) { + *(space + spaceused) = ','; + spaceused ++; + } + memcpy(space + spaceused, tgt->address, strlen(tgt->address)); + spaceused += strlen(tgt->address); + } + return space; +} +static void free_email_targets(emailintercept_t *m) { + + email_target_t *tgt, *tmp; + + HASH_ITER(hh, m->targets, tgt, tmp) { + if (tgt->address) { + free(tgt->address); + } + HASH_DELETE(hh, m->targets, tgt); + free(tgt); + } + +} + +void free_single_emailintercept(emailintercept_t *m) { + + free_intercept_common(&(m->common)); + if (m->targets) { + free_email_targets(m); + } + free(m); +} + void free_single_ipintercept(ipintercept_t *cept) { static_ipranges_t *ipr, *tmp; @@ -217,6 +299,14 @@ void free_single_ipintercept(ipintercept_t *cept) { free(cept); } +void free_all_emailintercepts(emailintercept_t **mailintercepts) { + emailintercept_t *cept, *tmp; + HASH_ITER(hh_liid, *mailintercepts, cept, tmp) { + HASH_DELETE(hh_liid, *mailintercepts, cept); + free_single_emailintercept(cept); + } +} + void free_all_ipintercepts(ipintercept_t **interceptlist) { ipintercept_t *cept, *tmp; @@ -290,6 +380,58 @@ static void free_voip_registrations(sipregister_t *sipregs) { } +char *list_sip_targets(voipintercept_t *v, int maxchars) { + + char *space = NULL; + int spaceused = 0; + int required = 0; + + openli_sip_identity_t *sipid; + libtrace_list_node_t *n; + n = v->targets->head; + + while (n) { + sipid = *((openli_sip_identity_t **)(n->data)); + + if (!space) { + space = calloc(1, maxchars + 1); + } + n = n->next; + + if (!sipid->username) { + continue; + } + + required = strlen(sipid->username); + if (sipid->realm) { + required += (1 + strlen(sipid->realm)); + } + if (spaceused > 0) { + required += 1; + } + + /* Only allowed a certain number of characters in the HI1 message, so + * stop here */ + if (required > maxchars - spaceused) { + break; + } + + if (spaceused > 0) { + *(space + spaceused) = ','; + spaceused ++; + } + memcpy(space + spaceused, sipid->username, strlen(sipid->username)); + spaceused += strlen(sipid->username); + if (sipid->realm) { + *(space + spaceused) = '@'; + spaceused ++; + memcpy(space + spaceused, sipid->realm, strlen(sipid->realm)); + spaceused += strlen(sipid->realm); + } + } + return space; +} + static void free_sip_targets(libtrace_list_t *targets) { libtrace_list_node_t *n; @@ -531,6 +673,56 @@ void free_all_ipsessions(ipsession_t **sessions) { } } +void free_all_emailsessions(emailsession_t **sessions) { + /* TODO */ + +} + +int add_intercept_to_email_user_intercept_list( + email_user_intercept_list_t **ulist, emailintercept_t *em, + email_target_t *tgt) { + + email_user_intercept_list_t *found; + email_intercept_ref_t *intref; + + if (tgt->address == NULL) { + logger(LOG_INFO, + "OpenLI: attempted to add address-less email intercept to user intercept list."); + return -1; + } + + HASH_FIND(hh, *ulist, tgt->address, strlen(tgt->address), found); + if (!found) { + found = (email_user_intercept_list_t *) + malloc(sizeof(email_user_intercept_list_t)); + if (!found) { + logger(LOG_INFO, + "OpenLI: out of memory in add_intercept_to_email_user_intercept_list()"); + return -1; + } + found->emailaddr = strdup(tgt->address); + if (!found->emailaddr) { + free(found); + logger(LOG_INFO, + "OpenLI: out of memory in add_intercept_to_email_user_intercept_list()"); + return -1; + } + found->intlist = NULL; + HASH_ADD_KEYPTR(hh, *ulist, found->emailaddr, 
strlen(found->emailaddr), + found); + } + + HASH_FIND(hh, found->intlist, em->common.liid, em->common.liid_len, intref); + if (!intref) { + intref = calloc(1, sizeof(email_intercept_ref_t)); + intref->em = em; + + HASH_ADD_KEYPTR(hh, found->intlist, em->common.liid, + em->common.liid_len, intref); + } + return 0; +} + int add_intercept_to_user_intercept_list(user_intercept_list_t **ulist, ipintercept_t *ipint) { @@ -577,6 +769,44 @@ int add_intercept_to_user_intercept_list(user_intercept_list_t **ulist, return 0; } +int remove_intercept_from_email_user_intercept_list( + email_user_intercept_list_t **ulist, emailintercept_t *em, + email_target_t *tgt) { + + email_user_intercept_list_t *found; + email_intercept_ref_t *existing; + + if (tgt->address == NULL) { + logger(LOG_INFO, + "OpenLI: attempted to remove address-less email intercept from user intercept list."); + return -1; + } + + HASH_FIND(hh, *ulist, tgt->address, strlen(tgt->address), found); + + if (!found) { + return 0; + } + + HASH_FIND(hh, found->intlist, em->common.liid, em->common.liid_len, + existing); + if (!existing) { + return 0; + } + + HASH_DELETE(hh, found->intlist, existing); + free(existing); + + /* If there are no intercepts left associated with this address, we can + * remove them from the user list */ + if (HASH_CNT(hh, found->intlist) == 0) { + HASH_DELETE(hh, *ulist, found); + free(found->emailaddr); + free(found); + } + return 0; +} + int remove_intercept_from_user_intercept_list(user_intercept_list_t **ulist, ipintercept_t *ipint) { @@ -632,6 +862,23 @@ void clear_user_intercept_list(user_intercept_list_t *ulist) { } } +void clear_email_user_intercept_list(email_user_intercept_list_t *ulist) { + email_user_intercept_list_t *u, *tmp; + email_intercept_ref_t *em, *tmp2; + + HASH_ITER(hh, ulist, u, tmp) { + /* Again, don't free the email intercepts in the list -- someone else + * should have that covered. 
*/ + HASH_ITER(hh, u->intlist, em, tmp2) { + HASH_DELETE(hh, u->intlist, em); + free(em); + } + HASH_DELETE(hh, ulist, u); + free(u->emailaddr); + free(u); + } +} + uint32_t map_radius_ident_string(char *confstr) { if (strcasecmp(confstr, "csid") == 0) { return (1 << OPENLI_IPINT_OPTION_RADIUS_IDENT_CSID); diff --git a/src/intercept.h b/src/intercept.h index a018d1af..c491fe76 100644 --- a/src/intercept.h +++ b/src/intercept.h @@ -58,6 +58,12 @@ typedef enum { OPENLI_IPINT_OPTION_RADIUS_IDENT_USER = 1, } ipintercept_options_t; +typedef enum { + OPENLI_INTERCEPT_OUTPUTS_ALL = 0, + OPENLI_INTERCEPT_OUTPUTS_IRIONLY = 1, + OPENLI_INTERCEPT_OUTPUTS_CCONLY = 2, +} intercept_outputs_t; + typedef enum { HI1_LI_ACTIVATED = 1, HI1_LI_DEACTIVATED = 2, @@ -86,6 +92,7 @@ typedef struct intercept_common { uint32_t hi1_seqno; uint64_t tostart_time; uint64_t toend_time; + intercept_outputs_t tomediate; } intercept_common_t; typedef struct hi1_notify_data { @@ -97,6 +104,7 @@ typedef struct hi1_notify_data { uint32_t seqno; uint64_t ts_sec; uint32_t ts_usec; + char *target_info; } hi1_notify_data_t; typedef struct ipintercept { @@ -119,12 +127,39 @@ typedef struct ipintercept { UT_hash_handle hh_user; } ipintercept_t; +typedef struct email_target { + char *address; + uint8_t awaitingconfirm; + UT_hash_handle hh; +} email_target_t; + typedef struct userinterceptlist { char *username; ipintercept_t *intlist; UT_hash_handle hh; } user_intercept_list_t; +typedef struct emailintercept { + intercept_common_t common; + email_target_t *targets; + + uint8_t awaitingconfirm; + UT_hash_handle hh_liid; + +} emailintercept_t; + +typedef struct email_intercept_ref { + emailintercept_t *em; + UT_hash_handle hh; +} email_intercept_ref_t; + +typedef struct emailinterceptlist { + char *emailaddr; + email_intercept_ref_t *intlist; + UT_hash_handle hh; +} email_user_intercept_list_t; + + typedef struct sip_identity { char *username; int username_len; @@ -172,6 +207,7 @@ typedef struct voipsdpmap { typedef struct rtpstreaminf rtpstreaminf_t; typedef struct ipsession ipsession_t; +typedef struct emailsession emailsession_t; typedef struct vendmirror_intercept vendmirror_intercept_t; typedef struct staticipsession staticipsession_t; typedef struct sipregister sipregister_t; @@ -182,8 +218,17 @@ typedef struct sipregister sipregister_t; (strcmp(a->common.targetagency, b->common.targetagency) == 0) && \ (a->common.tostart_time == b->common.tostart_time) && \ (a->common.toend_time == b->common.toend_time) && \ + (a->common.tomediate == b->common.tomediate) && \ (a->options == b->options)) +#define email_intercept_equal(a,b) \ + ((strcmp(a->common.authcc, b->common.authcc) == 0) && \ + (strcmp(a->common.delivcc, b->common.delivcc) == 0) && \ + (strcmp(a->common.targetagency, b->common.targetagency) == 0) && \ + (a->common.tostart_time == b->common.tostart_time) && \ + (a->common.toend_time == b->common.toend_time) && \ + (a->common.tomediate == b->common.tomediate)) + typedef struct voipintercept { @@ -220,6 +265,40 @@ struct sipmediastream { char *mediatype; }; +typedef struct email_participant { + char *emailaddr; + uint8_t is_sender; + + UT_hash_handle hh; +} email_participant_t; + +struct emailsession { + char *key; + uint32_t cin; + + char *session_id; + int ai_family; + struct sockaddr_storage *serveraddr; + struct sockaddr_storage *clientaddr; + uint32_t server_octets; + uint32_t client_octets; + uint64_t login_time; + uint8_t login_sent; + uint64_t event_time; + + email_participant_t sender; + email_participant_t 
*participants; + + uint8_t protocol; + uint8_t currstate; + uint8_t mask_credentials; + void *timeout_ev; + + void *proto_state; + + UT_hash_handle hh; +}; + struct rtpstreaminf { char *streamkey; uint32_t cin; @@ -298,23 +377,40 @@ typedef struct default_radius_user { void free_all_ipintercepts(ipintercept_t **interceptlist); void free_all_voipintercepts(voipintercept_t **vintercepts); +void free_all_emailintercepts(emailintercept_t **mailintercepts); void free_all_rtpstreams(rtpstreaminf_t **streams); void free_all_ipsessions(ipsession_t **sessions); +void free_all_emailsessions(emailsession_t **sessions); void free_all_vendmirror_intercepts(vendmirror_intercept_list_t **mirror_intercepts); void free_all_staticipsessions(staticipsession_t **statintercepts); void free_voip_cinmap(voipcinmap_t *cins); void free_single_ipintercept(ipintercept_t *cept); void free_single_voipintercept(voipintercept_t *v); +void free_single_emailintercept(emailintercept_t *m); void free_single_ipsession(ipsession_t *sess); +void free_single_emailsession(emailsession_t *sess); void free_single_rtpstream(rtpstreaminf_t *rtp); void free_single_vendmirror_intercept(vendmirror_intercept_t *mirror); void free_single_staticipsession(staticipsession_t *statint); void free_single_staticiprange(static_ipranges_t *ipr); +/* Create a comma-separated string containing all of the SIP target IDs + * for a VoIP intercept. + */ +char *list_sip_targets(voipintercept_t *v, int maxchars); + +/* Create a comma-separated string containing all of the target addresses + * for an email intercept. + */ +char *list_email_targets(emailintercept_t *m, int maxchars); + sipregister_t *create_sipregister(voipintercept_t *vint, char *callid, uint32_t cin); +emailsession_t *create_emailsession(emailintercept_t *mailint, char *sessionid, + uint32_t cin); + rtpstreaminf_t *create_rtpstream(voipintercept_t *vint, uint32_t cin); rtpstreaminf_t *deep_copy_rtpstream(rtpstreaminf_t *rtp); @@ -335,10 +431,21 @@ int remove_intercept_from_user_intercept_list(user_intercept_list_t **ulist, int add_intercept_to_user_intercept_list(user_intercept_list_t **ulist, ipintercept_t *ipint); +void clear_email_user_intercept_list(email_user_intercept_list_t *ulist); +int remove_intercept_from_email_user_intercept_list( + email_user_intercept_list_t **ulist, emailintercept_t *em, + email_target_t *tgt); +int add_intercept_to_email_user_intercept_list( + email_user_intercept_list_t **ulist, emailintercept_t *em, + email_target_t *tgt); + const char *get_access_type_string(internet_access_method_t method); const char *get_radius_ident_string(uint32_t radoptions); internet_access_method_t map_access_type_string(char *confstr); uint32_t map_radius_ident_string(char *confstr); + +void intercept_mediation_mode_as_string(intercept_outputs_t mode, + char *space, int spacelen); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/coll_recv_thread.c b/src/mediator/coll_recv_thread.c new file mode 100644 index 00000000..fc6779b6 --- /dev/null +++ b/src/mediator/coll_recv_thread.c @@ -0,0 +1,974 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. 
For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#include +#include + +#include "netcomms.h" +#include "util.h" +#include "logger.h" +#include "coll_recv_thread.h" +#include "lea_send_thread.h" +#include "mediator_rmq.h" +#include "med_epoll.h" + +/** This file implements a "collector receive" thread for the OpenLI mediator. + * Each OpenLI collector that reports to a mediator will be handled using + * a separate instance of one of these threads. + * + * The core functionality of a collector receive thread is to: + * - receive LI records from the collector via either a RMQ queue hosted + * on the collector OR a TCP socket + * - insert each received record into the appropriate internal RMQ queue, + * named after the LIID that the record was intercepted for and the + * record type (e.g. IRI or CC). + * + */ + +/** Maximum amount of data (in bytes) to receive from a collector before + * returning to the main epoll loop + */ +#define MAX_COLL_RECV (10 * 1024 * 1024) + +/** Period of inactivity before we decide to remove our internal state for + * an LIID queue declared on the local RabbitMQ instance -- if we see the + * LIID again after removal, we'll just re-declare the queue. + * + * Note: the queue will NOT be deleted from RabbitMQ until the "x-expires" + * threshold for inactivity is reached. This is set when the queue is + * declared -- 30 mins is the original default I've set. + */ +#define LIID_QUEUE_EXPIRY_THRESH (10 * 60) + +/** Initialises the shared configuration for the collectors managed by a + * mediator. + * + * @param config The global config for the collectors that is to be + * initialised. + * @param usetls The value of the global flag that indicates whether + * new collector connections must use TLS. + * @param sslconf A pointer to the SSL configuration for this mediator. + * @param rmqconf A pointer to the RabbitMQ configuration for this + * mediator. + * @param mediatorid The ID number of the mediator + */ +void init_med_collector_config(mediator_collector_config_t *config, + uint8_t usetls, openli_ssl_config_t *sslconf, + openli_RMQ_config_t *rmqconf, uint32_t mediatorid) { + + config->usingtls = usetls; + config->sslconf = sslconf; + config->rmqconf = rmqconf; + config->parent_mediatorid = mediatorid; + + pthread_mutex_init(&(config->mutex), NULL); +} + +/** Updates the shared configuration for the collectors managed by a + * mediator. + * + * @param config The global config for the collectors that is to be + * updated. + * @param usetls The value of the global flag that indicates whether + * new collector connections must use TLS. 
+ * @param mediatorid The ID number of the mediator + */ +void update_med_collector_config(mediator_collector_config_t *config, + uint8_t usetls, uint32_t mediatorid) { + + pthread_mutex_lock(&(config->mutex)); + + config->usingtls = usetls; + config->parent_mediatorid = mediatorid; + + pthread_mutex_unlock(&(config->mutex)); +} + +/** Frees any resources allocated to the shared collector configuration. + * + * + * @param config The global config to be destroyed + */ +void destroy_med_collector_config(mediator_collector_config_t *config) { + pthread_mutex_destroy(&(config->mutex)); +} + +/** Grabs the mutex for the shared collector configuration to prevent + * any other threads from modifying it while we're reading it. + * + * @param config The global config to be locked + */ +void lock_med_collector_config(mediator_collector_config_t *config) { + pthread_mutex_lock(&(config->mutex)); +} + +/** Releases the mutex for the shared collector configuration. + * + * @param config The global config to be unlocked + */ +void unlock_med_collector_config(mediator_collector_config_t *config) { + pthread_mutex_unlock(&(config->mutex)); +} + +/** Removes any local state for LIIDs which our collector has not sent + * any data for recently. + * + * @param col The state object for this collector receive thread + */ +static void remove_expired_liid_queues(coll_recv_t *col) { + col_known_liid_t *known, *tmp; + struct timeval tv; + + gettimeofday(&tv, NULL); + + HASH_ITER(hh, col->known_liids, known, tmp) { + if (tv.tv_sec - known->lastseen < LIID_QUEUE_EXPIRY_THRESH) { + /* Not expired yet, so redeclare the queue to keep rabbitmq + * from deleting it accidentally */ + declare_mediator_liid_RMQ_queue(col->amqp_producer_state, + known->liid, known->liidlen); + continue; + } + + if (known->liid) { + free(known->liid); + } + HASH_DELETE(hh, col->known_liids, known); + free(known); + } +} + +/** Perform the necessary setup to establish a TLS connection with the + * OpenLI collector that we are responsible for. + * + * @param col The state object for this collector receive thread + * + * @return -1 if an error occurs, MED_EPOLL_COLLECTOR_HANDSHAKE if the + * connection is established but TLS handshake is incomplete, + * MED_EPOLL_COLLECTOR if the connection is established and the + * TLS handshake has completed. + */ +static int start_collector_ssl(coll_recv_t *col) { + + int r; + + lock_med_collector_config(col->parentconfig); + r = listen_ssl_socket(col->parentconfig->sslconf, &col->ssl, col->col_fd); + unlock_med_collector_config(col->parentconfig); + + if (r == OPENLI_SSL_CONNECT_FAILED) { + close(col->col_fd); + col->col_fd = -1; + SSL_free(col->ssl); + col->ssl = NULL; + + if (r != col->lastsslerror) { + logger(LOG_INFO, + "OpenLI Mediator: SSL handshake failed for collector %s", + col->ipaddr); + } + col->lastsslerror = r; + return -1; + } + + col->using_tls = 1; + if (r == OPENLI_SSL_CONNECT_WAITING) { + return MED_EPOLL_COLLECTOR_HANDSHAKE; + } + col->lastsslerror = 0; + return MED_EPOLL_COLLECTOR; +} + +/** Connects to the RMQ queue for this mediator on the collector and + * (if successful) creates an epoll read event for the underlying TCP + * socket for the RMQ connection. 
+ * + * @param col The state object for this collector receive thread + * @param epoll_fd The epoll file descriptor to add the read event to + * + * @return NULL if the RMQ connection cannot established or added to the + * epoll event list, otherwise returns a pointer to the mediator + * epoll event structure that was successfully created by this + * function. + */ +static med_epoll_ev_t *prepare_collector_receive_rmq(coll_recv_t *col, + int epoll_fd) { + + med_epoll_ev_t *rmqev = NULL; + int rmq_sock = -1; + + /* method defined in mediator_rmq.c -- establishes the RMQ connection */ + amqp_connection_state_t amqp_state = join_collector_RMQ(col); + + if (!amqp_state) { + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: error while connecting to RMQ for collector %s", col->ipaddr); + } + col->disabled_log = 1; + return NULL; + } + + /* Get the file descriptor from the RMQ connection so we can listen to + * it via epoll. + */ + rmq_sock = amqp_get_sockfd(amqp_state); + if (rmq_sock < 0) { + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: bad socket returned by RMQ for collector %s", col->ipaddr); + } + col->disabled_log = 1; + return NULL; + } + + col->amqp_state = amqp_state; + /* Create a net buffer for receiving data from the RMQ socket */ + if (col->incoming_rmq) { + destroy_net_buffer(col->incoming_rmq); + } + col->incoming_rmq = create_net_buffer(NETBUF_RECV, 0, NULL); + + /* Create an epoll event and add it to our epoll FD set */ + rmqev = create_mediator_fdevent(epoll_fd, col, MED_EPOLL_COL_RMQ, rmq_sock, + EPOLLIN | EPOLLRDHUP); + if (rmqev == NULL) { + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: unable to add RMQ fd for collector %s to epoll: %s", + col->ipaddr, strerror(errno)); + } + col->disabled_log = 1; + close(rmq_sock); + return NULL; + } + + if (col->disabled_log == 0) { + logger(LOG_INFO, + "OpenLI Mediator: joined RMQ on collector %s successfully", + col->ipaddr); + } + return rmqev; +} + +/** Creates an epoll read event for an existing TCP socket that is + * connected to the OpenLI collector forwarding thread. + * + * @param col The state object for this collector receive thread + * @param epoll_fd The epoll file descriptor to add the read event to + * + * @return NULL if the socket cannot be added to the + * epoll event list, otherwise returns a pointer to the mediator + * epoll event structure that was successfully created by this + * function. 
+ */ +static med_epoll_ev_t *prepare_collector_receive_fd(coll_recv_t *col, + int epoll_fd) { + + med_epoll_ev_t *colev = NULL; + int fdtype; + + /* If we are supposed to be using TLS, establish a TLS session */ + if (col->parentconfig->usingtls) { + fdtype = start_collector_ssl(col); + } else { + /* Otherwise, we can use the existing socket as is */ + fdtype = MED_EPOLL_COLLECTOR; + col->using_tls = 0; + } + + /* Create an epoll event and add it to our epoll FD set */ + colev = create_mediator_fdevent(epoll_fd, col, fdtype, col->col_fd, + EPOLLIN | EPOLLRDHUP); + if (colev == NULL && col->disabled_log == 0) { + logger(LOG_INFO, + "OpenLI Mediator: unable to add collector fd to epoll: %s", + strerror(errno)); + col->disabled_log = 1; + close(col->col_fd); + col->col_fd = -1; + return NULL; + } + + /* Create a net buffer for receiving data from the TCP socket */ + if (col->incoming) { + destroy_net_buffer(col->incoming); + } + col->incoming = create_net_buffer(NETBUF_RECV, col->col_fd, + col->ssl); + + if (col->disabled_log == 0) { + logger(LOG_INFO, + "OpenLI Mediator: accepted connection from collector %s.", + col->ipaddr); + } + return colev; +} + +/** Completes a partially-established TLS handshake on the TCP socket that + * connects this thread to the collector. + * + * @param col The state object for this collector receive thread + * @param mev The mediator epoll event for the TCP socket + * + * @return -1 if an error occurs, 0 if the handshake remains incomplete, 1 + * if the handshake is now complete + */ +static int continue_collector_handshake(coll_recv_t *col, med_epoll_ev_t *mev) { + + int ret = SSL_accept(col->ssl); + + if (ret <= 0) { + ret = SSL_get_error(col->ssl, ret); + if (ret == SSL_ERROR_WANT_READ || ret == SSL_ERROR_WANT_WRITE) { + /* Not fatal -- can keep trying */ + return 0; + } else { + logger(LOG_INFO, "OpenLI Mediator: Pending SSL handshake for collector %s failed", col->ipaddr); + return -1; + } + } + logger(LOG_INFO, "OpenLI Mediator: Pending SSL handshake for collector %s completed", col->ipaddr); + col->lastsslerror = 0; + mev->fdtype = MED_EPOLL_COLLECTOR; + + /* If we're meant to be reading records from RMQ, then we are now + * ready to set that event up too. + */ + if (col->rmqenabled && col->rmq_colev == NULL) { + col->rmq_colev = prepare_collector_receive_rmq(col, mev->epoll_fd); + } + return 1; +} + +/** Processes an intercept record received from a collector and inserts + * it into the appropriate mediator-internal LIID queue. + * + * @param col The state object for this collector receive thread + * @param msgbody A pointer to the start of the received record + * @param msglen The length of the received record, in bytes + * @param msgtype The record type (e.g. CC, IRI, etc). + * + * @return 1 if the record is processed successfully, 0 if an error + * occurs. 
+ */ +static int process_received_data(coll_recv_t *col, uint8_t *msgbody, + uint16_t msglen, openli_proto_msgtype_t msgtype) { + + unsigned char liidstr[65536]; + uint16_t liidlen; + col_known_liid_t *found; + struct timeval tv; + int r; + + /* The queue that this record must be published to is derived from + * the LIID for the record and the record type + */ + extract_liid_from_exported_msg(msgbody, msglen, liidstr, 65536, &liidlen); + + if (liidlen > 2) { + liidlen -= 2; + } else { + return 0; + } + + if (col->disabled_log) { + col->disabled_log = 0; + } + + HASH_FIND(hh, col->known_liids, liidstr, liidlen, found); + if (!found) { + /* This is an LIID that we haven't seen before (or recently), so + * make sure we have a set of internal mediator RMQ queues for it. + */ + found = (col_known_liid_t *)calloc(1, sizeof(col_known_liid_t)); + found->liid = strdup((const char *)liidstr); + found->liidlen = strlen(found->liid); + found->lastseen = 0; + + HASH_ADD_KEYPTR(hh, col->known_liids, found->liid, found->liidlen, + found); + logger(LOG_INFO, "OpenLI Mediator: LIID %s %u has been seen coming from collector %s", found->liid, liidlen, col->ipaddr); + + /* declare amqp queue for this LIID */ + if (declare_mediator_liid_RMQ_queue(col->amqp_producer_state, + found->liid, found->liidlen) < 0) { + logger(LOG_INFO, "OpenLI Mediator: failed to create internal RMQ queues for LIID %s in collector thread %s", found->liid, col->ipaddr); + return -1; + } + } + + gettimeofday(&tv, NULL); + found->lastseen = tv.tv_sec; + + /* Hand off to publishing methods defined in mediator_rmq.c */ + if (msgtype == OPENLI_PROTO_ETSI_CC) { + r = publish_cc_on_mediator_liid_RMQ_queue(col->amqp_producer_state, + msgbody + (liidlen + 2), msglen - (liidlen + 2), found->liid); + return r; + } + + if (msgtype == OPENLI_PROTO_ETSI_IRI) { + return publish_iri_on_mediator_liid_RMQ_queue(col->amqp_producer_state, + msgbody + (liidlen + 2), msglen - (liidlen + 2), found->liid); + } + + if (msgtype == OPENLI_PROTO_RAWIP_SYNC) { + + /* declare a queue for raw IP */ + declare_mediator_rawip_RMQ_queue(col->amqp_producer_state, found->liid, + found->liidlen); + + /* publish to raw IP queue */ + return publish_rawip_on_mediator_liid_RMQ_queue( + col->amqp_producer_state, msgbody, msglen, found->liid); + } + + return 1; +} + +/** Reads and processes a message from the collector that this thread + * is responsible for. + * + * @param col The state object for this collector receive thread + * @param mev The epoll event for the connection to the collector + * + * @return -1 if an error occurs, 0 otherwise + */ +static int receive_collector(coll_recv_t *col, med_epoll_ev_t *mev) { + + uint8_t *msgbody = NULL; + uint16_t msglen = 0; + uint64_t internalid; + int total_recvd = 0; + openli_proto_msgtype_t msgtype; + + /* An epoll read event fired for our collector connection, so there + * should be at least one message for us to read. 
+ */ + do { + /* Read the next available message -- see netcomms.c for the + * implementation of these methods */ + if (mev->fdtype == MED_EPOLL_COL_RMQ) { + msgtype = receive_RMQ_buffer(col->incoming_rmq, col->amqp_state, + &msgbody, &msglen, &internalid); + } else { + msgtype = receive_net_buffer(col->incoming, &msgbody, + &msglen, &internalid); + } + + if (msgtype < 0) { + if (col->disabled_log == 0) { + nb_log_receive_error(msgtype); + logger(LOG_INFO, "OpenLI Mediator: error receiving message from collector %s.", col->ipaddr); + } + return -1; + } + + total_recvd += msglen; + switch(msgtype) { + case OPENLI_PROTO_DISCONNECT: + if (col->disabled_log == 0) { + logger(LOG_INFO, "OpenLI Mediator: received disconnect message from collector %s", col->ipaddr); + } + return -1; + case OPENLI_PROTO_NO_MESSAGE: + case OPENLI_PROTO_HEARTBEAT: + /* Heartbeats are periodically sent to ensure that RMQ doesn't + * kill our connection for being too idle -- they don't + * serve any actual messaging purpose so we can just read them + * and discard them. + */ + break; + case OPENLI_PROTO_RAWIP_SYNC: + case OPENLI_PROTO_ETSI_CC: + case OPENLI_PROTO_ETSI_IRI: + /* Intercept record -- process it appropriately */ + if (process_received_data(col, msgbody, msglen, msgtype) < 0) { + return -1; + } + break; + default: + /* Unexpected message type, probably OK to just ignore... */ + break; + } + } while (msgtype != OPENLI_PROTO_NO_MESSAGE && total_recvd < MAX_COLL_RECV); + + /* We use a cap of MAX_COLL_RECV bytes per receive method call so that + * we can periodically go back and check for "halt" messages etc. even + * when the receive socket is getting hammered. + */ + + return 0; + +} + +/** Handler method for any file descriptors or timers that are reported as + * "ready" by epoll. + * + * @param col The state object for this collector receive thread + * @param ev The generic epoll event for the fd or timer that is ready + * + * @return -1 if an error occurs, 0 otherwise. + */ +static int collector_thread_epoll_event(coll_recv_t *col, + struct epoll_event *ev) { + + med_epoll_ev_t *mev = (med_epoll_ev_t *)(ev->data.ptr); + int ret = 0; + + switch(mev->fdtype) { + case MED_EPOLL_SIGCHECK_TIMER: + /* Time to check for control messages -- fires once per second */ + if (ev->events & EPOLLIN) { + ret = 1; + } else { + logger(LOG_INFO, + "OpenLI Mediator: main epoll timer has failed in collector thread for %s", + col->ipaddr); + ret = -1; + } + break; + case MED_EPOLL_QUEUE_EXPIRE_TIMER: + /* Time to purge any state for inactive LIIDs */ + halt_mediator_timer(mev); + + remove_expired_liid_queues(col); + + if (start_mediator_timer(mev, 120) < 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to reset queue expiry timer in collector thread for %s: %s", col->ipaddr, strerror(errno)); + ret = -1; + } else { + ret = 1; + } + break; + + case MED_EPOLL_COLLECTOR_HANDSHAKE: + /* A socket with an incomplete SSL handshake is active -- try + * to complete the handshake. 
+ */ + ret = continue_collector_handshake(col, mev); + if (ret == -1) { + return -1; + } + break; + case MED_EPOLL_COLLECTOR: + case MED_EPOLL_COL_RMQ: + /* Data is readable from our collector socket / RMQ */ + if (ev->events & EPOLLRDHUP) { + ret = -1; + } else if (ev->events & EPOLLIN) { + ret = receive_collector(col, mev); + } + break; + default: + logger(LOG_INFO, + "OpenLI Mediator: invalid epoll event type %d seen in collector thread for %s", mev->fdtype, col->ipaddr); + ret = -1; + + } + return ret; +} + +/** Destroys the state for a collector receive thread and frees any + * allocated memory. + * + * @param col The state object for this collector receive thread + */ +static void cleanup_collector_thread(coll_recv_t *col) { + col_known_liid_t *known, *tmp; + + if (col->colev) { + remove_mediator_fdevent(col->colev); + } + if (col->rmq_colev) { + remove_mediator_fdevent(col->rmq_colev); + } + if (col->incoming) { + destroy_net_buffer(col->incoming); + } + if (col->incoming_rmq) { + destroy_net_buffer(col->incoming_rmq); + } + if (col->amqp_state) { + amqp_destroy_connection(col->amqp_state); + } + if (col->amqp_producer_state) { + amqp_destroy_connection(col->amqp_producer_state); + } + + if (col->ssl) { + SSL_free(col->ssl); + } + + if (col->internalpass) { + free(col->internalpass); + } + HASH_ITER(hh, col->known_liids, known, tmp) { + if (known->liid) { + free(known->liid); + } + HASH_DELETE(hh, col->known_liids, known); + free(known); + } + + if (col->ipaddr) { + logger(LOG_INFO, "OpenLI mediator: exiting collector thread for %s", + col->ipaddr); + free(col->ipaddr); + } + +} + +/** pthread_create() callback to start a collector receive thread + * + * @param params The state object for this collector receive thread (as + * a void pointer) + * + * @return NULL when the thread exits + */ +static void *start_collector_thread(void *params) { + + coll_recv_t *col = (coll_recv_t *)params; + int is_halted = 0, i; + col_thread_msg_t msg; + int epoll_fd = -1, timerexpired, nfds; + med_epoll_ev_t *timerev, *queuecheck = NULL; + struct epoll_event evs[64]; + + if (col->ipaddr == NULL) { + logger(LOG_INFO, "OpenLI Mediator: started collector thread for NULL collector IP??"); + pthread_exit(NULL); + } + + /* Save frequently read fields from parent config so we don't have to + * lock it frequently for reading. We'll get a RELOAD message when + * we need to check if these values may have changed. + */ + lock_med_collector_config(col->parentconfig); + if (col->parentconfig->rmqconf) { + col->rmq_hb_freq = col->parentconfig->rmqconf->heartbeatFreq; + col->rmqenabled = col->parentconfig->rmqconf->enabled; + col->internalpass = strdup(col->parentconfig->rmqconf->internalpass); + } + unlock_med_collector_config(col->parentconfig); + + epoll_fd = epoll_create1(0); + + timerev = col->colev = col->rmq_colev = NULL; + col->incoming = NULL; + + logger(LOG_INFO, "OpenLI Mediator: starting collector thread for %s", + col->ipaddr); + + /* timerev is used to regularly break from epoll_wait() so we can check + * for incoming messages on our control socket. 
+ */ + timerev = create_mediator_timer(epoll_fd, NULL, MED_EPOLL_SIGCHECK_TIMER, 0); + if (timerev == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to create main loop timer in collector thread for %s", col->ipaddr); + goto threadexit; + } + + queuecheck = create_mediator_timer(epoll_fd, NULL, + MED_EPOLL_QUEUE_EXPIRE_TIMER, 60); + + while (!is_halted) { + + /* Check for messages on the control socket */ + if (libtrace_message_queue_try_get(&(col->in_main), (void *)&msg) != + LIBTRACE_MQ_FAILED) { + + if (msg.type == MED_COLL_MESSAGE_HALT) { + /* Parent thread has told us to exit asap */ + is_halted = 1; + continue; + } + + if (msg.type == MED_COLL_MESSAGE_RELOAD) { + /* Parent thread has reloaded the shared configuration, so + * we need to update our local copies of these values. + */ + lock_med_collector_config(col->parentconfig); + + /* Stop using RMQ if it has been disabled */ + if (col->parentconfig->rmqconf->enabled == 0 && + col->rmqenabled == 1) { + if (col->rmq_colev) { + remove_mediator_fdevent(col->rmq_colev); + col->rmq_colev = NULL; + } + } + + /* TODO handle change in mediator ID ? */ + + if (strcmp(col->internalpass, + col->parentconfig->rmqconf->internalpass) != 0) { + + if (col->internalpass) { + free(col->internalpass); + } + if (col->parentconfig->rmqconf->internalpass) { + col->internalpass = + strdup(col->parentconfig->rmqconf->internalpass); + } + /* Need to reconnect to RMQ */ + remove_mediator_fdevent(col->rmq_colev); + col->rmq_colev = NULL; + } + + /* If our FD socket has changed TLS status, we should + * disconnect the current session and reconnect using + * the new TLS status. + */ + if (col->using_tls != col->parentconfig->usingtls) { + if (col->colev) { + remove_mediator_fdevent(col->colev); + col->colev = NULL; + } + } + + /* re-save rmqconf->heartbeat */ + col->rmq_hb_freq = col->parentconfig->rmqconf->heartbeatFreq; + col->rmqenabled = col->parentconfig->rmqconf->enabled; + + unlock_med_collector_config(col->parentconfig); + + } + + if (msg.type == MED_COLL_MESSAGE_DISCONNECT) { + /* A configuration change means that we need to disconnect + * from the collector. + */ + if (col->colev) { + remove_mediator_fdevent(col->colev); + col->colev = NULL; + } + if (col->rmq_colev) { + remove_mediator_fdevent(col->rmq_colev); + col->rmq_colev = NULL; + } + /* Disable logging until the collector starts working + * properly again to avoid spamming connection failure + * messages if the collector is down for a long time. + */ + col->was_dropped = 1; + col->disabled_log = 1; + } + + if (msg.type == MED_COLL_MESSAGE_RECONNECT) { + /* A collector has reconnected, so we need to shift our + * epoll events to the new socket. + */ + if (col->colev) { + remove_mediator_fdevent(col->colev); + col->colev = NULL; + } + if (col->rmq_colev) { + remove_mediator_fdevent(col->rmq_colev); + col->rmq_colev = NULL; + } + col->col_fd = (int)msg.arg; + col->was_dropped = 0; + } + + } + + if (col->was_dropped) { + continue; + } + + /* Prepare our local RMQ state for emitting records for the LEA + * threads to consume. + */ + if (col->amqp_producer_state == NULL) { + if (join_mediator_RMQ_as_producer(col) == NULL) { + col->disabled_log = 1; + continue; + } + } + + /* If we don't have epoll events for this collector AND the + * collector is actually connected to us, then make some + * epoll events for both the plain socket and RMQ (if enabled). 
+ */ + if (col->colev == NULL && col->col_fd != -1) { + col->colev = prepare_collector_receive_fd(col, epoll_fd); + } + + if (col->colev && col->colev->fdtype == MED_EPOLL_COLLECTOR && + col->rmqenabled && col->rmq_colev == NULL) { + col->rmq_colev = prepare_collector_receive_rmq(col, epoll_fd); + } + + /* Start our timer to break out and check for control messages once + * per second. + */ + if (start_mediator_timer(timerev, 1) < 0) { + logger(LOG_INFO, "OpenLI Mediator: failed to add timer to epoll in collector thread for %s", col->ipaddr); + break; + } + + timerexpired = 0; + while (!timerexpired && !is_halted) { + /* See if there is any activity on any of our timers or fds */ + nfds = epoll_wait(epoll_fd, evs, 64, -1); + if (nfds < 0) { + if (errno == EINTR) { + continue; + } + logger(LOG_INFO, "OpenLI Mediator: error while waiting for epoll events in collector thread for %s: %s", col->ipaddr, strerror(errno)); + is_halted = true; + continue; + } + + for (i = 0; i < nfds; i++) { + timerexpired = collector_thread_epoll_event(col, &(evs[i])); + if (timerexpired == -1) { + /* We're in an error state -- disable this thread for now */ + if (col->colev) { + remove_mediator_fdevent(col->colev); + col->colev = NULL; + } + if (col->rmq_colev) { + remove_mediator_fdevent(col->rmq_colev); + col->rmq_colev = NULL; + } + if (col->disabled_log == 0) { + logger(LOG_INFO, "OpenLI Mediator: collector thread for %s is now inactive", col->ipaddr); + } + col->was_dropped = 1; + col->disabled_log = 1; + break; + } + } + } + /* If we get here, the message timer expired -- loop around and + * check for new messages. + */ + halt_mediator_timer(timerev); + } + +threadexit: + + destroy_mediator_timer(queuecheck); + destroy_mediator_timer(timerev); + cleanup_collector_thread(col); + + close(epoll_fd); + pthread_exit(NULL); + +} + +/** Accepts a connection from a collector and spawns a new collector + * receive thread for that collector. + * + * @param medcol The shared config for all collector receive threads + * @param listenfd The listening file descriptor that the connection + * arrived on + * + * @return -1 if an error occurs, otherwise returns the file descriptor + * for the newly accepted connection. + */ +int mediator_accept_collector_connection(mediator_collector_t *medcol, + int listenfd) { + int newfd = -1; + struct sockaddr_storage saddr; + socklen_t socklen = sizeof(saddr); + char strbuf[INET6_ADDRSTRLEN]; + coll_recv_t *newcol = NULL; + mediator_collector_config_t *config = &(medcol->config); + + /* Standard socket connection accept code... */ + newfd = accept(listenfd, (struct sockaddr *)&saddr, &socklen); + fd_set_nonblock(newfd); + + if (getnameinfo((struct sockaddr *)&saddr, socklen, strbuf, sizeof(strbuf), + 0, 0, NI_NUMERICHOST) != 0) { + logger(LOG_INFO, "OpenLI Mediator: getnameinfo error in mediator: %s.", + strerror(errno)); + } + + if (newfd < 0) { + return newfd; + } + + HASH_FIND(hh, medcol->threads, strbuf, strlen(strbuf), newcol); + + if (newcol == NULL) { + /* Never seen a connection from this collector before, so spawn + * a new receive thread for it. 
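+         *
+         * Receive threads are keyed by the collector's IP address string,
+         * so if the same collector connects again later the existing thread
+         * is reused -- it is simply handed the new file descriptor via a
+         * MED_COLL_MESSAGE_RECONNECT message (see the else branch below).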
+ */ + newcol = (coll_recv_t *)calloc(1, sizeof(coll_recv_t)); + newcol->parentconfig = config; + + newcol->ipaddr = strdup(strbuf); + newcol->iplen = strlen(strbuf); + newcol->col_fd = newfd; + + HASH_ADD_KEYPTR(hh, medcol->threads, newcol->ipaddr, newcol->iplen, + newcol); + + libtrace_message_queue_init(&(newcol->in_main), + sizeof(col_thread_msg_t)); + pthread_create(&(newcol->tid), NULL, start_collector_thread, newcol); + } else { + /* We've already got a thread for this collector (?), so swap over to + * using the new file descriptor as the old one is probably dead + */ + col_thread_msg_t reconn_msg; + reconn_msg.type = MED_COLL_MESSAGE_RECONNECT; + reconn_msg.arg = newfd; + libtrace_message_queue_put(&(newcol->in_main), &reconn_msg); + } + + return newfd; +} + +/** Halts all collector receive threads and waits for the threads to + * terminate. + * + * @param medcol The shared state for all collector receive threads + */ +void mediator_disconnect_all_collectors(mediator_collector_t *medcol) { + + coll_recv_t *col, *tmp; + + /* Send a halt message to all known threads, then use pthread_join() to + * block until each thread exits. + */ + HASH_ITER(hh, medcol->threads, col, tmp) { + col_thread_msg_t end_msg; + end_msg.type = MED_COLL_MESSAGE_HALT; + end_msg.arg = 0; + libtrace_message_queue_put(&(col->in_main), &end_msg); + + pthread_join(col->tid, NULL); + libtrace_message_queue_destroy(&(col->in_main)); + HASH_DELETE(hh, medcol->threads, col); + free(col); + } +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/coll_recv_thread.h b/src/mediator/coll_recv_thread.h new file mode 100644 index 00000000..b371e78a --- /dev/null +++ b/src/mediator/coll_recv_thread.h @@ -0,0 +1,297 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#ifndef OPENLI_MEDIATOR_COLL_RECV_THREAD_H +#define OPENLI_MEDIATOR_COLL_RECV_THREAD_H + +#include +#include +#include "netcomms.h" +#include "openli_tls.h" +#include "med_epoll.h" + +/** This file defines public types and methods for interactive with a + * "collector receive" thread for the OpenLI mediator. + * Each OpenLI collector that reports to a mediator will be handled using + * a separate instance of one of these threads. + * + * The core functionality of a collector receive thread is to: + * - receive LI records from the collector via either a RMQ queue hosted + * on the collector OR a TCP socket + * - insert each received record into the appropriate internal RMQ queue, + * named after the LIID that the record was intercepted for and the + * record type (e.g. IRI or CC). 
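+ *
+ *  As a rough sketch only (the authoritative logic lives in
+ *  coll_recv_thread.c and mediator_rmq.c), each intercept record that a
+ *  receive thread reads is handled along these lines:
+ *
+ *      msgtype = receive_net_buffer(...);      // or receive_RMQ_buffer(...)
+ *      if (msgtype is an ETSI CC, ETSI IRI or raw IP sync record) {
+ *          extract the LIID from the record body;
+ *          if the LIID has not been seen before, declare its RMQ queue;
+ *          publish the record to the queue for that LIID and record type;
+ *      }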
+ * + */ + + +/** Types of messages that can be sent from the main mediator thread to a + * collector receive thread. + */ +enum { + /** Collector has reconnected on a different file descriptor */ + MED_COLL_MESSAGE_RECONNECT, + + /** Collector has disconnected, but thread needs to remain active + * (message unused in current implementation). + */ + MED_COLL_MESSAGE_DISCONNECT, + + /** Mediator is exiting, please terminate the collector thread. */ + MED_COLL_MESSAGE_HALT, + + /** Global shared configuration has changed, update local copy of this + * config. + */ + MED_COLL_MESSAGE_RELOAD +}; + + +/** Structure defining a message that may be sent from the main mediator + * thread to a collector receive thread. + */ +typedef struct col_thread_msg { + /** The message type -- see enum above for possible values */ + int type; + + /** A message argument -- the type and meaning will depend on the message + * type. + */ + uint64_t arg; +} col_thread_msg_t; + +/** Structure for keeping track of the LIIDs that a collector receive thread + * has seen + */ +typedef struct col_known_liid { + /** The LIID itself, as a string */ + char *liid; + + /** The length of the LIID string */ + int liidlen; + + /** Timestamp when this LIID was last seen */ + uint64_t lastseen; + + UT_hash_handle hh; +} col_known_liid_t; + +/** Collector thread configuration that is shared across all collector + * receive threads. + */ +typedef struct mediator_collector_config { + /** The SSL configuration for this mediator instance. */ + openli_ssl_config_t *sslconf; + + /** The RabbitMQ configuration for this mediator instance */ + openli_RMQ_config_t *rmqconf; + + /** The ID of the mediator instance */ + uint32_t parent_mediatorid; + + /** A mutex to protect against race conditions when reading this config */ + pthread_mutex_t mutex; + + /** Boolean flag indicating whether collector connections are using TLS */ + uint8_t usingtls; +} mediator_collector_config_t; + + +/** State associated with a single collector connection */ +typedef struct single_coll_receiver { + + /** ID of the thread that this connection is running in */ + pthread_t tid; + + /** The IP address that the collector has connected from */ + char *ipaddr; + + /** The length of the IP address string */ + int iplen; + + /** The file descriptor for the connection with the collector */ + int col_fd; + + /** The most recent SSL connection error, if there has been one */ + int lastsslerror; + + /** Flag indicating if the collector thread should be inactive */ + int was_dropped; + + /** The frequency at which the thread needs to attempt a read from any + * RMQ consumers to allow heartbeats to be handled. + */ + int rmq_hb_freq; + + /** Flag indicating whether the collector receive thread should be + * consuming from RMQ (as opposed to reading from a TCP socket) + */ + int rmqenabled; + + /** Password to access the local RMQ instance where received records + * are published to so that LEA threads can read them when ready/ + */ + char *internalpass; + + /** Flag indicating whether the TCP socket to the collector should be + * encrypted with TLS or not + */ + uint8_t using_tls; + + /** A mediator epoll event for reading from a TCP connection to the + * collector. 
+ */ + med_epoll_ev_t *colev; + + /** A mediator epoll event for reading from RabbitMQ */ + med_epoll_ev_t *rmq_colev; + + /** The AMQP connection state for the connection to the collector RMQ */ + amqp_connection_state_t amqp_state; + + /** The AMQP connection state for the connection to our local RMQ */ + amqp_connection_state_t amqp_producer_state; + + /** The socket for sending records onto the local RMQ instance */ + amqp_socket_t *amqp_producer_sock; + + /** The buffer used to store ETSI records received from the collector via + * a network connection */ + net_buffer_t *incoming; + + /** The buffer used to store ETSI records received from the collector via + * RabbitMQ */ + net_buffer_t *incoming_rmq; + + /** A flag indicating whether error logging is disabled for this + * collector. + */ + int disabled_log; + + /** The SSL socket for this collector connection, if not using RMQ */ + SSL *ssl; + + /** The set of LIIDs that we have seen */ + col_known_liid_t *known_liids; + + /** A pointer to the shared global config for collector receive threads + * (owned by the main mediator thread) + */ + mediator_collector_config_t *parentconfig; + + /** The message queue on which this thread will receive instructions + * from the main mediator thread. + */ + libtrace_message_queue_t in_main; + + UT_hash_handle hh; + +} coll_recv_t; + +/** Structure that tracks the set of existing collector receive threads + * and their shared configuration. + */ +typedef struct mediator_collectors { + /** A hashmap containing the set of collector receive threads */ + coll_recv_t *threads; + + /** Shared configuration for all collector receive threads */ + mediator_collector_config_t config; + +} mediator_collector_t; + +/** Initialises the shared configuration for the collectors managed by a + * mediator. + * + * @param medcol The global state for the collectors that is to be + * initialised. + * @param usetls The value of the global flag that indicates whether + * new collector connections must use TLS. + * @param sslconf A pointer to the SSL configuration for this mediator. + * @param rmqconf A pointer to the RabbitMQ configuration for this + * mediator. + * @param mediatorid The ID number of the mediator + */ +void init_med_collector_config(mediator_collector_config_t *config, + uint8_t usetls, openli_ssl_config_t *sslconf, + openli_RMQ_config_t *rmqconf, uint32_t mediatorid); + +/** Locks the shared collector configuration for exclusive use. + * + * + * @param config The global config to be locked + */ +void lock_med_collector_config(mediator_collector_config_t *config); + +/** Unlocks the shared collector configuration from exclusive use. + * + * + * @param config The global config to be unlocked + */ +void unlock_med_collector_config(mediator_collector_config_t *config); + +/** Updates the shared configuration for the collectors managed by a + * mediator. + * + * @param config The global config for the collectors that is to be + * updated. + * @param usetls The value of the global flag that indicates whether + * new collector connections must use TLS. + * @param mediatorid The ID number of the mediator + */ +void update_med_collector_config(mediator_collector_config_t *config, + uint8_t usetls, uint32_t mediatorid); + +/** Frees any resources allocated to the shared collector configuration. 
+ * + * + * @param config The global config to be destroyed + */ +void destroy_med_collector_config(mediator_collector_config_t *config); + +/** Accepts a connection from a collector and spawns a new collector + * receive thread for that collector. + * + * @param medcol The shared config for all collector receive threads + * @param listenfd The listening file descriptor that the connection + * arrived on + * + * @return -1 if an error occurs, otherwise returns the file descriptor + * for the newly accepted connection. + */ +int mediator_accept_collector_connection(mediator_collector_t *medcol, + int listenfd); + +/** Halts all collector receive threads and waits for the threads to + * terminate. + * + * @param medcol The shared state for all collector receive threads + */ +void mediator_disconnect_all_collectors(mediator_collector_t *medcol); + +#endif + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/handover.c b/src/mediator/handover.c index 8ddadff8..efb68e3a 100644 --- a/src/mediator/handover.c +++ b/src/mediator/handover.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -33,18 +33,16 @@ #include "etsili_core.h" #include "handover.h" #include "med_epoll.h" +#include "config.h" +#include "mediator_rmq.h" -/** Send some buffered ETSI records out via a handover. +/** Sends any pending keep-alive message out via a handover. * - * If there is a keep alive message pending for this handover, that will - * be sent before sending any buffered records. - * - * @param mev The epoll event for the handover + * @param ho The handover to send the keep-alive over * * @return -1 is an error occurs, 0 otherwise. */ -int xmit_handover(med_epoll_ev_t *mev) { - handover_t *ho = (handover_t *)(mev->state); +int xmit_handover_keepalive(handover_t *ho) { /* We don't lock the handover mutex here, because we're going to be * doing this a lot and the mutex is mostly protecting logging-related @@ -53,93 +51,86 @@ int xmit_handover(med_epoll_ev_t *mev) { * everytime we want to send a record to a client. */ int ret = 0; - struct timeval tv; - if (ho->ho_state->pending_ka) { - /* There's a keep alive to be sent */ - ret = send(mev->fd, ho->ho_state->pending_ka->encoded, - ho->ho_state->pending_ka->len, MSG_DONTWAIT); - if (ret < 0) { - /* XXX should be worry about EAGAIN here? */ + if (!ho->ho_state->pending_ka) { + return 0; + } + + /* There's a keep alive to be sent */ + ret = send(ho->outev->fd, ho->ho_state->pending_ka->encoded, + ho->ho_state->pending_ka->len, MSG_DONTWAIT); + if (ret < 0) { + /* XXX should be worry about EAGAIN here? 
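+         *
+         * (send() is called with MSG_DONTWAIT, so a full socket buffer
+         * surfaces as EAGAIN / EWOULDBLOCK; as written, any negative return
+         * -- including that case -- is logged and reported as a failure.)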
*/ + + if (ho->disconnect_msg == 0) { + logger(LOG_INFO, + "OpenLI Mediator: error while transmitting keepalive for handover %s:%s HI%d -- %s", + ho->ipstr, ho->portstr, ho->handover_type, + strerror(errno)); + } + return -1; + } + if (ret == 0) { + return -1; + } + if (ret == ho->ho_state->pending_ka->len) { + /* Sent the whole thing successfully */ + wandder_release_encoded_result(NULL, ho->ho_state->pending_ka); + ho->ho_state->pending_ka = NULL; + + /* + logger(LOG_INFO, "successfully sent keep alive to %s:%s HI%d", + ho->ipstr, ho->portstr, ho->handover_type); + */ + halt_mediator_timer(ho->aliverespev); + /* Start the timer for the response */ + if (start_mediator_timer(ho->aliverespev, + ho->ho_state->kawait) == -1) { if (ho->disconnect_msg == 0) { logger(LOG_INFO, - "OpenLI Mediator: error while transmitting keepalive for handover %s:%s HI%d -- %s", - ho->ipstr, ho->portstr, ho->handover_type, + "OpenLI Mediator: unable to start keepalive response timer: %s", strerror(errno)); } return -1; } - if (ret == 0) { - return -1; - } - if (ret == ho->ho_state->pending_ka->len) { - /* Sent the whole thing successfully */ - wandder_release_encoded_result(NULL, ho->ho_state->pending_ka); - ho->ho_state->pending_ka = NULL; - - /* - logger(LOG_INFO, "successfully sent keep alive to %s:%s HI%d", - ho->ipstr, ho->portstr, ho->handover_type); - */ - /* Start the timer for the response */ - if (start_mediator_timer(ho->aliverespev, - ho->ho_state->kawait) == -1) { - if (ho->disconnect_msg == 0) { - logger(LOG_INFO, - "OpenLI Mediator: unable to start keepalive response timer: %s", - strerror(errno)); - } - return -1; - } - if (ho->aliverespev == NULL && ho->disconnect_msg == 1) { - /* Not expecting a response, so we have to assume that - * the connection is good again as soon as we successfully - * send a KA */ - ho->disconnect_msg = 0; - logger(LOG_INFO, - "OpenLI Mediator: reconnected to handover %s:%s HI%d successfully.", - ho->ipstr, ho->portstr, ho->handover_type); - } - - /* If there are no actual records waiting to be sent, then - * we can disable write on this handover and go back to the - * epoll loop. - */ - if (get_buffered_amount(&(ho->ho_state->buf)) == 0) { - if (disable_handover_writing(ho) < 0) - { - return -1; - } - } - - } else { - /* Partial send -- try the rest next time */ - memmove(ho->ho_state->pending_ka->encoded, - ho->ho_state->pending_ka->encoded + ret, - ho->ho_state->pending_ka->len - ret); - ho->ho_state->pending_ka->len -= ret; + if (ho->aliverespev == NULL && ho->disconnect_msg == 1) { + /* Not expecting a response, so we have to assume that + * the connection is good again as soon as we successfully + * send a KA */ + ho->disconnect_msg = 0; + logger(LOG_INFO, + "OpenLI Mediator: reconnected to handover %s:%s HI%d successfully.", + ho->ipstr, ho->portstr, ho->handover_type); } - return 0; + } else { + /* Partial send -- try the rest next time */ + memmove(ho->ho_state->pending_ka->encoded, + ho->ho_state->pending_ka->encoded + ret, + ho->ho_state->pending_ka->len - ret); + ho->ho_state->pending_ka->len -= ret; } + return 0; +} - /* As long as we have an unanswered keep alive, hold off on sending - * any buffered records -- the recipient may be unavailable and we'd - * be better off to keep those records in our buffer until we're - * confident that they're able to receive them. - */ - if (ho->aliverespev && ho->aliverespev->fd != -1) { - return 0; - } +/** Sends a buffer of ETSI records out via a handover. 
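+ *
+ *  The records are drawn from ho->ho_state->buf, which is filled by the
+ *  RMQ consumer path for this handover (see check_handover_rmq_status()
+ *  and the consume_mediator_*_messages() methods in mediator_rmq.c).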
+ * + * @param ho The handover to send the records over + * @param maxsend The maximum amount of data to send (in bytes) + * + * @return -1 is an error occurs, 0 otherwise. + */ +int xmit_handover_records(handover_t *ho, uint32_t maxsend) { + int ret; + struct timeval tv; - /* Send some of our buffered records, but no more than 1MB at - * a time -- we need to go back to our epoll loop to handle other events - * rather than getting stuck trying to send massive amounts of data in - * one go. + /* Send some of our buffered records -- we need to go back to our epoll + * loop to handle other events rather than getting stuck trying to send + * massive amounts of data in one go. */ - if ((ret = transmit_buffered_records(&(ho->ho_state->buf), mev->fd, - (1024 * 1024), NULL)) == -1) { + if ((ret = transmit_buffered_records(&(ho->ho_state->buf), ho->outev->fd, + maxsend, NULL)) == -1) { return -1; } @@ -147,42 +138,145 @@ int xmit_handover(med_epoll_ev_t *mev) { return 0; } - /* If we've sent everything that we've got, we can disable the epoll - * write event for this handover. - */ - if (get_buffered_amount(&(ho->ho_state->buf)) == 0) { - if (disable_handover_writing(ho) < 0) { - return -1; - } - } - /* Reset the keep alive timer */ gettimeofday(&tv, NULL); - if (ho->aliveev && ho->ho_state->katimer_setsec < tv.tv_sec) { + if (ho->aliveev && ho->ho_state->kafreq != 0 && + ho->ho_state->katimer_setsec < tv.tv_sec) { halt_mediator_timer(ho->aliveev); if (start_mediator_timer(ho->aliveev, ho->ho_state->kafreq) == -1) { if (ho->disconnect_msg == 0) { logger(LOG_INFO, "OpenLI Mediator: error while trying to disable xmit for handover %s:%s HI%d -- %s", - ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); + ho->ipstr, ho->portstr, ho->handover_type, + strerror(errno)); } return -1; } ho->ho_state->katimer_setsec = tv.tv_sec; } - if (ho->aliveev == NULL && ho->disconnect_msg == 1) { - /* Keep alives are disabled, so we are going to use a successful - * transmit as an indicator that the connection is stable again - * and we can stop suppressing logs */ - logger(LOG_INFO, - "OpenLI Mediator: reconnected to handover %s:%s HI%d successfully.", + return 0; +} + +/** Restarts the keep alive timer for a handover + * + * @param ho The handover to restart the keep alive timer for + * + * @return -1 if an error occurs, 0 otherwise + */ +static int restart_handover_keepalive(handover_t *ho) { + + int ret = 0; + pthread_mutex_lock(&(ho->ho_state->ho_mutex)); + + halt_mediator_timer(ho->aliveev); + if (start_mediator_timer(ho->aliveev, ho->ho_state->kafreq) == -1) { + if (ho->disconnect_msg == 0) { + logger(LOG_INFO, + "OpenLI Mediator: unable to reset keepalive timer for %s:%s HI%d :s", + ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); + } + ret = -1; + } + + pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); + return ret; +} + +/** React to a handover's failure to respond to a keep alive before the + * response timer expired. 
+ * + * @param ho The handover that failed to reply to a KA message + * + */ +void trigger_handover_ka_failure(handover_t *ho) { + + if (ho->disconnect_msg == 0) { + logger(LOG_INFO, "OpenLI Mediator: failed to receive KA response from LEA on handover %s:%s HI%d, dropping connection.", ho->ipstr, ho->portstr, ho->handover_type); + } + + halt_mediator_timer(ho->aliverespev); + disconnect_handover(ho); +} + +/** Creates and sends a keep-alive message over a handover + * + * @param ho The handover that needs to send a keep alive + * @param mediator_id The ID of this mediator (to be included in the KA msg) + * @param operator_id The operator ID string (to be included in the KA msg) + * + * @return -1 if an error occurs, 0 otherwise + */ +int trigger_handover_keepalive(handover_t *ho, uint32_t mediator_id, + char *operator_id) { - ho->disconnect_msg = 0; + wandder_encoded_result_t *kamsg; + wandder_etsipshdr_data_t hdrdata; + char elemstring[16]; + char liidstring[24]; + + if (ho->outev == NULL) { + return 0; } - return 0; + if (ho->ho_state->pending_ka == NULL && + ho->aliverespev->fd == -1 && + get_buffered_amount(&(ho->ho_state->buf)) == 0) { + /* Only create a new KA message if we have sent the last one we + * had queued up. + * Also only create one if we don't already have data to send. We + * should only be sending keep alives if the socket is idle. + */ + if (ho->ho_state->encoder == NULL) { + ho->ho_state->encoder = init_wandder_encoder(); + } else { + reset_wandder_encoder(ho->ho_state->encoder); + } + + /* Include the OpenLI version in the LIID field, so the LEAs can + * identify which version of the software is being used by the + * sender. + */ + /* PACKAGE_NAME and PACKAGE_VERSION come from config.h */ + snprintf(liidstring, 24, "%s-%s", PACKAGE_NAME, PACKAGE_VERSION); + hdrdata.liid = liidstring; + hdrdata.liid_len = strlen(hdrdata.liid); + + hdrdata.authcc = "NA"; + hdrdata.authcc_len = strlen(hdrdata.authcc); + hdrdata.delivcc = "NA"; + hdrdata.delivcc_len = strlen(hdrdata.delivcc); + + if (operator_id) { + hdrdata.operatorid = operator_id; + } else { + hdrdata.operatorid = "unspecified"; + } + hdrdata.operatorid_len = strlen(hdrdata.operatorid); + + /* Stupid 16 character limit... */ + snprintf(elemstring, 16, "med-%u", mediator_id); + hdrdata.networkelemid = elemstring; + hdrdata.networkelemid_len = strlen(hdrdata.networkelemid); + + hdrdata.intpointid = NULL; + hdrdata.intpointid_len = 0; + + kamsg = encode_etsi_keepalive(ho->ho_state->encoder, &hdrdata, + ho->ho_state->lastkaseq + 1); + if (kamsg == NULL) { + logger(LOG_INFO, + "OpenLI Mediator: failed to construct a keep-alive."); + return -1; + } + + ho->ho_state->pending_ka = kamsg; + ho->ho_state->lastkaseq += 1; + } + + /* Reset the keep alive timer */ + return restart_handover_keepalive(ho); } /** Disconnects a single mediator handover connection to an LEA. @@ -252,6 +346,9 @@ void disconnect_handover(handover_t *ho) { */ reset_export_buffer(&(ho->ho_state->buf)); + /* Drop the RMQ connection */ + reset_handover_rmq(ho); + /* This handover is officially disconnected, so no more logging for it * until / unless it reconnects. 
*/ @@ -263,7 +360,7 @@ void disconnect_handover(handover_t *ho) { * * @param ho The handover object that is being destroyed */ -static void free_handover(handover_t *ho) { +void free_handover(handover_t *ho) { /* This should close all of our sockets and halt any running timers */ disconnect_handover(ho); @@ -271,6 +368,10 @@ static void free_handover(handover_t *ho) { destroy_mediator_timer(ho->aliveev); destroy_mediator_timer(ho->aliverespev); + if (ho->rmq_consumer) { + amqp_destroy_connection(ho->rmq_consumer); + } + if (ho->ho_state) { release_export_buffer(&(ho->ho_state->buf)); pthread_mutex_destroy(&(ho->ho_state->ho_mutex)); @@ -286,157 +387,183 @@ static void free_handover(handover_t *ho) { free(ho); } -/** Modify a handover's epoll event to NOT check if writing is possible. +/** Destroys the state for a particular agency entity, including its + * corresponding handovers * - * If an error occurs, the handover will be disconnected. + * @param ag The agency to be destroyed. + */ +void destroy_agency(mediator_agency_t *ag) { + /* Disconnect the HI2 and HI3 handovers */ + if (ag == NULL) { + return; + } + if (ag->hi2) { + free_handover(ag->hi2); + } + if (ag->hi3) { + free_handover(ag->hi3); + } + if (ag->agencyid) { + free(ag->agencyid); + } +} + +/** Registers a single RMQ queue for an LIID with the RMQ consumer for a + * handover. * - * @param ho The handover to modify + * If the handover is HI2, the IRI queue is registered. + * If the handover is HI3, the CC queue is registered. * - * @return -1 if an error occurs, 0 otherwise. + * Used as a callback for foreach_liid_agency_mapping() to register all + * LIIDs in a known LIID set. + * + * @param m The LIID to be registered with the handover's RMQ consumer + * @param arg The handover to register with + * + * @return 0 always */ -int disable_handover_writing(handover_t *ho) { - int ret = 0; - uint32_t events = EPOLLRDHUP | EPOLLIN ; +static int register_known_liid_consumers(liid_map_entry_t *m, void *arg) { + handover_t *ho = (handover_t *)arg; + int r; + const char *histr = "??"; + uint8_t *delflag = NULL; - if (!ho->ho_state->outenabled) { + if (ho->handover_type == HANDOVER_HI2) { + r = register_mediator_iri_RMQ_consumer(ho->rmq_consumer, m->liid); + histr = "IRI"; + delflag = &(m->iriqueue_deleted); + } else if (ho->handover_type == HANDOVER_HI3) { + r = register_mediator_cc_RMQ_consumer(ho->rmq_consumer, m->liid); + histr = "CC"; + delflag = &(m->ccqueue_deleted); + } else { return 0; } - ret = modify_mediator_fdevent(ho->outev, events); - if (ret == -1) { - logger(LOG_INFO, - "OpenLI Mediator: error while trying to enable xmit for handover %s:%s HI%d -- %s", - ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); - disconnect_handover(ho); - } else { - ho->ho_state->outenabled = 0; - } + if (r == -1) { + logger(LOG_INFO, "OpenLI Mediator: failed to declare consumer %s queue for LIID %s", histr, m->liid); + } else if (r == -2) { + logger(LOG_INFO, "OpenLI Mediator: failed to subscribe to consumer %s queue for LIID %s", histr, m->liid); + *delflag = 0; + } else { + *delflag = 0; + } - return ret; + return 0; } -/** Modify a handover's epoll event to check if writing is possible. - * - * If an error occurs, the handover will be disconnected. +/** Creates an RMQ connection for consumption and registers it with + * the IRI or CC queues for each LIID that is to be exported via this + * handover. 
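+ *
+ *  An illustrative sketch only (the real caller lives elsewhere in the
+ *  mediator): a thread that owns a handover might drive it roughly like
+ *  this on each pass of its main loop:
+ *
+ *      if (register_handover_RMQ_all(ho, liidmap, agencyid, password) < 0) {
+ *          // RMQ unavailable -- try again on the next pass
+ *      } else if (check_handover_rmq_status(ho, agencyid) == 0) {
+ *          xmit_handover_keepalive(ho);
+ *          xmit_handover_records(ho, 1024 * 1024);
+ *      }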
* - * @param ho The handover to modify + * @param ho The handover to be registered with RMQ + * @param liidmap The set of known LIIDs associated with this handover + * @param agencyid The name of the agency that this handover belongs to + * @param password The password to use to authenticate with RMQ * - * @return -1 if an error occurs, 0 otherwise. + * @return -1 if an error occurs during registration, 1 if all LIIDs + * are successfully registered. */ -int enable_handover_writing(handover_t *ho) { - int ret = 0; - uint32_t events = EPOLLRDHUP | EPOLLIN | EPOLLOUT; +int register_handover_RMQ_all(handover_t *ho, liid_map_t *liidmap, + char *agencyid, char *password) { - if (ho->ho_state->outenabled) { - return 0; + /* Attach to RMQ if required */ + if (ho->rmq_consumer == NULL) { + ho->rmq_consumer = join_mediator_RMQ_as_consumer(agencyid, + ho->amqp_log_failure, password); + + if (ho->rmq_consumer == NULL) { + ho->amqp_log_failure = 0; + } else { + ho->amqp_log_failure = 1; + } } - ret = modify_mediator_fdevent(ho->outev, events); + /* If we've just attached to RMQ, register our interest in any existing + * LIIDs that we know about. + */ + if (ho->rmq_registered == 0) { - if (ret == -1) { - logger(LOG_INFO, - "OpenLI Mediator: error while trying to enable xmit for handover %s:%s HI%d -- %s", - ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); - disconnect_handover(ho); - } else { - ho->ho_state->outenabled = 1; - } - return ret; -} + if (liidmap && foreach_liid_agency_mapping(liidmap, (void *)ho, + register_known_liid_consumers) < 0) { + if (ho->amqp_log_failure) { + logger(LOG_INFO, "OpenLI Mediator: unable to register consumer queues for HI%d for agency %s", ho->handover_type, agencyid); -/** Disconnects and drops all known agencies - * - * @param state The global handover state for this mediator. - */ -void drop_all_agencies(handover_state_t *state) { - mediator_agency_t ag; - libtrace_list_t *a = state->agencies; - - pthread_mutex_lock(state->agency_mutex); - - while (libtrace_list_get_size(a) > 0) { - libtrace_list_pop_back(a, &ag); - /* Disconnect the HI2 and HI3 handovers */ - free_handover(ag.hi2); - free_handover(ag.hi3); - if (ag.agencyid) { - free(ag.agencyid); + ho->amqp_log_failure = 0; + } + reset_handover_rmq(ho); + return -1; } + ho->amqp_log_failure = 1; + ho->rmq_registered = 1; } - pthread_mutex_unlock(state->agency_mutex); + return 1; } -/** Attempt to create a handover connection to an LEA. +/** Establish an agency handover connection + * + * The resulting socket will be added to the provided epoll event set as + * available for reading and writing. * - * @param state The global handover state for this mediator - * @param ho The handover that we attempting to connect + * This method also starts the keepalive timer for the handover, if + * keepalives are required. * - * @return -1 if there was a fatal error, 0 if there was a temporary - * error (i.e. try again later) or the handover is already - * connected, 1 if a new successful connection is made. + * @param ho The handover object that is to be connected + * @param epoll_fd The epoll fd to add handover events to + * @param ho_id The unique ID number for this handover + * @param agency_id The name of the agency this handover is connecting to + * + * @return -1 if the connection fails, 0 otherwise. 
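+ *          (in the current implementation, 1 is returned when a brand new
+ *          connection is successfully established; 0 also covers the
+ *          "already connected" and "cannot connect right now, try again
+ *          later" cases)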
*/ -static int connect_handover(handover_state_t *state, handover_t *ho) { +int connect_mediator_handover(handover_t *ho, int epoll_fd, uint32_t ho_id, + char *agencyid) { + uint32_t epollev; int outsock; - /* Grab the lock, just in case the other thread decides to disconnect - * us while we're partway through connecting. - */ - pthread_mutex_lock(&(ho->ho_state->ho_mutex)); - /* Check if we're already connected? */ if (ho->outev) { - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); return 0; } /* Connect the handover socket */ outsock = connect_socket(ho->ipstr, ho->portstr, ho->disconnect_msg, 1); if (outsock == -1) { - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); return -1; } /* If fd is 0, we can try again another time instead */ if (outsock == 0) { ho->disconnect_msg = 1; - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); return 0; } /* Create a buffer for receiving messages (i.e. keep-alive responses) * from the LEA via the handover. */ - ho->ho_state->incoming = (libtrace_scb_t *)malloc(sizeof(libtrace_scb_t)); - ho->ho_state->incoming->fd = -1; - ho->ho_state->incoming->address = NULL; - libtrace_scb_init(ho->ho_state->incoming, (64 * 1024 * 1024), - state->next_handover_id); - - state->next_handover_id ++; - - /* If we've got records to send via this handover, enable it for - * write events in epoll. - */ - if (get_buffered_amount(&(ho->ho_state->buf)) > 0) { - epollev = EPOLLIN | EPOLLOUT | EPOLLRDHUP; - ho->ho_state->outenabled = 1; - } else { - epollev = EPOLLIN | EPOLLRDHUP; - ho->ho_state->outenabled = 0; + if (!ho->ho_state->incoming) { + ho->ho_state->incoming = + (libtrace_scb_t *)malloc(sizeof(libtrace_scb_t)); + ho->ho_state->incoming->fd = -1; + ho->ho_state->incoming->address = NULL; + libtrace_scb_init(ho->ho_state->incoming, (64 * 1024 * 1024), ho_id); } - ho->outev = create_mediator_fdevent(state->epoll_fd, ho, MED_EPOLL_LEA, + /* Enable both epoll reading and writing for this handover */ + epollev = EPOLLIN | EPOLLOUT | EPOLLRDHUP; + + ho->outev = create_mediator_fdevent(epoll_fd, ho, MED_EPOLL_LEA, outsock, epollev); if (ho->outev == NULL) { - logger(LOG_INFO, + if (ho->disconnect_msg == 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to add agency handover for %s:%s HI%d to epoll.", ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); + } ho->disconnect_msg = 1; close(outsock); - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); return 0; } @@ -454,104 +581,10 @@ static int connect_handover(handover_state_t *state, handover_t *ho) { } } - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); + /* Don't reset disconnect_msg until we've sent a record successfully */ return 1; } -/** Attempt to connect all handovers for all known agencies - * - * @param state The global handover state for this mediator - */ -void connect_agencies(handover_state_t *state) { - libtrace_list_node_t *n; - mediator_agency_t *ag; - int ret; - - /* This method will be called regularly by the agency connection - * thread to ensure that as many handovers as up and running as - * possible. - */ - - /* Must have agency_mutex at this point! 
*/ - n = state->agencies->head; - while (n) { - ag = (mediator_agency_t *)(n->data); - n = n->next; - - /* Skip any disabled agencies */ - if (ag->disabled) { - if (!ag->disabled_msg) { - logger(LOG_INFO, - "OpenLI Mediator: cannot connect to agency %s because it is disabled", - ag->agencyid); - ag->disabled_msg = 1; - } - continue; - } - - /* Connect the HI2 handover */ - ret = connect_handover(state, ag->hi2); - if (ret == -1) { - continue; - } - - if (ret == 1) { - logger(LOG_INFO, - "OpenLI Mediator: Connected to agency %s on HI2 %s:%s.", - ag->agencyid, ag->hi2->ipstr, ag->hi2->portstr); - ag->hi2->disconnect_msg = 0; - } - - /* Connect the HI3 handover */ - ret = connect_handover(state, ag->hi3); - if (ret == -1) { - ag->disabled = 1; - continue; - } - - if (ret == 1) { - ag->hi3->disconnect_msg = 0; - logger(LOG_INFO, - "OpenLI Mediator: Connected to agency %s on HI3 %s:%s.", - ag->agencyid, ag->hi3->ipstr, ag->hi3->portstr); - } - - } - -} - -/** Starts the thread that continuously attempts to connect any handovers - * that are not currently active. - * - * @param params The global state for the mediator - */ -static void *start_connect_thread(void *params) { - - handover_state_t *state = (handover_state_t *)params; - - while (1) { - - /* We need a mutex lock here because our set of agencies could - * be modified by a message from the provisioner, which will be - * handled in the main epoll thread. - */ - pthread_mutex_lock(state->agency_mutex); - if (state->halt_flag) { - pthread_mutex_unlock(state->agency_mutex); - break; - } - connect_agencies(state); - pthread_mutex_unlock(state->agency_mutex); - - /* Try again in 0.5 of a second */ - usleep(500000); - } - - logger(LOG_INFO, "OpenLI Mediator: has ended agency connection thread."); - pthread_exit(NULL); - -} - /* Creates a new instance of a handover. * @@ -567,7 +600,7 @@ static void *start_connect_thread(void *params) { * * @return a pointer to a new handover instance, or NULL if an error occurs. */ -static handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, +handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, int handover_type, uint32_t kafreq, uint32_t kawait) { handover_t *ho = (handover_t *)malloc(sizeof(handover_t)); @@ -586,7 +619,6 @@ static handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, /* Initialise all of the handover-specific state for this handover */ init_export_buffer(&(ho->ho_state->buf)); - ho->ho_state->outenabled = 0; ho->ho_state->katimer_setsec = 0; ho->ho_state->incoming = NULL; ho->ho_state->encoder = NULL; @@ -594,39 +626,33 @@ static handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, ho->ho_state->pending_ka = NULL; ho->ho_state->kafreq = kafreq; ho->ho_state->kawait = kawait; + ho->ho_state->next_rmq_ack = 0; + ho->ho_state->valid_rmq_ack = 0; + ho->rmq_consumer = NULL; + ho->rmq_registered = 0; + ho->amqp_log_failure = 1; pthread_mutex_init(&(ho->ho_state->ho_mutex), NULL); /* Keep alive frequency of 0 (or less) will mean that no keep alives are * sent (this may necessary for some agencies). 
*/ - if (kafreq > 0) { - ho->aliveev = create_mediator_timer(epoll_fd, ho, MED_EPOLL_KA_TIMER, - 0); - if (ho->aliveev == NULL) { - logger(LOG_INFO, "OpenLI Mediator: unable to create keep alive timer for agency %s:%s", ipstr, portstr); - } - } else { - logger(LOG_INFO, "OpenLI Mediator: Warning, keep alive timer has been disabled for agency %s:%s", ipstr, portstr); - ho->aliveev = NULL; + ho->aliveev = create_mediator_timer(epoll_fd, ho, MED_EPOLL_KA_TIMER, + 0); + if (ho->aliveev == NULL) { + logger(LOG_INFO, "OpenLI Mediator: unable to create keep alive timer for agency %s:%s", ipstr, portstr); } /* If keep alive wait is 0 (or less), then we will not require a response * for a successful keep alive. */ - if (kawait > 0) { - ho->aliverespev = create_mediator_timer(epoll_fd, ho, - MED_EPOLL_KA_RESPONSE_TIMER, 0); - if (ho->aliverespev == NULL) { - logger(LOG_INFO, "OpenLI Mediator: unable to create keep alive response timer for agency %s:%s", ipstr, portstr); - } - } else { - ho->aliverespev = NULL; + ho->aliverespev = create_mediator_timer(epoll_fd, ho, + MED_EPOLL_KA_RESPONSE_TIMER, 0); + if (ho->aliverespev == NULL) { + logger(LOG_INFO, "OpenLI Mediator: unable to create keep alive response timer for agency %s:%s", ipstr, portstr); } - /* The output event will be created when the handover is connected by the - * connection thread. - */ + /* The output event will be created when the handover is connected */ ho->outev = NULL; /* Initialise the remaining handover state */ @@ -638,375 +664,14 @@ static handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, return ho; } -/** Creates a new instance of an agency. - * - * @param state The global handover state for this mediator. - * @param lea The agency details received from the provisioner. - */ -static void create_new_agency(handover_state_t *state, liagency_t *lea) { - - mediator_agency_t newagency; - - newagency.agencyid = lea->agencyid; - newagency.awaitingconfirm = 0; - newagency.disabled = 0; - newagency.disabled_msg = 0; - - /* Create the HI2 and HI3 handovers */ - newagency.hi2 = create_new_handover(state->epoll_fd, - lea->hi2_ipstr, lea->hi2_portstr, - HANDOVER_HI2, lea->keepalivefreq, lea->keepalivewait); - newagency.hi3 = create_new_handover(state->epoll_fd, - lea->hi3_ipstr, lea->hi3_portstr, - HANDOVER_HI3, lea->keepalivefreq, lea->keepalivewait); - - /* This lock protects the agency list that may be being iterated over - * by the handover connection thread */ - pthread_mutex_lock(state->agency_mutex); - libtrace_list_push_back(state->agencies, &newagency); - - /* Start the handover connection thread if necessary */ - if (libtrace_list_get_size(state->agencies) == 1 && - state->connectthread == -1) { - pthread_create(&(state->connectthread), NULL, start_connect_thread, - state); - } - pthread_mutex_unlock(state->agency_mutex); - -} - -/* Compares a handover announced by the provisioner against an existing - * local instance of the handover to see if there are any changes that - * need to made to update the local handover. If so, the changes are - * actioned (including a possible disconnection of the existing handover if - * need be). - * - * Used when a provisioner re-announces an existing agency -- if the IP - * address or port for a handover changes, for instance, we would need to - * close the handover and re-connect to the new IP + port. - * - * @param state The global handover state for the mediator. - * @param ho The existing local handover instance. 
- * @param ipstr The IP address of the announced handover (as a string). - * @param port The port number of the announced handover (as a string). - * @param existing The existing instance of the parent agency. - * @param newag The new agency details received from the provisioner. - * - * @return -1 if an error occurs, 0 if the handover did not require a reconnect, - * 1 if a reconnect was required. - */ - -static int has_handover_changed(handover_state_t *state, - handover_t *ho, char *ipstr, char *portstr, mediator_agency_t *existing, - liagency_t *newag) { - char *hitypestr; - int changedloc = 0; - int changedkaresp = 0; - int changedkafreq = 0; - - /* TODO this function is a bit awkward at the moment */ - - if (ho == NULL) { - return -1; - } - - pthread_mutex_lock(&(ho->ho_state->ho_mutex)); - - if (!ho->ipstr || !ho->portstr || !ipstr || !portstr) { - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - return -1; - } - - if (newag->keepalivewait != ho->ho_state->kawait && - (newag->keepalivewait == 0 || ho->ho_state->kawait == 0)) { - changedkaresp = 1; - } - - if (newag->keepalivefreq != ho->ho_state->kafreq && - (newag->keepalivefreq == 0 || ho->ho_state->kafreq == 0)) { - changedkafreq = 1; - } - - if (strcmp(ho->ipstr, ipstr) != 0 || strcmp(ho->portstr, portstr) != 0) { - changedloc = 1; - } - - /* Update keep alive timer frequencies */ - ho->ho_state->kawait = newag->keepalivewait; - ho->ho_state->kafreq = newag->keepalivefreq; - - if (!changedkaresp && !changedloc && !changedkafreq) { - /* Nothing has changed so nothing more needs to be done */ - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - return 0; - } - - /* Preparing some string bits for logging */ - if (ho->handover_type == HANDOVER_HI2) { - hitypestr = "HI2"; - } else if (ho->handover_type == HANDOVER_HI3) { - hitypestr = "HI3"; - } else { - hitypestr = "Unknown handover"; - } - - if (changedloc) { - /* Re-connect is going to be required */ - logger(LOG_INFO, - "OpenLI Mediator: %s connection info for LEA %s has changed from %s:%s to %s:%s.", - hitypestr, existing->agencyid, ho->ipstr, ho->portstr, ipstr, portstr); - disconnect_handover(ho); - free(ho->ipstr); - free(ho->portstr); - ho->ipstr = ipstr; - ho->portstr = portstr; - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - return 1; - } - - if (changedkaresp) { - if (newag->keepalivewait == 0) { - /* Keep alive responses are no longer necessary */ - if (ho->handover_type == HANDOVER_HI2) { - /* We only log for HI2 to prevent duplicate logging when the - * same agency-level option is updated for HI3. - */ - logger(LOG_INFO, - "OpenLI Mediator: disabled keep-alive response requirement for LEA %s", - existing->agencyid); - } - /* Stop any existing keep alive response timer to prevent us - * from dropping the handover for our most recent keep alive. - */ - destroy_mediator_timer(ho->aliverespev); - ho->aliverespev = NULL; - } else { - /* Keep alive responses are enabled (or have changed frequency) */ - if (ho->handover_type == HANDOVER_HI2) { - /* We only log for HI2 to prevent duplicate logging when the - * same agency-level option is updated for HI3. 
- */ - logger(LOG_INFO, - "OpenLI Mediator: enabled keep-alive response requirement for LEA %s", - existing->agencyid); - } - if (ho->aliverespev == NULL) { - ho->aliverespev = create_mediator_timer(state->epoll_fd, - ho, MED_EPOLL_KA_RESPONSE_TIMER, 0); - } - } - } - - if (changedkafreq) { - if (newag->keepalivefreq == 0) { - /* Sending keep alives is now disabled */ - if (ho->handover_type == HANDOVER_HI2) { - /* We only log for HI2 to prevent duplicate logging when the - * same agency-level option is updated for HI3. - */ - logger(LOG_INFO, - "OpenLI Mediator: disabled keep-alives for LEA %s", - existing->agencyid); - } - /* Halt the existing keep alive timer */ - destroy_mediator_timer(ho->aliveev); - ho->aliveev = NULL; - } else { - /* Keep alives have been enabled (or changed frequency) */ - if (ho->handover_type == HANDOVER_HI2) { - /* We only log for HI2 to prevent duplicate logging when the - * same agency-level option is updated for HI3. - */ - logger(LOG_INFO, - "OpenLI Mediator: enabled keep-alives for LEA %s", - existing->agencyid); - } - - /* Start a new keep alive timer with the new frequency */ - if (ho->aliveev == NULL) { - ho->aliveev = create_mediator_timer(state->epoll_fd, ho, - MED_EPOLL_KA_TIMER, 0); - } else { - halt_mediator_timer(ho->aliveev); - } - if (start_mediator_timer(ho->aliveev, newag->keepalivefreq) < 0) { - logger(LOG_INFO, - "OpenLI Mediator: unable to restart keepalive timer for handover %s:%s HI%d.", - ho->ipstr, ho->portstr, ho->handover_type, - strerror(errno)); - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - return -1; - } - } - } - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - - return 0; -} - -/** Adds an agency to the known agency list. - * - * If an agency with the same ID already exists, we update its handovers - * to match the details we just received. - * - * If the agency was awaiting confirmation after a lost provisioner - * connection, it will be marked as confirmed. - * - * @param state The global handover state for the mediator - * @param agencyid The agency to add to the list. - * - * @return -1 if an error occurs, 0 otherwise. - */ -int enable_agency(handover_state_t *state, liagency_t *lea) { - - int ret = 0; - libtrace_list_node_t *n; - - /* Add / enable the agency in the agency list -- note that lock to protect - * concurrent access to the list by the handover connection thread. - */ - pthread_mutex_lock(state->agency_mutex); - n = state->agencies->head; - while (n) { - mediator_agency_t *x = (mediator_agency_t *)(n->data); - n = n->next; - - if (strcmp(x->agencyid, lea->agencyid) == 0) { - /* Agency with this ID already exists; check if this - * announcement contains differences to our last knowledge of - * this agency. - */ - if ((ret = has_handover_changed(state, x->hi2, lea->hi2_ipstr, - lea->hi2_portstr, x, lea)) == -1) { - x->disabled = 1; - x->disabled_msg = 0; - goto freelea; - } else if (ret == 1) { - lea->hi2_portstr = NULL; - lea->hi2_ipstr = NULL; - } - - if ((ret = has_handover_changed(state, x->hi3, lea->hi3_ipstr, - lea->hi3_portstr, x, lea)) == -1) { - x->disabled = 1; - x->disabled_msg = 0; - goto freelea; - } else if (ret == 1) { - lea->hi3_portstr = NULL; - lea->hi3_ipstr = NULL; - } - - x->awaitingconfirm = 0; - x->disabled = 0; - ret = 0; - goto freelea; - } - } - - /* If we get here, this is an entirely new agency so we can create a - * fresh instance (plus handovers). 
- */ - pthread_mutex_unlock(state->agency_mutex); - create_new_agency(state, lea); - return 0; - -freelea: - /* If we get here, the agency already existed in our list so we can - * remove any extra memory left over from the announcement (e.g. - * IP address or port strings that were unchanged). - */ - pthread_mutex_unlock(state->agency_mutex); - if (lea->agencyid) { - free(lea->agencyid); - } - if (lea->hi2_portstr) { - free(lea->hi2_portstr); - } - if (lea->hi2_ipstr) { - free(lea->hi2_ipstr); - } - if (lea->hi3_portstr) { - free(lea->hi3_portstr); - } - if (lea->hi3_ipstr) { - free(lea->hi3_ipstr); - } - return ret; -} - -/** Disables a specific agency. - * - * A disabled agency will have its handovers disconnected and they - * will not be reconnected until the provisioner announces the agency - * is valid again. - * - * @param state The global handover state for the mediator - * @param agencyid The ID of the agency to be disabled, as a string. - */ -void withdraw_agency(handover_state_t *state, char *agencyid) { - libtrace_list_node_t *n; - - /* Disable the agency in the agency list -- note that lock to protect - * concurrent access to the list by the handover connection thread. - */ - pthread_mutex_lock(state->agency_mutex); - n = state->agencies->head; - while (n) { - mediator_agency_t *x = (mediator_agency_t *)(n->data); - n = n->next; - - if (strcmp(x->agencyid, agencyid) == 0) { - /* We've found the agency with the appropriate ID to withdraw */ - x->disabled = 1; - x->disabled_msg = 0; - disconnect_handover(x->hi2); - disconnect_handover(x->hi3); - - /* Note that we leave the agency in the list -- it's simpler to do - * that than to actually try and remove it. - * - * TODO replace agency list with a Judy map keyed by agency id. - */ - break; - } - } - pthread_mutex_unlock(state->agency_mutex); -} - -/** Restarts the keep alive timer for a handover - * - * @param ho The handover to restart the keep alive timer for - * - * @return -1 if an error occurs, 0 otherwise - */ -int restart_handover_keepalive(handover_t *ho) { - - int ret = 0; - pthread_mutex_lock(&(ho->ho_state->ho_mutex)); - - halt_mediator_timer(ho->aliveev); - if (start_mediator_timer(ho->aliveev, ho->ho_state->kafreq) == -1) { - if (ho->disconnect_msg == 0) { - logger(LOG_INFO, - "OpenLI Mediator: unable to reset keepalive timer for %s:%s HI%d :s", - ho->ipstr, ho->portstr, ho->handover_type, strerror(errno)); - } - ret = -1; - } - - pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); - return ret; -} - /** Receives and actions a message sent to the mediator over a handover * (typically a keep alive response). * - * @param mev The epoll event for the handover + * @param ho The handover to receive the message on * * @return -1 if an error occurs, 0 otherwise. 
*/ -int receive_handover(med_epoll_ev_t *mev) { - handover_t *ho = (handover_t *)(mev->state); +int receive_handover(handover_t *ho) { int ret; uint8_t *ptr = NULL; uint32_t reclen = 0; @@ -1015,7 +680,8 @@ int receive_handover(med_epoll_ev_t *mev) { /* receive the incoming message into a local SCB */ pthread_mutex_lock(&(ho->ho_state->ho_mutex)); - ret = libtrace_scb_recv_sock(ho->ho_state->incoming, mev->fd, MSG_DONTWAIT); + ret = libtrace_scb_recv_sock(ho->ho_state->incoming, ho->outev->fd, + MSG_DONTWAIT); if (ret == -1) { if (errno == EAGAIN || errno == EWOULDBLOCK) { pthread_mutex_unlock(&(ho->ho_state->ho_mutex)); @@ -1079,8 +745,8 @@ int receive_handover(med_epoll_ev_t *mev) { } /* logger(LOG_INFO, "OpenLI mediator -- received KA response for %ld from LEA handover %s:%s HI%d", - recvseq, mas->parent->ipstr, mas->parent->portstr, - mas->parent->handover_type); + recvseq, ho->ipstr, ho->portstr, + ho->handover_type); */ halt_mediator_timer(ho->aliverespev); libtrace_scb_advance_read(ho->ho_state->incoming, reclen); @@ -1108,38 +774,58 @@ int receive_handover(med_epoll_ev_t *mev) { return 0; } -/** Finds an agency that matches a given ID in the agency list +/** Checks if a handover's RMQ connection is still alive and error-free. If + * not, destroy the connection and reset it to NULL * - * @param state The global handover state for the mediator - * @param id A string containing the agency ID to search for + * @param ho The handover which needs its RMQ connection checked. + * @param agencyid The name of the agency that the handover belongs to (for + * logging purposes). * - * @return a pointer to the agency with the given ID, or NULL if no such - * agency is found. + * @return -1 if the RMQ connection was destroyed, 0 otherwise */ -mediator_agency_t *lookup_agency(handover_state_t *state, char *id) { - - mediator_agency_t *ma; - libtrace_list_t *alist; - libtrace_list_node_t *n; - - pthread_mutex_lock(state->agency_mutex); - alist = state->agencies; +int check_handover_rmq_status(handover_t *ho, char *agencyid) { + const char *hi_str = NULL; + int r; - /* Fingers crossed we don't have too many agencies at any one time. */ - - n = alist->head; - while (n) { - ma = (mediator_agency_t *)(n->data); - n = n->next; + if (ho->handover_type == HANDOVER_HI2) { + hi_str = "HI2"; + r = consume_mediator_iri_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 1, &(ho->ho_state->next_rmq_ack)); + } else { + hi_str = "HI3"; + r = consume_mediator_cc_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 1, &(ho->ho_state->next_rmq_ack)); + } - if (strcmp(ma->agencyid, id) == 0) { - pthread_mutex_unlock(state->agency_mutex); - return ma; - } + if (r == -2) { + logger(LOG_INFO, "OpenLI Mediator: RMQ Heartbeat timer expired for %s handover for agency %s", hi_str, agencyid); + reset_handover_rmq(ho); + return -1; + } else if (r == -1) { + logger(LOG_INFO, "OpenLI Mediator: RMQ connection error for %s handover for agency %s", hi_str, agencyid); + reset_handover_rmq(ho); + return -1; } - pthread_mutex_unlock(state->agency_mutex); - return NULL; + + return 0; } +/** Resets the RMQ state for a given handover. + * + * This is typically used when an error occurs with the RMQ consumer for + * a handover, which will then force the handover to re-register its + * connection to the RMQ service. 
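+ *
+ * Once reset, rmq_consumer is NULL and rmq_registered is cleared, so the
+ * owning LEA send thread will re-run register_handover_RMQ_all() for this
+ * handover on the next pass of its main loop.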
+ * + * @param ho The handover to reset RMQ state for + */ +void reset_handover_rmq(handover_t *ho) { + if (ho->rmq_consumer) { + amqp_destroy_connection(ho->rmq_consumer); + } + /* MUST set to NULL, so that re-registration will take place */ + ho->rmq_consumer = NULL; + ho->rmq_registered = 0; + ho->ho_state->valid_rmq_ack = 0; +} // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/handover.h b/src/mediator/handover.h index 81eaedc5..0414201a 100644 --- a/src/mediator/handover.h +++ b/src/mediator/handover.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -30,19 +30,35 @@ #include #include #include +#include #include "export_buffer.h" #include "med_epoll.h" +#include "liidmapping.h" +/** Possible handover types */ enum { + /** HI2 -- used for transmiting IRIs and other "meta" messages */ HANDOVER_HI2 = 2, + + /** HI3 -- used for transmitting CCs */ HANDOVER_HI3 = 3, + + /** Raw IP -- OpenLI-specific handover for writing raw IP packets to + * pcap files on disk + */ + HANDOVER_RAWIP = 4, }; +/** State that needs to be retained for each mediator handover */ typedef struct per_handover_state { + /** A buffer for storing data queued for sending over the handover */ export_buffer_t buf; + + /** A buffer for storing data received over the handover + * (e.g. keepalives) + */ libtrace_scb_t *incoming; - int outenabled; uint32_t katimer_setsec; wandder_encoded_result_t *pending_ka; int64_t lastkaseq; @@ -51,12 +67,17 @@ typedef struct per_handover_state { uint32_t kafreq; uint32_t kawait; pthread_mutex_t ho_mutex; + uint64_t next_rmq_ack; + uint8_t valid_rmq_ack; } per_handover_state_t; typedef struct handover { char *ipstr; char *portstr; int handover_type; + amqp_connection_state_t rmq_consumer; + int amqp_log_failure; + uint8_t rmq_registered; med_epoll_ev_t *outev; med_epoll_ev_t *aliveev; med_epoll_ev_t *aliverespev; @@ -64,15 +85,6 @@ typedef struct handover { uint8_t disconnect_msg; } handover_t; -typedef struct handover_state { - uint16_t next_handover_id; - int epoll_fd; - libtrace_list_t *agencies; - pthread_mutex_t *agency_mutex; - int halt_flag; - pthread_t connectthread; -} handover_state_t; - typedef struct mediator_agency { char *agencyid; int awaitingconfirm; @@ -82,111 +94,143 @@ typedef struct mediator_agency { handover_t *hi3; } mediator_agency_t; -/** Send some buffered ETSI records out via a handover. +/** Destroys the state for a particular agency entity, including its + * corresponding handovers * - * If there is a keep alive message pending for this handover, that will - * be sent before sending any buffered records. + * @param ag The agency to be destroyed. + */ +void destroy_agency(mediator_agency_t *ag); + +/** Sends a buffer of ETSI records out via a handover. * - * @param mev The epoll event for the handover + * @param ho The handover to send the records over + * @param maxsend The maximum amount of data to send (in bytes) * * @return -1 is an error occurs, 0 otherwise. */ -int xmit_handover(med_epoll_ev_t *mev); +int xmit_handover_records(handover_t *ho, uint32_t maxsend); -/** Disconnects a single mediator handover connection to an LEA. +/** Sends any pending keep-alive message out via a handover. * - * Typically triggered when an LEA is withdrawn, becomes unresponsive, - * or fails a keep-alive test. 
+ * @param ho The handover to send the keep-alive over * - * @param ho The handover that is being disconnected. + * @return -1 is an error occurs, 0 otherwise. */ -void disconnect_handover(handover_t *ho); +int xmit_handover_keepalive(handover_t *ho); -/** Disconnects and drops all known agencies +/** React to a handover's failure to respond to a keep alive before the + * response timer expired. + * + * @param ho The handover that failed to reply to a KA message * - * @param state The global handover state for this mediator. */ -void drop_all_agencies(handover_state_t *state); +void trigger_handover_ka_failure(handover_t *ho); -/** Attempt to connect all handovers for all known agencies +/** Creates and sends a keep-alive message over a handover * - * @param state The global handover state for this mediator + * @param ho The handover that needs to send a keep alive + * @param mediator_id The ID of this mediator (to be included in the KA msg) + * @param operator_id The operator ID string (to be included in the KA msg) + * + * @return -1 if an error occurs, 0 otherwise */ -void connect_agencies(handover_state_t *state); +int trigger_handover_keepalive(handover_t *ho, uint32_t mediator_id, + char *operator_id); -/** Adds an agency to the known agency list. +/** Disconnects a single mediator handover connection to an LEA. * - * If an agency with the same ID already exists, we update its handovers - * to match the details we just received. + * Typically triggered when an LEA is withdrawn, becomes unresponsive, + * or fails a keep-alive test. * - * If the agency was awaiting confirmation after a lost provisioner - * connection, it will be marked as confirmed. + * @param ho The handover that is being disconnected. + */ +void disconnect_handover(handover_t *ho); + +/** Receives and actions a message sent to the mediator over a handover + * (typically a keep alive response). * - * @param state The global handover state for the mediator - * @param agencyid The agency to add to the list. + * @param ho The handover to receive the message on * * @return -1 if an error occurs, 0 otherwise. */ -int enable_agency(handover_state_t *state, liagency_t *lea); +int receive_handover(handover_t *ho); -/** Disables a specific agency. +/* Creates a new instance of a handover. * - * A disabled agency will have its handovers disconnected and they - * will not be reconnected until the provisioner announces the agency - * is valid again. + * @param epoll_fd The global epoll fd for the mediator. + * @param ipstr The IP address of the handover recipient (as a string). + * @param portstr The port that the handover recipient is listening on + * (as a string). + * @param handover_type Either HANDOVER_HI2 or HANDOVER_HI3, to indicate which + * type of handover this is. + * @param kafreq The frequency to send keep alive requests (in seconds). + * @param kawait The time to wait before assuming a keep alive has + * failed (in seconds). * - * @param state The global handover state for the mediator - * @param agencyid The ID of the agency to be disabled, as a string. + * @return a pointer to a new handover instance, or NULL if an error occurs. */ -void withdraw_agency(handover_state_t *state, char *agencyid); +handover_t *create_new_handover(int epoll_fd, char *ipstr, char *portstr, + int handover_type, uint32_t kafreq, uint32_t kawait); -/** Modify a handover's epoll event to check if writing is possible. +/** Establish an agency handover connection * - * If an error occurs, the handover will be disconnected. 
+ * The resulting socket will be added to the provided epoll event set as + * available for reading and writing. * - * @param ho The handover to modify + * This method also starts the keepalive timer for the handover, if + * keepalives are required. * - * @return -1 if an error occurs, 0 otherwise. + * @param ho The handover object that is to be connected + * @param epoll_fd The epoll fd to add handover events to + * @param ho_id The unique ID number for this handover + * @param agency_id The name of the agency this handover is connecting to + * + * @return -1 if the connection fails, 0 otherwise. */ -int enable_handover_writing(handover_t *ho); +int connect_mediator_handover(handover_t *ho, int epoll_fd, uint32_t ho_id, + char *agencyid); -/** Modify a handover's epoll event to NOT check if writing is possible. +/** Releases all memory associated with a single handover object. * - * If an error occurs, the handover will be disconnected. - * - * @param ho The handover to modify - * - * @return -1 if an error occurs, 0 otherwise. + * @param ho The handover object that is being destroyed */ -int disable_handover_writing(handover_t *ho); +void free_handover(handover_t *ho); -/** Restarts the keep alive timer for a handover +/** Creates an RMQ connection for consumption and registers it with + * the IRI or CC queues for each LIID that is to be exported via this + * handover. * - * @param ho The handover to restart the keep alive timer for + * @param ho The handover to be registered with RMQ + * @param liidmap The set of known LIIDs associated with this handover + * @param agencyid The name of the agency that this handover belongs to + * @param password The password to use to authenticate with RMQ * - * @return -1 if an error occurs, 0 otherwise + * @return -1 if an error occurs during registration, 1 if all LIIDs + * are successfully registered. */ -int restart_handover_keepalive(handover_t *ho); +int register_handover_RMQ_all(handover_t *ho, liid_map_t *liidmap, + char *agencyid, char *password); -/** Receives and actions a message sent to the mediator over a handover - * (typically a keep alive response). +/** Resets the RMQ state for a given handover. * - * @param mev The epoll event for the handover + * This is typically used when an error occurs with the RMQ consumer for + * a handover, which will then force the handover to re-register its + * connection to the RMQ service. * - * @return -1 if an error occurs, 0 otherwise. + * @param ho The handover to reset RMQ state for */ -int receive_handover(med_epoll_ev_t *mev); +void reset_handover_rmq(handover_t *ho); -/** Finds an agency that matches a given ID in the agency list +/** Checks if a handover's RMQ connection is still alive and error-free. If + * not, destroy the connection and reset it to NULL * - * @param state The global handover state for the mediator - * @param id A string containing the agency ID to search for + * @param ho The handover which needs its RMQ connection checked. + * @param agencyid The name of the agency that the handover belongs to (for + * logging purposes). * - * @return a pointer to the agency with the given ID, or NULL if no such - * agency is found. 
+ * @return -1 if the RMQ connection was destroyed, 0 otherwise */ -mediator_agency_t *lookup_agency(handover_state_t *state, char *id); - +int check_handover_rmq_status(handover_t *ho, char *agencyid); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/lea_send_thread.c b/src/mediator/lea_send_thread.c new file mode 100644 index 00000000..6cec4403 --- /dev/null +++ b/src/mediator/lea_send_thread.c @@ -0,0 +1,1361 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#include +#include +#include "logger.h" +#include "lea_send_thread.h" +#include "mediator_rmq.h" +#include "handover.h" +#include "agency.h" + +/** The code in this source file implements an "LEA send" thread for the + * OpenLI mediator. + * Each agency that is configured with the OpenLI provisioner will be + * handled using a separate instance of one of these threads. + * + * The core functionality of an LEA send thread is to: + * - establish the handovers to the agency for both HI2 and HI3. + * - consume any IRIs or CCs for LIIDs that belong to the agency from + * their respective internal RMQ queue, placing them in an export + * buffer for the corresponding handover. + * - send data from the export buffer over the handover socket, when the + * LEA end is able to receive data. + * - send periodic keepalives on each handover, as required. + * + * A lot of the code in this file is re-used by the pcap thread code, hence + * some functions being "public" that might not make sense at first glance. + * They're likely being used by the pcap thread code as well, so resist + * the urge to revert them to being static or merge them into other + * static methods in this file :) + */ + +/** Initialises the agency state for an LEA send thread. + * + * Includes creation of the handover instances for the agency. 
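+ *
+ * Note that ownership of the HI2/HI3 address and port strings is taken
+ * from the provisioner-supplied liagency_t (the corresponding fields in
+ * fromprov are set to NULL), so the caller must not free them afterwards.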
+ * + * @param agency The agency state to be initialised + * @param fromprov The agency details received from the provisioner + * @param epollfd The epoll file descriptor for the LEA send thread + */ +static void init_mediator_agency(mediator_agency_t *agency, + liagency_t *fromprov, int epollfd) { + + agency->awaitingconfirm = 0; + agency->agencyid = strdup(fromprov->agencyid); + agency->disabled = 0; + agency->disabled_msg = 0; + agency->hi2 = create_new_handover(epollfd, fromprov->hi2_ipstr, + fromprov->hi2_portstr, HANDOVER_HI2, fromprov->keepalivefreq, + fromprov->keepalivewait); + + agency->hi3 = create_new_handover(epollfd, fromprov->hi3_ipstr, + fromprov->hi3_portstr, HANDOVER_HI3, fromprov->keepalivefreq, + fromprov->keepalivewait); + + fromprov->hi2_ipstr = NULL; + fromprov->hi2_portstr = NULL; + fromprov->hi3_ipstr = NULL; + fromprov->hi3_portstr = NULL; +} + + +/** Starts a timer which, when expired, will cause this LEA send thread + * to terminate. + * + * This timer is triggered when the provisioner disconnects, as we now + * cannot be sure that our handovers are still pointing at legitimate + * agencies. However, if the provisioner is able to reconnect quickly, it + * is nicer to have avoided any handover interruption during that brief + * period where it was away (e.g. in cases where the provisioner is + * restarted to apply new config). + * + * So instead, we give the provisioner a small window to reconnect and + * only if it doesn't come back in time, then we go ahead and terminate the + * handovers (and the LEA thread itself). + * + * Note: if the provisioner does come back after this timer expires, the + * thread will get created anew if the agency remains active in the + * provisioner intercept configuration. + * + * @param state The state object for this LEA send thread + * @param timeout The number of seconds to set the shutdown timer for + */ +static inline void start_shutdown_timer(lea_thread_state_t *state, + uint16_t timeout) { + + halt_mediator_timer(state->cleanse_liids); + halt_mediator_timer(state->shutdown_wait); + if (start_mediator_timer(state->shutdown_wait, timeout) == -1) { + logger(LOG_INFO, "OpenLI Mediator: unable to set shutdown timer for agency thread %s", state->agencyid); + } + + /* set all known LIIDs to be "unconfirmed" */ + /* XXX technically, we only need to do this when the provisioner + * has reconnected (i.e. when timeout is 60 seconds or less), but I + * don't think it will hurt too much to do this for the case where the + * provisioner has disconnected and we don't know if it is coming back... + */ + foreach_liid_agency_mapping(&(state->active_liids), NULL, + set_liid_as_unconfirmed); + +} + +/** Deletes any RMQ internal queues for an LIID that has both been withdrawn + * (i.e. the intercept is no longer active) AND the queue contains + * no more outstanding messages to be consumed. + * + * Used as a callback for foreach_liid_agency_mapping() to periodcally + * tidy up any extraneous RMQ internal queues. + * + * @param m The LIID to be checked for empty queues + * @param agencyarg The agency that this thread belongs to + * + * @return 1 if both the CC and IRI queues for the LIID have been removed, + * 0 if at least one queue remains or the LIID has not been + * withdrawn yet. + */ +static int purge_empty_withdrawn_liid_queues(liid_map_entry_t *m, + void *agencyarg) { + + mediator_agency_t *agency = (mediator_agency_t *)agencyarg; + int r; + + /* Don't delete queues for LIIDs that are still active! 
*/ + if (m->withdrawn == 0) { + return 0; + } + + if (m->iriqueue_deleted == 0) { + r = check_empty_mediator_iri_RMQ(agency->hi2->rmq_consumer, + m->liid); + if (r > 0) { + /* TODO delete amqp queue */ + m->iriqueue_deleted = 1; + } + } + + if (m->ccqueue_deleted == 0) { + r = check_empty_mediator_cc_RMQ(agency->hi3->rmq_consumer, + m->liid); + if (r > 0) { + /* TODO delete amqp queue */ + m->ccqueue_deleted = 1; + } + } + + if (m->ccqueue_deleted && m->iriqueue_deleted) { + return 1; + } + + return 0; +} + +/** Updates the handovers for an agency based on new information sent + * by the provisioner. + * + * @param currag The current agency state + * @param newag The agency information sent by the provisioner + * @param epollfd The epoll file descriptor for this agency thread + */ +static void update_agency_handovers(mediator_agency_t *currag, + liagency_t *newag, int epollfd) { + + /* If a current handover is NULL or missing an IP or port, just + * try to create a new handover instead. + */ + if (currag->hi2 == NULL || currag->hi2->ipstr == NULL || + currag->hi2->portstr == NULL) { + currag->hi2 = create_new_handover(epollfd, newag->hi2_ipstr, + newag->hi2_portstr, HANDOVER_HI2, newag->keepalivefreq, + newag->keepalivewait); + } else if (strcmp(newag->hi2_ipstr, currag->hi2->ipstr) != 0 || + strcmp(newag->hi2_portstr, currag->hi2->portstr) != 0) { + /* HI2 has changed */ + disconnect_handover(currag->hi2); + + free(currag->hi2->ipstr); + currag->hi2->ipstr = newag->hi2_ipstr; + newag->hi2_ipstr = NULL; + + free(currag->hi2->portstr); + currag->hi2->portstr = newag->hi2_portstr; + newag->hi2_portstr = NULL; + + } + + if (currag->hi3 == NULL || currag->hi3->ipstr == NULL || + currag->hi3->portstr == NULL) { + currag->hi3 = create_new_handover(epollfd, newag->hi3_ipstr, + newag->hi3_portstr, HANDOVER_HI3, newag->keepalivefreq, + newag->keepalivewait); + } else if (strcmp(newag->hi3_ipstr, currag->hi3->ipstr) != 0 || + strcmp(newag->hi3_portstr, currag->hi3->portstr) != 0) { + /* HI3 has changed */ + disconnect_handover(currag->hi3); + + free(currag->hi3->ipstr); + currag->hi3->ipstr = newag->hi3_ipstr; + newag->hi3_ipstr = NULL; + + free(currag->hi3->portstr); + currag->hi3->portstr = newag->hi3_portstr; + newag->hi3_portstr = NULL; + } + + /* Make sure keepalive frequencies are up to date -- won't affect + * outstanding KAs but will apply to subsequent ones + */ + currag->hi2->ho_state->kafreq = newag->keepalivefreq; + currag->hi2->ho_state->kawait = newag->keepalivewait; + currag->hi3->ho_state->kafreq = newag->keepalivefreq; + currag->hi3->ho_state->kawait = newag->keepalivewait; + + free_liagency(newag); +} + +/** Sends intercept records from a handover's local buffer to the + * corresponding agency. + * + * @param ho The handover to send records from + * @param state The state object for the LEA send thread + * + * @return -1 if an error occurred while sending, -2 if an error + * occurred while acknowledging the sent records in RMQ, + * 0 if no records were sent, 1 otherwise. 
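+ *
+ * A minimal sketch of the expected caller behaviour (mirroring
+ * consume_available_rmq_records() below): -2 means the RMQ consumer is
+ * broken, -1 means the handover itself has failed.
+ *
+ *     r = send_available_rmq_records(ho, state);
+ *     if (r == -2) {
+ *         reset_handover_rmq(ho);     // force RMQ re-registration
+ *     } else if (r == -1) {
+ *         return -1;                  // caller disconnects the handover
+ *     }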
+ */ +static inline int send_available_rmq_records(handover_t *ho, + lea_thread_state_t *state) { + int r; + + if (get_buffered_amount(&(ho->ho_state->buf)) == 0) { + /* No records available to send */ + return 0; + } + + /* Send up 16MB at a time */ + if (xmit_handover_records(ho, 1024 * 1024 * 16) < 0) { + return -1; + } + + /* We only acknowledge in RMQ once the whole message set has been + * sent, so try to avoid buffering too many messages at once */ + if (get_buffered_amount(&(ho->ho_state->buf)) == 0 && + ho->ho_state->valid_rmq_ack) { + if (ho->handover_type == HANDOVER_HI2) { + if (ack_mediator_iri_messages(ho->rmq_consumer, + ho->ho_state->next_rmq_ack) < 0) { + logger(LOG_INFO, "OpenLI Mediator: error while acknowledging sent data from internal IRI queue by agency %s", state->agencyid); + return -2; + } + } else if (ho->handover_type == HANDOVER_HI3) { + if ((r = ack_mediator_cc_messages(ho->rmq_consumer, + ho->ho_state->next_rmq_ack)) != 0) { + logger(LOG_INFO, "OpenLI Mediator: error while acknowledging sent data from internal CC queue by agency %s: %d", state->agencyid, r); + return -2; + } + } + ho->ho_state->valid_rmq_ack = 0; + } + return 1; +} + +/** Consumes any available intercept records from the RMQ connection for + * a particular handover and tries to send them to the receiving agency. + * + * Only consumes if the handover local buffer is empty, otherwise this + * function will try to send and acknowledge the existing buffer contents + * first. + * + * @param ho The handover to consume and send records for + * @param state The state object for the LEA send thread + * + * @return -1 if a fatal error occurs with the handover, 0 otherwise + */ +static int consume_available_rmq_records(handover_t *ho, + lea_thread_state_t *state) { + + int r; + + if (ho->rmq_registered == 0) { + return 0; + } + + /* If we have records in the buffer already, try to send and + * acknowledge those. + */ + if ((r = send_available_rmq_records(ho, state)) == 1) { + ho->disconnect_msg = 0; + return 0; + } else if (r == -2) { + reset_handover_rmq(ho); + return 0; + } else if (r == -1) { + return -1; + } + + r = 0; + /* Otherwise, read some new messages from RMQ and try to send those */ + if (ho->handover_type == HANDOVER_HI3) { + r = consume_mediator_cc_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 32, &(ho->ho_state->next_rmq_ack)); + if (r < 0) { + reset_handover_rmq(ho); + logger(LOG_INFO, "OpenLI Mediator: error while consuming CC messages from internal queue by agency %s", state->agencyid); + return 0; + } else if (r > 0) { + ho->ho_state->valid_rmq_ack = 1; + } + } else if (ho->handover_type == HANDOVER_HI2) { + r = consume_mediator_iri_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 32, &(ho->ho_state->next_rmq_ack)); + if (r < 0) { + reset_handover_rmq(ho); + logger(LOG_INFO, "OpenLI Mediator: error while consuming IRI messages from internal queue by agency %s", state->agencyid); + return 0; + } else if (r > 0) { + ho->ho_state->valid_rmq_ack = 1; + } + } + + /* We can reset the RMQ heartbeat timer because any pending heartbeats + * will have been handled when we consumed just earlier. + */ + if (ho->rmq_consumer) { + halt_mediator_timer(state->rmqhb); + start_mediator_timer(state->rmqhb, state->rmq_hb_freq); + } + + /* If our earlier "consume" got us some intercept records, try to send + * them now. 
+ */ + r = send_available_rmq_records(ho, state); + if (r > 0) { + ho->disconnect_msg = 0; + return 0; + } else if (r == -2) { + reset_handover_rmq(ho); + return 0; + } + + return r; +} + +/** De-registers the RMQ consumers for an LIID that has not been + * re-confirmed as still active by a reconnecting provisioner. + * + * Used as a callback for foreach_liid_agency_mapping() to de-register all + * unconfirmed LIIDs in the LIID set. + * + * @param m The LIID that was not confirmed by the provisioner + * @param arg The state object for the LEA send thread + * + * @return 0 always + */ +static int set_unconfirmed_liid_as_withdrawn(liid_map_entry_t *m, void *arg) { + lea_thread_state_t *state = (lea_thread_state_t *)arg; + if (m->unconfirmed == 0) { + return 0; + } + + /* de-register from RMQ just in case this LIID is changing agency for + * some reason, so we don't steal any future records put in the LIID's + * RMQ queues + */ + if (state->agency.hi2->rmq_consumer != NULL) { + deregister_mediator_iri_RMQ_consumer( + state->agency.hi2->rmq_consumer, m->liid); + } + if (state->agency.hi3->rmq_consumer != NULL) { + deregister_mediator_cc_RMQ_consumer( + state->agency.hi3->rmq_consumer, m->liid); + } + + logger(LOG_INFO, "OpenLI Mediator: withdrawing unconfirmed LIID %s from agency thread %s", + m->liid, state->agencyid); + m->withdrawn = 1; + return 0; +} + +/** Handle any outstanding heartbeats for this thread's RMQ connections and + * tidy up any unused RMQ internal queues. + * + * Should be called periodically using a epoll timer event. + * + * @param state The state object for this LEA send thread + * @param mev The mediator epoll timer event that fired to trigger + * this function being called + * + * @return 0 if the triggering timer is unable to be reset, 1 otherwise. + */ +int agency_thread_action_rmqcheck_timer(lea_thread_state_t *state, + med_epoll_ev_t *mev) { + halt_mediator_timer(mev); + /* service RMQ connections */ + check_handover_rmq_status(state->agency.hi2, state->agencyid); + check_handover_rmq_status(state->agency.hi3, state->agencyid); + + /* Remove any empty LIID queues that have been withdrawn */ + foreach_liid_agency_mapping(&(state->active_liids), + (void *)(&(state->agency)), + purge_empty_withdrawn_liid_queues); + + if (start_mediator_timer(mev, state->rmq_hb_freq) < 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to reset RMQ heartbeat timer in agency thread for %s: %s", state->agencyid, strerror(errno)); + return 0; + } + return 1; +} + + +/** Loops over the set of known LIIDs and withdraws any that have not been + * confirmed by the provisioner since it last (re-)connected. + * + * Should be called via a epoll timer event set for some amount of time + * after a provisioner has re-connected to the main mediator thread. + * + * @param state The state object for this LEA send thread + * + * @return 0 always + */ +int agency_thread_action_cease_liid_timer(lea_thread_state_t *state) { + halt_mediator_timer(state->cleanse_liids); + foreach_liid_agency_mapping(&(state->active_liids), state, + set_unconfirmed_liid_as_withdrawn); + return 0; +} + +/** Calls the appropriate action method for a mediator epoll event + * observed by a LEA send thread. + * + * @param state The state object for the LEA send thread + * @param ev The epoll event that was observed + * + * @return -1 if an error occurs, 1 if the epoll loop needs to be forced + * to "break", 0 otherwise. 
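+ *
+ * The caller (run_agency_thread) treats a return value of -1 as fatal for
+ * the whole thread, whereas a return value of 1 simply breaks out of the
+ * inner epoll_wait() loop so that messages from the main thread can be
+ * checked.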
+ */ +static int agency_thread_epoll_event(lea_thread_state_t *state, + struct epoll_event *ev) { + + med_epoll_ev_t *mev = (med_epoll_ev_t *)(ev->data.ptr); + int ret = 0; + handover_t *ho; + + switch (mev->fdtype) { + case MED_EPOLL_SIGCHECK_TIMER: + if (ev->events & EPOLLIN) { + /* Time to check for messages from the parent thread again; + * force the epoll loop to break */ + ret = 1; + } else { + logger(LOG_INFO, "OpenLI Mediator: main epoll timer has failed in agency thread for %s", state->agencyid); + ret = 0; + } + break; + case MED_EPOLL_RMQCHECK_TIMER: + /* timer to perform regular RMQ "maintenance" tasks */ + ret = agency_thread_action_rmqcheck_timer(state, mev); + break; + case MED_EPOLL_SHUTDOWN_LEA_THREAD: + /* shutdown timer has expired, end this LEA thread */ + logger(LOG_INFO, "OpenLI Mediator: shutdown timer expired for agency thread %s", state->agencyid); + ret = -1; + break; + + case MED_EPOLL_CEASE_LIID_TIMER: + /* remove any unconfirmed LIIDs in our LIID set */ + ret = agency_thread_action_cease_liid_timer(state); + break; + + case MED_EPOLL_KA_TIMER: + /* we are due to send a keep alive */ + ho = (handover_t *)(mev->state); + trigger_handover_keepalive(ho, state->mediator_id, + state->operator_id); + ret = 0; + break; + case MED_EPOLL_KA_RESPONSE_TIMER: + /* we've gone too long without a response to our last keep alive */ + ho = (handover_t *)(mev->state); + halt_mediator_timer(mev); + trigger_handover_ka_failure(ho); + /* Pause briefly to allow the other end to realise we're gone + * before we try to reconnect the handover + */ + usleep(500000); + ret = 1; // force main thread loop to restart + break; + case MED_EPOLL_LEA: + ho = (handover_t *)(mev->state); + /* the handover is available for either writing or reading */ + if (ev->events & EPOLLRDHUP) { + /* actually, the socket connection has failed -- bail */ + ret = -1; + } else if (ev->events & EPOLLIN) { + /* message from LEA -- hopefully a keep-alive response */ + ret = receive_handover(ho); + } else if (ev->events & EPOLLOUT) { + /* handover is able to send buffered records */ + + /* If we're due to send a keep alive, do that first */ + if (ho->ho_state->pending_ka) { + ret = xmit_handover_keepalive(ho); + } + + /* As long as we have an unanswered keep alive, hold off on + * sending any buffered records -- the recipient may be + * unavailable and we'd be better off to keep those records in + * zmq until we're confident that they're able to receive them. + */ + if (ret != -1 && ho->aliverespev && ho->aliverespev->fd != -1) { + ret = 0; + } else { + ret = consume_available_rmq_records(ho, state); + } + } else { + ret = -1; + } + if (ret == -1) { + /* Something went wrong, disconnect and try again */ + disconnect_handover(ho); + ret = 0; + } + break; + default: + logger(LOG_INFO, "OpenLI Mediator: invalid epoll event type %d seen in agency thread for %s", mev->fdtype, state->agencyid); + ret = -1; + } + + return ret; + +} + +/** Disables an LIID for an LEA send thread. + * + * @param state The state object for the LEA send thread + * @param liid The LIID to disable + * + * @return 1 if successful, 0 if the LIID was not in this thread's LIID set. + */ +int purge_lea_liid_mapping(lea_thread_state_t *state, char *liid) { + + liid_map_entry_t *m = NULL; + + /* An LIID has been withdrawn for this agency -- either because the + * intercept is over or the intercept has changed agencies (?) 
+ */ + + m = lookup_liid_agency_mapping(&(state->active_liids), liid); + if (m == NULL) { + /* This LIID is either already removed, or never was destined for + * this agency to begin with... + */ + return 0; + } + + /* De-register from the LIID's RMQ internal queues */ + if ((deregister_mediator_iri_RMQ_consumer( + state->agency.hi2->rmq_consumer, liid) < 0) || + (deregister_mediator_cc_RMQ_consumer( + state->agency.hi3->rmq_consumer, liid) < 0)) { + logger(LOG_INFO, + "OpenLI Mediator: WARNING failed to deregister RMQ for LIID %s -> %s", + liid, state->agencyid); + } + + /* Remove from this thread's LIID set */ + remove_liid_agency_mapping(&(state->active_liids), m); + logger(LOG_INFO, "OpenLI Mediator: purged LIID %s from agency thread %s", + liid, state->agencyid); + return 1; +} + +/** Adds an LIID to the LIID set for an LEA send thread. + * + * Also registers the corresponding RMQ internal queues with the agency + * handovers so records for that LIID will be consumed by this thread. + * + * @param state The state object for the LEA send thread + * @param liid The LIID to associate with this agency + * + * @return 1 if successful, 0 if the LIID was already in this thread's LIID + * set, -1 if an error occurs. + */ +int insert_lea_liid_mapping(lea_thread_state_t *state, char *liid) { + int r; + + /* Add the LIID to the thread's LIID set */ + r = add_liid_agency_mapping(&(state->active_liids), liid); + if (r < 0) { + logger(LOG_INFO, + "OpenLI Mediator: WARNING failed to add %s -> %s to LIID map", + liid, state->agencyid); + return -1; + } + + if (r == 0) { + /* LIID was already in the map and does not need to + * be registered with RMQ (i.e. wasn't currently + * withdrawn) */ + return 0; + } + + /* Register to consume from the LIID's internal RMQ queues */ + if ((register_mediator_iri_RMQ_consumer( + state->agency.hi2->rmq_consumer, liid) < 0) || + (register_mediator_cc_RMQ_consumer(state->agency.hi3->rmq_consumer, + liid) < 0)) { + logger(LOG_INFO, + "OpenLI Mediator: WARNING failed to register RMQ for LIID %s -> %s", + liid, state->agencyid); + } else { + logger(LOG_INFO, "OpenLI Mediator: added %s -> %s to LIID map", + liid, state->agencyid); + } + return 1; +} + +/** Updates local copies of configuration variables to match the shared + * version of the configuration managed by the main mediator thread. + * + * @param state The state object for the LEA send thread + * + * @return 1 if the RMQ internal password has changed (so all RMQ + * local connections should be restarted, 0 otherwise. + */ +int read_parent_config(lea_thread_state_t *state) { + + int register_required = 0; + + /* To avoid excessive locking, we maintain a local per-thread copy + * of all relevant config options. If the config is re-loaded (e.g. + * via a SIGHUP event observed by the main thread), we need to update + * our local copies to incorporate any changes resulting from the + * reload. 
+ */ + pthread_mutex_lock(&(state->parentconfig->mutex)); + if (state->internalrmqpass) { + if (state->parentconfig->rmqconf->internalpass == NULL || + strcmp(state->parentconfig->rmqconf->internalpass, + state->internalrmqpass) != 0) { + + register_required = 1; + } + free(state->internalrmqpass); + } + if (state->parentconfig->rmqconf->internalpass) { + state->internalrmqpass = + strdup(state->parentconfig->rmqconf->internalpass); + } else { + state->internalrmqpass = NULL; + } + state->rmq_hb_freq = state->parentconfig->rmqconf->heartbeatFreq; + state->mediator_id = state->parentconfig->mediatorid; + state->pcap_compress_level = state->parentconfig->pcap_compress_level; + state->pcap_rotate_frequency = state->parentconfig->pcap_rotate_frequency; + + /* most LEA threads won't need these pcap options, but it's not a + * big cost for us to copy them + */ + if (state->pcap_outtemplate) { + free(state->pcap_outtemplate); + } + if (state->parentconfig->pcap_outtemplate) { + state->pcap_outtemplate = strdup(state->parentconfig->pcap_outtemplate); + } else { + state->pcap_outtemplate = NULL; + } + + if (state->pcap_dir) { + free(state->pcap_dir); + } + if (state->parentconfig->pcap_dir) { + state->pcap_dir = strdup(state->parentconfig->pcap_dir); + } else { + state->pcap_dir = NULL; + } + + if (state->operator_id) { + free(state->operator_id); + } + if (state->parentconfig->operatorid) { + state->operator_id = strdup(state->parentconfig->operatorid); + } else { + state->operator_id = NULL; + } + if (state->short_operator_id) { + free(state->short_operator_id); + } + if (state->parentconfig->shortoperatorid) { + state->short_operator_id = strdup(state->parentconfig->shortoperatorid); + } else { + state->short_operator_id = NULL; + } + pthread_mutex_unlock(&(state->parentconfig->mutex)); + return register_required; +} + +/** Declares and initialises the mediator epoll timer events that are + * used by an LEA send thread (or a pcap writer thread). 
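+ *
+ * Four timers are created: shutdown_wait (ends the thread if the
+ * provisioner stays away too long), cleanse_liids (withdraws LIIDs that
+ * the provisioner did not re-confirm), timerev (the once-per-second
+ * message / signal check) and rmqhb (regular RMQ maintenance). Only the
+ * rmqhb timer is started immediately.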
+ * + * @param state The state object for the LEA send thread + * + * @return -1 if an error occurs, 1 otherwise + */ +int create_agency_thread_timers(lea_thread_state_t *state) { + + /* timer to shutdown the LEA send thread if the provisioner goes + * missing for a while + */ + state->shutdown_wait = create_mediator_timer(state->epoll_fd, NULL, + MED_EPOLL_SHUTDOWN_LEA_THREAD, 0); + + if (state->shutdown_wait == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to create shutdown timer in agency thread for %s", state->agencyid); + return -1; + } + + /* timer for purging unconfirmed LIIDs after a provisioner reconnect */ + state->cleanse_liids = create_mediator_timer(state->epoll_fd, NULL, + MED_EPOLL_CEASE_LIID_TIMER, 0); + + if (state->cleanse_liids == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to create LIID cleansing timer in agency thread for %s", state->agencyid); + return -1; + } + + /* regular once-per-second timer to break out of the epoll loop and check + * for new messages or signals from the main thread + */ + state->timerev = create_mediator_timer(state->epoll_fd, NULL, + MED_EPOLL_SIGCHECK_TIMER, 0); + if (state->timerev == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to create main loop timer in agency thread for %s", state->agencyid); + return -1; + } + + /* timer for performing RMQ maintenance tasks */ + state->rmqhb = create_mediator_timer(state->epoll_fd, NULL, + MED_EPOLL_RMQCHECK_TIMER, 0); + + if (start_mediator_timer(state->rmqhb, state->rmq_hb_freq) < 0) { + logger(LOG_INFO,"OpenLI Mediator: failed to add RMQHB timer to epoll in agency thread for %s", state->agencyid); + return -1; + } + + return 1; +} + +/** Converts an HI1 notification message a into properly encoded ETSI + * record and pushes it onto the HI2 export buffer for the agency. + * + * @param state The state object for the LEA send thread + * @param ndata The HI1 notification that was received from the + * provisioner (via the main mediator thread). 
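+ *
+ * This function takes ownership of ndata: the notification and all of its
+ * string fields are freed before returning, whether or not encoding and
+ * buffering succeeded.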
+ */ +static void publish_hi1_notification(lea_thread_state_t *state, + hi1_notify_data_t *ndata) { + + wandder_encoded_result_t *encoded_hi1 = NULL; + + if (!ndata) { + return; + } + + if (state->agency.hi2->ho_state->encoder == NULL) { + state->agency.hi2->ho_state->encoder = init_wandder_encoder(); + } else { + reset_wandder_encoder(state->agency.hi2->ho_state->encoder); + } + + /* encode into ETSI format using libwandder */ + encoded_hi1 = encode_etsi_hi1_notification( + state->agency.hi2->ho_state->encoder, ndata, state->operator_id, + state->short_operator_id); + if (encoded_hi1 == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to construct HI1 Notification message for %s:%s", ndata->agencyid, ndata->liid); + goto freehi1; + } + + /* push onto the HI2 export buffer */ + if (append_etsipdu_to_buffer(&(state->agency.hi2->ho_state->buf), + encoded_hi1->encoded, encoded_hi1->len, 0) == 0) { + if (state->agency.hi2->disconnect_msg == 0) { + logger(LOG_INFO, + "OpenLI Mediator: unable to enqueue HI1 Notification PDU for %s:%s", ndata->agencyid, ndata->liid); + } + } + + wandder_release_encoded_result(state->agency.hi2->ho_state->encoder, + encoded_hi1); + +freehi1: + /* free all the malloc'd memory for the HI1 notification */ + if (ndata->agencyid) { + free(ndata->agencyid); + } + if (ndata->liid) { + free(ndata->liid); + } + if (ndata->authcc) { + free(ndata->authcc); + } + if (ndata->delivcc) { + free(ndata->delivcc); + } + if (ndata->target_info) { + free(ndata->target_info); + } + free(ndata); + +} + +/** Read and act upon any messages in the message queue for an LEA send + * thread. These messages all originate from the main mediator thread. + * + * @param state The state object for the LEA send thread + * + * @return 1 if the calling thread should immediately halt, 0 otherwise. + */ +static int process_agency_messages(lea_thread_state_t *state) { + lea_thread_msg_t msg; + + /* messages should be relatively rare, so we should be OK with + * staying in this loop until we've read everything available + */ + while (libtrace_message_queue_try_get(&(state->in_main), (void *)&msg) + != LIBTRACE_MQ_FAILED) { + + if (msg.type == MED_LEA_MESSAGE_HALT) { + /* time to kill this thread! 
*/ + return 1; + } + + if (msg.type == MED_LEA_MESSAGE_SHUTDOWN_TIMER) { + /* provisioner has disconnected, start the thread shutdown timer */ + uint16_t *timeout = (uint16_t *)msg.data; + start_shutdown_timer(state, *timeout); + free(timeout); + } + + if (msg.type == MED_LEA_MESSAGE_RECONNECT) { + /* we need to drop and reconnect the handovers -- do the drop + * here and the reconnect will happen in the main epoll loop + */ + disconnect_handover(state->agency.hi2); + disconnect_handover(state->agency.hi3); + state->agency.disabled = 0; + } + + if (msg.type == MED_LEA_MESSAGE_DISCONNECT) { + /* we need to drop the handovers and NOT try to reconnect until + * told otherwise, so set the "disabled" flag for the agency + */ + disconnect_handover(state->agency.hi2); + disconnect_handover(state->agency.hi3); + state->agency.disabled = 1; + } + + if (msg.type == MED_LEA_MESSAGE_RELOAD_CONFIG) { + /* the main thread has modified the shared config, so we need to + * check if there are any changes that affect this thread + */ + if (read_parent_config(state) == 1) { + reset_handover_rmq(state->agency.hi2); + reset_handover_rmq(state->agency.hi3); + } + + } + + if (msg.type == MED_LEA_MESSAGE_UPDATE_AGENCY) { + /* This agency has been re-announced by the provisioner, so + * incorporate any changes to our handovers + */ + + /* If a shutdown timer is running, halt it */ + halt_mediator_timer(state->shutdown_wait); + + /* If a handover has changed, disconnect it */ + update_agency_handovers(&(state->agency), + (liagency_t *)(msg.data), state->epoll_fd); + + /* Handover reconnections won't happen until the next time + * around this loop (1 second delay max) + */ + + /* Set a timer which upon expiry will declare any + * remaining unconfirmed LIIDs to be withdrawn. + */ + halt_mediator_timer(state->cleanse_liids); + if (start_mediator_timer(state->cleanse_liids, 30) < 0) { + logger(LOG_INFO, "OpenLI Mediator: failed to add timer to remove unconfirmed LIID mappings in agency thread %s", state->agencyid); + } + + } + + if (msg.type == MED_LEA_MESSAGE_REMOVE_LIID) { + /* An LIID is no longer associated with this agency */ + char *liid = (char *)(msg.data); + if (state->agency.hi2->rmq_consumer != NULL) { + deregister_mediator_iri_RMQ_consumer( + state->agency.hi2->rmq_consumer, liid); + } + if (state->agency.hi3->rmq_consumer != NULL) { + deregister_mediator_cc_RMQ_consumer( + state->agency.hi3->rmq_consumer, liid); + } + + withdraw_liid_agency_mapping(&(state->active_liids), liid); + free(liid); + } + + if (msg.type == MED_LEA_MESSAGE_ADD_LIID) { + /* An LIID has been assigned to a particular agency. */ + added_liid_t *added = (added_liid_t *)msg.data; + + if (strcmp(added->agencyid, state->agencyid) != 0) { + /* This agency previously received this LIID but it + * has now changed to another agency. We need to remove it + * from our current handovers so we don't steal records for + * it and send them to the wrong agency. + */ + purge_lea_liid_mapping(state, added->liid); + } else { + /* If the agency ID matches ours, then we should add it to + * our handovers. 
+ */ + insert_lea_liid_mapping(state, added->liid); + } + + free(added->liid); + free(added->agencyid); + free(added); + } + + if (msg.type == MED_LEA_MESSAGE_SEND_HI1_NOTIFICATION) { + /* An HI1 notification has been delivered from the provisioner */ + hi1_notify_data_t *ndata = (hi1_notify_data_t *)(msg.data); + + publish_hi1_notification(state, ndata); + } + + } + return 0; +} + +/** Main loop for the LEA send thread -- runs until a HALT message is received + * from the main mediator thread, or some irrecoverably fatal error occurs. + * + * @param params The state object for the LEA send thread (passed as + * a void pointer). + * + * @return NULL to pthread_join() when the thread exits + */ +static void *run_agency_thread(void *params) { + lea_thread_state_t *state = (lea_thread_state_t *)params; + int is_connected = 0, is_halted = 0; + struct epoll_event evs[64]; + int i, nfds, timerexpired = 0; + + read_parent_config(state); + logger(LOG_INFO, "OpenLI Mediator: starting agency thread for %s", + state->agencyid); + + if (create_agency_thread_timers(state) < 0) { + goto threadexit; + } + + while (!is_halted) { + /* Connect the handovers, if required */ + /* TODO separate function */ + if (state->agency.hi2->outev && state->agency.hi3->outev) { + is_connected = 1; + } else { + is_connected = 0; + } + + if (!is_connected && state->agency.disabled == 0) { + int r_hi2 = 0, r_hi3 = 0; + + r_hi2 = connect_mediator_handover(state->agency.hi2, + state->epoll_fd, state->handover_id, state->agencyid); + r_hi3 = connect_mediator_handover(state->agency.hi3, + state->epoll_fd, state->handover_id + 1, state->agencyid); + if (r_hi2 < 0 || r_hi3 < 0) { + break; + } + + if (r_hi2 > 0 && r_hi3 > 0) { + is_connected = 1; + } + } + + /* Check for messages from the main thread */ + if (process_agency_messages(state)) { + is_halted = 1; + continue; + } + + /* Register all known LIIDs with the handover RMQ consumers -- again, + * only if the RMQ consumers have not been set up already. 
*/ + if (state->agency.disabled == 0) { + if (!state->agency.hi2->rmq_registered && + state->agency.hi2->outev) { + register_handover_RMQ_all(state->agency.hi2, + &(state->active_liids), state->agencyid, + state->internalrmqpass); + } + if (!state->agency.hi3->rmq_registered && + state->agency.hi3->outev) { + register_handover_RMQ_all(state->agency.hi3, + &(state->active_liids), state->agencyid, + state->internalrmqpass); + } + } + + + /* Start the once-per-second timer, so we can check for messages + * regularly regardless of how busy our epoll socket is + */ + if (start_mediator_timer(state->timerev, 1) < 0) { + logger(LOG_INFO,"OpenLI Mediator: failed to add timer to epoll in agency thread for %s", state->agencyid); + break; + } + + /* epoll main loop for a LEA send thread */ + timerexpired = 0; + while (!timerexpired && !is_halted) { + nfds = epoll_wait(state->epoll_fd, evs, 64, -1); + + if (nfds < 0) { + if (errno == EINTR) { + continue; + } + logger(LOG_INFO, "OpenLI Mediator: error while waiting for epoll events in agency thread for %s: %s", state->agencyid, strerror(errno)); + is_halted = 1; + continue; + } + + for (i = 0; i < nfds; i++) { + timerexpired = agency_thread_epoll_event(state, &(evs[i])); + if (timerexpired == -1) { + is_halted = 1; + break; + } + if (timerexpired) { + break; + } + } + + } + + halt_mediator_timer(state->timerev); + } +threadexit: + logger(LOG_INFO, "OpenLI Mediator: ending agency thread for %s", + state->agencyid); + + destroy_agency_thread_state(state); + pthread_exit(NULL); +} + +/** Tidy up the state object for an LEA send thread, freeing all allocated + * memory and closing any open sockets. + * + * @param state The state object for the LEA send thread + */ +void destroy_agency_thread_state(lea_thread_state_t *state) { + destroy_mediator_timer(state->timerev); + destroy_mediator_timer(state->rmqhb); + destroy_mediator_timer(state->shutdown_wait); + destroy_mediator_timer(state->cleanse_liids); + + destroy_agency(&(state->agency)); + if (state->operator_id) { + free(state->operator_id); + } + if (state->short_operator_id) { + free(state->short_operator_id); + } + if (state->pcap_outtemplate) { + free(state->pcap_outtemplate); + } + if (state->internalrmqpass) { + free(state->internalrmqpass); + } + if (state->pcap_dir) { + free(state->pcap_dir); + } + purge_liid_map(&(state->active_liids)); + free(state->agencyid); + close(state->epoll_fd); +} + + +/** The methods below are intended to be called by the main mediator thread + * ======================================================================= + */ + + +/** Sends an UPDATE AGENCY message to an LEA send thread. + * + * @param thread The state object for the LEA send thread that is + * to receive the update agency message. + * @param lea The updated definition of the agency that the + * recipient thread corresponds to. + * + * @return 0 always + */ +int mediator_update_agency_thread(lea_thread_state_t *thread, liagency_t *lea) { + + lea_thread_msg_t update_msg; + + update_msg.type = MED_LEA_MESSAGE_UPDATE_AGENCY; + update_msg.data = (void *)lea; + + libtrace_message_queue_put(&(thread->in_main), &update_msg); + return 0; +} + +/** Initialises and starts a new LEA send thread + * + * @param medleas The set of LEA threads for this mediator. + * @param lea The definition of the agency that the newly + * created thread will be sending records to. 
+ * + * @return -1 if an error occurs, 1 if successful + */ +int mediator_start_agency_thread(mediator_lea_t *medleas, liagency_t *lea) { + + lea_thread_state_t *found = NULL; + mediator_lea_config_t *config = &(medleas->config); + + /* "pcapdisk" is reserved as the "agency" for writing intercepts to disk + * as pcap files + */ + if (strcmp(lea->agencyid, "pcapdisk") == 0) { + logger(LOG_INFO, "OpenLI Mediator: Invalid agency ID: \"pcapdisk\" -- please rename the agency to something else"); + return -1; + } + + /* Declare and initialise the state for the thread */ + found = (lea_thread_state_t *)calloc(1, sizeof(lea_thread_state_t)); + found->parentconfig = config; + found->epoll_fd = epoll_create1(0); + found->handover_id = medleas->next_handover_id; + + /* Increment by 2 to account for HI2 and HI3 */ + medleas->next_handover_id += 2; + + libtrace_message_queue_init(&(found->in_main), + sizeof(lea_thread_msg_t)); + found->agencyid = strdup(lea->agencyid); + + /* Add to the set of running LEA send threads */ + HASH_ADD_KEYPTR(hh, medleas->threads, found->agencyid, + strlen(found->agencyid), found); + + init_mediator_agency(&(found->agency), lea, found->epoll_fd); + pthread_create(&(found->tid), NULL, run_agency_thread, found); + return 1; +} + +/** Halts an LEA send thread. + * + * @param medleas The set of LEA threads for this mediator. + * @param agencyid The ID of the agency whose LEA send thread is to + * be halted. + */ +void mediator_halt_agency_thread(mediator_lea_t *medleas, char *agencyid) { + + lea_thread_state_t *lea; + lea_thread_msg_t end_msg; + memset(&end_msg, 0, sizeof(end_msg)); + + if (strcmp(agencyid, "pcapdisk") == 0) { + logger(LOG_INFO, "OpenLI Mediator: cannot withdraw the \"pcapdisk\" LEA because it is special."); + return; + } + + /* find the thread that matches the provided agency ID, if it exists */ + HASH_FIND(hh, medleas->threads, agencyid, strlen(agencyid), lea); + if (!lea) { + logger(LOG_INFO, "OpenLI Mediator: asked to withdraw LEA %s but no thread for this LEA exists?", agencyid); + return; + } + + /* send a HALT message to the thread */ + end_msg.type = MED_LEA_MESSAGE_HALT; + end_msg.data = NULL; + libtrace_message_queue_put(&(lea->in_main), &end_msg); + + /* wait for the thread to exit cleanly */ + pthread_join(lea->tid, NULL); + + /* clean up remaining state and remove the thread from the set of active + * LEA send threads + */ + libtrace_message_queue_destroy(&(lea->in_main)); + HASH_DELETE(hh, medleas->threads, lea); + free(lea); +} + +/** Halts the LEA send threads for ALL agencies + * + * @param medleas The set of LEA threads for this mediator. + */ +void mediator_disconnect_all_leas(mediator_lea_t *medleas) { + + lea_thread_state_t *lea, *tmp; + + /* Send a HALT message to every known LEA send thread */ + HASH_ITER(hh, medleas->threads, lea, tmp) { + lea_thread_msg_t end_msg; + memset(&end_msg, 0, sizeof(end_msg)); + end_msg.type = MED_LEA_MESSAGE_HALT; + end_msg.data = NULL; + libtrace_message_queue_put(&(lea->in_main), &end_msg); + } + + /* Now wait for each thread to exit cleanly and then tidy up any + * remaining state for each thread. 
+ */ + HASH_ITER(hh, medleas->threads, lea, tmp) { + pthread_join(lea->tid, NULL); + libtrace_message_queue_destroy(&(lea->in_main)); + HASH_DELETE(hh, medleas->threads, lea); + free(lea); + } + +} + +/** Initialises the shared configuration for the LEA send threads + * + * @param config The shared configuration instance to be initialised + * @param rmqconf The RMQ configuration for the mediator + * @param mediatorid The ID number assigned to this mediator + * @param operatorid The operator ID configured for this mediator + * @param shortopid The short operator ID configured for this mediator + * @param pcapdir The directory to write pcap files into + * @param pcaptemplate The template to use when naming pcap files + * @param pcapcompress The compression level to use when writing pcap files + * @param pcaprotate The frequency to rotate pcap files, in minutes + * + */ +void init_med_agency_config(mediator_lea_config_t *config, + openli_RMQ_config_t *rmqconf, uint32_t mediatorid, char *operatorid, + char *shortopid, char *pcapdir, char *pcaptemplate, + uint8_t pcapcompress, uint32_t pcaprotate) { + + memset(config, 0, sizeof(mediator_lea_config_t)); + + config->rmqconf = rmqconf; + config->mediatorid = mediatorid; + if (operatorid) { + config->operatorid = strdup(operatorid); + } + if (shortopid) { + config->shortoperatorid = strdup(shortopid); + } + config->pcap_compress_level = pcapcompress; + config->pcap_rotate_frequency = pcaprotate; + if (pcapdir) { + config->pcap_dir = strdup(pcapdir); + } + if (pcaptemplate) { + config->pcap_outtemplate = strdup(pcaptemplate); + } + + pthread_mutex_init(&(config->mutex), NULL); +} + +/** Updates the shared configuration for the LEA send threads with new values + * + * @param config The shared configuration instance to be updated + * @param mediatorid The ID number assigned to this mediator + * @param operatorid The operator ID configured for this mediator + * @param shortopid The short operator ID configured for this mediator + * @param pcapdir The directory to write pcap files into + * @param pcaptemplate The template to use when naming pcap files + * @param pcapcompress The compression level to use when writing pcap files + * @param pcaprotate The frequency to rotate pcap files, in minutes + * + */ +void update_med_agency_config(mediator_lea_config_t *config, + uint32_t mediatorid, char *operatorid, + char *shortopid, char *pcapdir, char *pcaptemplate, + uint8_t pcapcompress, uint32_t pcaprotate) { + + pthread_mutex_lock(&(config->mutex)); + config->mediatorid = mediatorid; + config->pcap_compress_level = pcapcompress; + config->pcap_rotate_frequency = pcaprotate; + + if (config->operatorid) { + free(config->operatorid); + } + config->operatorid = strdup(operatorid); + + if (config->shortoperatorid) { + free(config->shortoperatorid); + } + config->shortoperatorid = strdup(shortopid); + + if (config->pcap_dir) { + free(config->pcap_dir); + } + config->pcap_dir = strdup(pcapdir); + + if (config->pcap_outtemplate) { + free(config->pcap_outtemplate); + } + config->pcap_outtemplate = strdup(pcaptemplate); + pthread_mutex_unlock(&(config->mutex)); + +} + +/** Destroys the shared configuration for the LEA send threads. 
+ * + * @param config The shared configuration instance to be destroyed + */ +void destroy_med_agency_config(mediator_lea_config_t *config) { + if (config->operatorid) { + free(config->operatorid); + } + if (config->shortoperatorid) { + free(config->shortoperatorid); + } + if (config->pcap_dir) { + free(config->pcap_dir); + } + if (config->pcap_outtemplate) { + free(config->pcap_outtemplate); + } + pthread_mutex_destroy(&(config->mutex)); +} + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : + diff --git a/src/mediator/lea_send_thread.h b/src/mediator/lea_send_thread.h new file mode 100644 index 00000000..fb65b3e7 --- /dev/null +++ b/src/mediator/lea_send_thread.h @@ -0,0 +1,374 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#ifndef OPENLI_MEDIATOR_LEA_SEND_THREAD_H +#define OPENLI_MEDIATOR_LEA_SEND_THREAD_H + +#include +#include +#include "netcomms.h" +#include "openli_tls.h" +#include "med_epoll.h" +#include "handover.h" +#include "agency.h" +#include "liidmapping.h" + + +/** The code in this source file defines types and methods used by an + * "LEA send" thread for the OpenLI mediator. + * Each agency that is configured with the OpenLI provisioner will be + * handled using a separate instance of one of these threads. + * + * The core functionality of an LEA send thread is to: + * - establish the handovers to the agency for both HI2 and HI3. + * - consume any IRIs or CCs for LIIDs that belong to the agency from + * their respective internal RMQ queue, placing them in an export + * buffer for the corresponding handover. + * - send data from the export buffer over the handover socket, when the + * LEA end is able to receive data. + * - send periodic keepalives on each handover, as required. 
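+ *
+ * As a rough illustrative sketch only (not the actual implementation),
+ * the thread entry point run_agency_thread() can be pictured as an
+ * epoll-driven loop along these lines:
+ *
+ *   void *run_agency_thread(void *arg) {
+ *       lea_thread_state_t *state = (lea_thread_state_t *)arg;
+ *       struct epoll_event evs[64];      // requires <sys/epoll.h>
+ *       int i, nfds;
+ *
+ *       // handovers, RMQ consumers and the timers created by
+ *       // create_agency_thread_timers() are set up before looping
+ *       while (1) {
+ *           nfds = epoll_wait(state->epoll_fd, evs, 64, 50);
+ *           for (i = 0; i < nfds; i++) {
+ *               // evs[i].data.ptr is a med_epoll_ev_t *; dispatch on
+ *               // its type: handover I/O, keepalive timers, RMQ
+ *               // maintenance, or messages arriving via state->in_main
+ *               // (a MED_LEA_MESSAGE_HALT breaks out of the loop)
+ *           }
+ *       }
+ *       return NULL;
+ *   }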
+ */ + + + +/** Types of messages that can be sent to an LEA send thread by the main + * mediator thread + */ +enum { + + /** Disconnect and then re-connect the handovers for this agency */ + MED_LEA_MESSAGE_RECONNECT, + + /** Disconnect the handovers for this agency until further notice */ + MED_LEA_MESSAGE_DISCONNECT, + + /** Stop the thread for this agency as soon as possible */ + MED_LEA_MESSAGE_HALT, + + /** The configuration for the agency has changed, update accordingly */ + MED_LEA_MESSAGE_UPDATE_AGENCY, + + /** Disassociate an LIID with this agency */ + MED_LEA_MESSAGE_REMOVE_LIID, + + /** Announcement that an LIID has been associated with an agency */ + MED_LEA_MESSAGE_ADD_LIID, + + /** Shared configuration for the LEA send threads has changed, update + * local version accordingly + */ + MED_LEA_MESSAGE_RELOAD_CONFIG, + + /** An HI1 notification message needs to be sent to this agency */ + MED_LEA_MESSAGE_SEND_HI1_NOTIFICATION, + + /** The provisioner has disconnected, so start a shutdown timer for this + * agency thread. + */ + MED_LEA_MESSAGE_SHUTDOWN_TIMER, +}; + +/** Structure describing an LIID->agency association */ +typedef struct added_liid { + /** The LIID */ + char *liid; + /** The ID of the agency that the LIID is associated with */ + char *agencyid; +} added_liid_t; + +/** Message structure for the LEA send threads */ +typedef struct lea_thread_msg { + /** The message type, defined by the enum above */ + int type; + /** Additional data/context for the message -- actual type will vary + * depending on the message type. + */ + void *data; +} lea_thread_msg_t; + +/** Shared configuration for the LEA send threads -- all config values + * are derived from the mediator configuration file + */ +typedef struct mediator_lea_config { + /** Pointer to the RMQ configuration for the mediator */ + openli_RMQ_config_t *rmqconf; + + /** The mediator ID number */ + uint32_t mediatorid; + + /** The operator ID for the mediator */ + char *operatorid; + + /** The abbreviated operator ID for the mediator */ + char *shortoperatorid; + + /** The compression level to use when writing pcap files */ + uint8_t pcap_compress_level; + /** The template to use when deriving filenames for pcap files */ + char *pcap_outtemplate; + /** The directory to write pcap files into */ + char *pcap_dir; + /** The frequency (in minutes) to rotate pcap files */ + uint32_t pcap_rotate_frequency; + + /** A mutex to protect the shared config from race conditions */ + pthread_mutex_t mutex; +} mediator_lea_config_t; + +/** The local per-thread state for an LEA send thread */ +typedef struct lea_thread_state { + /** The pthread id number for the thread */ + pthread_t tid; + + /** The ID number for HI2 for this agency (HI3 is this number + 1) */ + uint32_t handover_id; + + /** The epoll file descriptor */ + int epoll_fd; + + /** The agency state instance describing the agency and its handovers */ + mediator_agency_t agency; + + /** The set of LIIDs that are associated with the agency */ + liid_map_t active_liids; + + /** The mediator ID number */ + uint32_t mediator_id; + + /** The operator ID for this mediator */ + char *operator_id; + /** The abbreviated operator ID for this mediator */ + char *short_operator_id; + + /** The compression level to use when writing pcap files */ + uint8_t pcap_compress_level; + /** The template to use when deriving filenames for pcap files */ + char *pcap_outtemplate; + /** The directory to write pcap files into */ + char *pcap_dir; + /** The frequency (in minutes) to rotate pcap files 
*/ + uint32_t pcap_rotate_frequency; + + /** The queue for messages from the main mediator thread */ + libtrace_message_queue_t in_main; + + /** The shared configuration for all LEA threads */ + mediator_lea_config_t *parentconfig; + + /** The password to use to authenticate against the internal RMQ vhost */ + char *internalrmqpass; + /** The frequency at which this thread should perform RMQ maintenance + * tasks (in seconds) + */ + int rmq_hb_freq; + + /** The ID string for this agency */ + char *agencyid; + + /** Mediator epoll event for the shutdown timer */ + med_epoll_ev_t *shutdown_wait; + /** Mediator epoll event for the message queue checking timer */ + med_epoll_ev_t *timerev; + /** Mediator epoll event for the RMQ maintenance timer */ + med_epoll_ev_t *rmqhb; + /** Mediator epoll event for a timer to remove unconfirmed LIID mappings */ + med_epoll_ev_t *cleanse_liids; + + UT_hash_handle hh; + +} lea_thread_state_t; + +/** Structure to keep track of all LEA send threads within the main mediator + * thread. + */ +typedef struct mediator_leas { + /** The set of active LEA send threads */ + lea_thread_state_t *threads; + /** The shared configuration for the LEA send threads */ + mediator_lea_config_t config; + /** The ID number to assign to the next newly created LEA handover */ + uint32_t next_handover_id; +} mediator_lea_t; + +/** Functions called from the main mediator thread to start / manage LEA + * send threads. + */ + +/** Initialises and starts a new LEA send thread + * + * @param medleas The set of LEA threads for this mediator. + * @param lea The definition of the agency that the newly + * created thread will be sending records to. + * + * @return -1 if an error occurs, 1 if successful + */ +int mediator_start_agency_thread(mediator_lea_t *medleas, liagency_t *lea); + +/** Sends an UPDATE AGENCY message to an LEA send thread. + * + * @param thread The state object for the LEA send thread that is + * to receive the update agency message. + * @param lea The updated definition of the agency that the + * recipient thread corresponds to. + * + * @return 0 always + */ +int mediator_update_agency_thread(lea_thread_state_t *thread, liagency_t *lea); + +/** Halts an LEA send thread. + * + * @param medleas The set of LEA threads for this mediator. + * @param agencyid The ID of the agency whose LEA send thread is to + * be halted. + */ +void mediator_halt_agency_thread(mediator_lea_t *medleas, char *agencyid); + +/** Halts the LEA send threads for ALL agencies + * + * @param medleas The set of LEA threads for this mediator. 
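+ *
+ * Purely as an illustration (the surrounding variable names are
+ * assumed), the main mediator thread might drive this API as follows:
+ *
+ *   // provisioner withdraws a single agency:
+ *   mediator_halt_agency_thread(&(state->agency_threads), agencyid);
+ *
+ *   // the whole mediator is halting:
+ *   mediator_disconnect_all_leas(&(state->agency_threads));
+ *   destroy_med_agency_config(&(state->agency_threads.config));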
+ */
+void mediator_disconnect_all_leas(mediator_lea_t *medleas);
+
+/** Initialises the shared configuration for the LEA send threads
+ *
+ * @param config The shared configuration instance to be initialised
+ * @param rmqconf The RMQ configuration for the mediator
+ * @param mediatorid The ID number assigned to this mediator
+ * @param operatorid The operator ID configured for this mediator
+ * @param shortopid The short operator ID configured for this mediator
+ * @param pcapdir The directory to write pcap files into
+ * @param pcaptemplate The template to use when naming pcap files
+ * @param pcapcompress The compression level to use when writing pcap files
+ * @param pcaprotate The frequency to rotate pcap files, in minutes
+ *
+ */
+void init_med_agency_config(mediator_lea_config_t *config,
+        openli_RMQ_config_t *rmqconf, uint32_t mediatorid, char *operatorid,
+        char *shortopid, char *pcapdir, char *pcaptemplate,
+        uint8_t pcapcompress, uint32_t pcaprotate);
+
+/** Updates the shared configuration for the LEA send threads with new values
+ *
+ * @param config The shared configuration instance to be updated
+ * @param mediatorid The ID number assigned to this mediator
+ * @param operatorid The operator ID configured for this mediator
+ * @param shortopid The short operator ID configured for this mediator
+ * @param pcapdir The directory to write pcap files into
+ * @param pcaptemplate The template to use when naming pcap files
+ * @param pcapcompress The compression level to use when writing pcap files
+ * @param pcaprotate The frequency to rotate pcap files, in minutes
+ *
+ */
+void update_med_agency_config(mediator_lea_config_t *config,
+        uint32_t mediatorid, char *operatorid,
+        char *shortopid, char *pcapdir, char *pcaptemplate,
+        uint8_t pcapcompress, uint32_t pcaprotate);
+
+/** Destroys the shared configuration for the LEA send threads.
+ *
+ * @param config The shared configuration instance to be destroyed
+ */
+void destroy_med_agency_config(mediator_lea_config_t *config);
+
+/** Functions used by both LEA send threads and pcap threads */
+
+/** Handle any outstanding heartbeats for this thread's RMQ connections and
+ * tidy up any unused RMQ internal queues.
+ *
+ * Should be called periodically using an epoll timer event.
+ *
+ * @param state The state object for this LEA send thread
+ * @param mev The mediator epoll timer event that fired to trigger
+ * this function being called
+ *
+ * @return 0 if the triggering timer is unable to be reset, 1 otherwise.
+ */
+int agency_thread_action_rmqcheck_timer(lea_thread_state_t *state,
+        med_epoll_ev_t *mev);
+
+/** Loops over the set of known LIIDs and withdraws any that have not been
+ * confirmed by the provisioner since it last (re-)connected.
+ *
+ * Should be called via an epoll timer event set for some amount of time
+ * after a provisioner has re-connected to the main mediator thread.
+ *
+ * @param state The state object for this LEA send thread
+ *
+ * @return 0 always
+ */
+int agency_thread_action_cease_liid_timer(lea_thread_state_t *state);
+
+/** Updates local copies of configuration variables to match the shared
+ * version of the configuration managed by the main mediator thread.
+ *
+ * @param state The state object for the LEA send thread
+ * @return 1 if the RMQ internal password has changed (so all RMQ
+ * local connections should be restarted), 0 otherwise.
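+ *
+ * For illustration only (the surrounding message handling is assumed),
+ * a thread acting on a MED_LEA_MESSAGE_RELOAD_CONFIG message might use
+ * the return value like so:
+ *
+ *   if (read_parent_config(state) == 1) {
+ *       // the internal RMQ password changed: tear down and re-create
+ *       // this thread's local RMQ connections
+ *   }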
+ + */ +int read_parent_config(lea_thread_state_t *state); + +/** Declares and initialises the mediator epoll timer events that are + * used by an LEA send thread (or a pcap writer thread). + * + * @param state The state object for the LEA send thread + * + * @return -1 if an error occurs, 1 otherwise + */ +int create_agency_thread_timers(lea_thread_state_t *state); + +/** Tidy up the state object for an LEA send thread, freeing all allocated + * memory and closing any open sockets. + * + * @param state The state object for the LEA send thread + */ +void destroy_agency_thread_state(lea_thread_state_t *state); + +/** Disables an LIID for an LEA send thread. + * + * @param state The state object for the LEA send thread + * @param liid The LIID to disable + * + * @return 1 if successful, 0 if the LIID was not in this thread's LIID set. + */ +int purge_lea_liid_mapping(lea_thread_state_t *state, char *liid); + +/** Adds an LIID to the LIID set for an LEA send thread. + * + * Also registers the corresponding RMQ internal queues with the agency + * handovers so records for that LIID will be consumed by this thread. + * + * @param state The state object for the LEA send thread + * @param liid The LIID to associate with this agency + * + * @return 1 if successful, 0 if the LIID was already in this thread's LIID + * set, -1 if an error occurs. + */ +int insert_lea_liid_mapping(lea_thread_state_t *state, char *liid); + +#endif + +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/liidmapping.c b/src/mediator/liidmapping.c index 1ca52270..8d8518d3 100644 --- a/src/mediator/liidmapping.c +++ b/src/mediator/liidmapping.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -25,8 +25,8 @@ */ #include +#include #include "liidmapping.h" -#include "med_epoll.h" #include "logger.h" /** Finds an LIID in an LIID map and returns its corresponding agency @@ -47,114 +47,171 @@ liid_map_entry_t *lookup_liid_agency_mapping(liid_map_t *map, char *liidstr) { return (liid_map_entry_t *)(*jval); } -/** Removes an LIID->agency mapping from an LIID map. +/** Frees any memory allocated for an LIID->agency mapping + * + * @param m The LIID map entry to be freed + */ +void destroy_liid_mapping(liid_map_entry_t *m) { + free(m->liid); + free(m); +} + +/** Removes an LIID->agency mapping from an LIID map and frees any memory + * allocated for that mapping. * * @param map The LIID map to remove the mapping from - * @param liidstr The LIID that is to be removed from the map (as a - * string) + * @param m The LIID map entry to be removed * */ -void remove_liid_agency_mapping(liid_map_t *map, char *liidstr) { +void remove_liid_agency_mapping(liid_map_t *map, liid_map_entry_t *m) { int err; - logger(LOG_DEBUG, "OpenLI Mediator: removed agency mapping for LIID %s.", - liidstr); - JSLD(err, map->liid_array, (unsigned char *)liidstr); + JSLD(err, map->liid_array, (unsigned char *)m->liid); + destroy_liid_mapping(m); +} + +/** Flags an LIID->agency mapping as withdrawn. + * + * Withdrawn mappings are deleted (along with their corresponding queues) + * once any outstanding messages in their queue have been processed. 
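+ *
+ * As a rough sketch of the intended lifecycle (the queue-draining logic
+ * itself lives in the LEA send thread), a withdrawn entry is eventually
+ * removed with something along the lines of:
+ *
+ *   if (m->withdrawn && m->ccqueue_deleted && m->iriqueue_deleted) {
+ *       remove_liid_agency_mapping(map, m);
+ *   }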
+ * + * @param map The LIID map to search for the mapping + * @param liidstr The LIID to be withdrawn + * + */ +void withdraw_liid_agency_mapping(liid_map_t *map, char *liidstr) { + PWord_t jval; + liid_map_entry_t *m; + + JSLG(jval, map->liid_array, (unsigned char *)liidstr); + if (jval == NULL) { + return; + } + + m = (liid_map_entry_t *)(*jval); + m->withdrawn = 1; + + logger(LOG_INFO, + "OpenLI Mediator: flagged agency mapping for LIID %s as withdrawn.", + m->liid); } /** Adds a new LIID->agency mapping to an LIID map. * * @param map The LIID map to add the new mapping to * @param liidstr The LIID for the new mapping (as a string) - * @param agency The agency that requested the LIID * - * @return -1 if an error occurs, 0 if the addition is successful, 1 if - * the pcap thread needs to know that this LIID is no longer - * being written to disk + * @return -1 if an error occurs, 0 if the LIID already existed and was not + * withdrawn, 1 if a new LIID->agency mapping was created or an + * existing mapping has been reactivated. */ -int add_liid_agency_mapping(liid_map_t *map, char *liidstr, - mediator_agency_t *agency) { +int add_liid_agency_mapping(liid_map_t *map, char *liidstr) { PWord_t jval; liid_map_entry_t *m; - int err; - int ret = 0; JSLG(jval, map->liid_array, (unsigned char *)liidstr); if (jval != NULL) { + int ret = 0; + /* We've seen this LIID before? Possibly a re-announcement? */ m = (liid_map_entry_t *)(*jval); - if (m->ceasetimer) { - /* was scheduled to be ceased, so halt the timer */ - halt_mediator_timer(m->ceasetimer); - } - - if (m->agency == NULL && agency != NULL) { - /* this LIID used to be written to pcap, now we need to - * tell the pcap thread to stop creating files for it - */ + /* If it was withdrawn, reset it to being active */ + if (m->withdrawn != 0) { + m->withdrawn = 0; ret = 1; + } else { + ret = 0; } - free(m->liid); - } else { - /* Create a new entry in the mapping array */ - JSLI(jval, map->liid_array, (unsigned char *)liidstr); - if (jval == NULL) { - logger(LOG_INFO, "OpenLI Mediator: OOM when allocating memory for new LIID."); - return -1; - } + m->unconfirmed = 0; + m->ccqueue_deleted = 0; + m->iriqueue_deleted = 0; + return ret; + } - m = (liid_map_entry_t *)malloc(sizeof(liid_map_entry_t)); - if (m == NULL) { - logger(LOG_INFO, "OpenLI Mediator: OOM when allocating memory for new LIID."); - return -1; - } - *jval = (Word_t)m; - - /* If this was previously a "unknown" LIID, we can now remove - * it from our missing LIID list -- if it gets withdrawn later, - * we will then alert again about it being missing. 
*/ - JSLG(jval, map->missing_liids, (unsigned char *)liidstr); - if (jval != NULL) { - JSLD(err, map->missing_liids, (unsigned char *)liidstr); - } + /* Create a new entry in the mapping array */ + JSLI(jval, map->liid_array, (unsigned char *)liidstr); + if (jval == NULL) { + logger(LOG_INFO, "OpenLI Mediator: OOM when allocating memory for new LIID."); + return -1; } - m->liid = liidstr; - m->agency = agency; - m->ceasetimer = NULL; - - if (agency) { - logger(LOG_DEBUG, "OpenLI Mediator: added %s -> %s to LIID map", - m->liid, m->agency->agencyid); - } else { - logger(LOG_INFO, "OpenLI Mediator: added %s -> pcapdisk to LIID map", - m->liid); + + m = (liid_map_entry_t *)calloc(1, sizeof(liid_map_entry_t)); + if (m == NULL) { + logger(LOG_INFO, "OpenLI Mediator: OOM when allocating memory for new LIID."); + return -1; } - return ret; + *jval = (Word_t)m; + + m->withdrawn = 0; + m->unconfirmed = 0; + m->liid = strdup(liidstr); + m->ccqueue_deleted = 0; + m->iriqueue_deleted = 0; + + return 1; +} + +/** Callback method for setting the "unconfirmed" flag for an LIID map + * entry. Designed for use in combination with foreach_liid_agency_mapping(). + * + * @param m The LIID map entry to be marked as unconfirmed + * @param arg A user-provided argument (unused) + * + * @return 0 always + */ +int set_liid_as_unconfirmed(liid_map_entry_t *m, void *arg) { + m->unconfirmed = 1; + return 0; } -/** Adds an LIID to the set of LIIDs without agencies in an LIID map. +/** Runs a user-provided function against all LIIDs in the map. + * + * @param map The map to iterate over + * @param arg A user-provided argument that will be passed into each + * invocation of the user function + * @param torun The function to run for each existing LIID. * - * @param map The LIID map to add the missing LIID to - * @param liidstr The LIID that has no corresponding agency (as a string) + * The torun() function must accept two arguments: + * - an liid_map_entry_t * that will point to an LIID mapping + * - a void * that will point to the user-provided argument * - * @return -1 if an error occurs (e.g. OOM), 0 if successful. + * The torun() function must return 1 if the mapping should be deleted after + * the function has completed, or 0 if it should be retained in the map. 
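+ *
+ * For example (illustrative only), a callback that deletes every mapping
+ * already flagged as withdrawn could be written as:
+ *
+ *   static int delete_if_withdrawn(liid_map_entry_t *m, void *arg) {
+ *       (void)arg;
+ *       return m->withdrawn ? 1 : 0;
+ *   }
+ *
+ *   foreach_liid_agency_mapping(map, NULL, delete_if_withdrawn);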
*/ -int add_missing_liid(liid_map_t *map, char *liidstr) { +int foreach_liid_agency_mapping(liid_map_t *map, void *arg, + int (*torun)(liid_map_entry_t *, void *)) { + + unsigned char index[1024]; + liid_map_entry_t *m; PWord_t jval; + int r, jrc; + int err = 0; - JSLI(jval, map->missing_liids, (unsigned char *)liidstr); - if (jval == NULL) { - logger(LOG_INFO, "OpenLI Mediator: OOM when allocating memory for missing LIID."); - return -1; - } + index[0] = '\0'; - if ((*jval) == 0) { - logger(LOG_INFO, "OpenLI Mediator: was unable to find LIID %s in its set of mappings.", liidstr); - } + /* Iterate all known LIIDs */ + JSLF(jval, map->liid_array, index); + while (jval != NULL) { + m = (liid_map_entry_t *)(*jval); - (*jval) = 1; + if (m) { + r = torun(m, arg); + if (r == 1) { + /* if torun() returns 1, delete the mapping */ + JSLD(jrc, map->liid_array, index); + destroy_liid_mapping(m); + } else if (r == -1) { + err = 1; + } + } + JSLN(jval, map->liid_array, index); + } + + if (err) { + return -1; + } return 0; } @@ -175,27 +232,10 @@ void purge_liid_map(liid_map_t *map) { JSLF(jval, map->liid_array, index); while (jval != NULL) { m = (liid_map_entry_t *)(*jval); - - /* If we had a timer running for the removal of a withdrawn LIID, - * make sure we stop that cleanly. - */ - if (m->ceasetimer) { - destroy_mediator_timer(m->ceasetimer); - } - JSLN(jval, map->liid_array, index); - free(m->liid); - free(m); + JSLN(jval, map->liid_array, index); + destroy_liid_mapping(m); } JSLFA(bytes, map->liid_array); } -/** Removes all entries from the missing LIID map - * - * @param map The LIID map to purge missing LIIDs from - */ -void purge_missing_liids(liid_map_t *map) { - Word_t bytes; - - JSLFA(bytes, map->missing_liids); -} // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/liidmapping.h b/src/mediator/liidmapping.h index dfd63e93..fe123df8 100644 --- a/src/mediator/liidmapping.h +++ b/src/mediator/liidmapping.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -28,8 +28,7 @@ #define OPENLI_LIID_AGENCY_MAPPING_H_ #include -#include "med_epoll.h" -#include "handover.h" +#include typedef struct liidmapping liid_map_entry_t; @@ -40,19 +39,31 @@ struct liidmapping { /** The LIID, as a string */ char *liid; - /** The agency that should receive this LIID */ - mediator_agency_t *agency; + /** Flag that indicates whether this mapping is unconfirmed by the + * provisioner. + */ + uint8_t unconfirmed; - /** The epoll timer event for a scheduled removal of this mapping */ - med_epoll_ev_t *ceasetimer; + /** Flag that indicates whether this mapping has been withdrawn by the + * provisioner. + */ + uint8_t withdrawn; + + /** Flag that indicates whether the internal CC queue for this LIID has + * been deleted by the mediator. + */ + uint8_t ccqueue_deleted; + + /** Flag that indicates whether the internal IRI queue for this LIID has + * been deleted by the mediator. 
+ */ + uint8_t iriqueue_deleted; }; /** The map used to track which LIIDs should be sent to which agencies */ typedef struct liid_map { /** A map of known LIID->agency mappings */ Pvoid_t liid_array; - /** A set of LIIDs which have no known corresponding agency (yet) */ - Pvoid_t missing_liids; } liid_map_t; /** Finds an LIID in an LIID map and returns its corresponding agency @@ -65,36 +76,54 @@ typedef struct liid_map { */ liid_map_entry_t *lookup_liid_agency_mapping(liid_map_t *map, char *liidstr); -/** Adds an LIID to the set of LIIDs without agencies in an LIID map. - * - * @param map The LIID map to add the missing LIID to - * @param liidstr The LIID that has no corresponding agency (as a string) - * - * @return -1 if an error occurs (e.g. OOM), 0 if successful. - */ -int add_missing_liid(liid_map_t *map, char *liidstr); - /** Removes an LIID->agency mapping from an LIID map. * * @param map The LIID map to remove the mapping from - * @param liidstr The LIID that is to be removed from the map (as a - * string) + * @param m The LIID map entry to be removed * */ -void remove_liid_agency_mapping(liid_map_t *map, char *liidstr); +void remove_liid_agency_mapping(liid_map_t *map, liid_map_entry_t *m); /** Adds a new LIID->agency mapping to an LIID map. * * @param map The LIID map to add the new mapping to * @param liidstr The LIID for the new mapping (as a string) - * @param agency The agency that requested the LIID * - * @return -1 if an error occurs, 0 if the addition is successful, 1 if - * the pcap thread needs to know that this LIID is no longer - * being written to disk + * @return -1 if an error occurs, 0 if the LIID already existed, 1 if + * a new LIID->agency mapping was created + */ +int add_liid_agency_mapping(liid_map_t *map, char *liidstr); + +/** Flags an LIID->agency mapping as withdrawn. + * + * Withdrawn mappings are deleted (along with their corresponding queues) + * once any outstanding messages in their queue have been processed. + * + * @param map The LIID map to search for the mapping + * @param liidstr The LIID to be withdrawn + * */ -int add_liid_agency_mapping(liid_map_t *map, char *liidstr, - mediator_agency_t *agency); +void withdraw_liid_agency_mapping(liid_map_t *map, char *liidstr); + +/** Runs a user-provided function against all LIIDs in the map. + * + * @param map The map to iterate over + * @param arg A user-provided argument that will be passed into each + * invocation of the user function + * @param torun The function to run for each existing LIID. + * + * The torun() function must accept two arguments: + * - an liid_map_entry_t * that will point to an LIID mapping + * - a void * that will point to the user-provided argument + * + * The torun() function must return 1 if the mapping should be deleted after + * the function has completed, or 0 if it should be retained in the map. + * + * @return -1 if any of the function iterations return -1 (i.e. an error). + * Otherwise will return 0. + */ +int foreach_liid_agency_mapping(liid_map_t *map, void *arg, + int (*torun)(liid_map_entry_t *, void *)); /** Removes all current LIID->agency mappings from the LIID map. * @@ -102,11 +131,15 @@ int add_liid_agency_mapping(liid_map_t *map, char *liidstr, */ void purge_liid_map(liid_map_t *map); -/** Removes all entries from the missing LIID map +/** Callback method for setting the "unconfirmed" flag for an LIID map + * entry. Designed for use in combination with foreach_liid_agency_mapping(). 
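+ *
+ * A typical (sketched) use from an LEA send thread, where state is its
+ * lea_thread_state_t, is to mark every known LIID as unconfirmed when
+ * the provisioner reconnects:
+ *
+ *   foreach_liid_agency_mapping(&(state->active_liids), NULL,
+ *           set_liid_as_unconfirmed);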
+ * + * @param m The LIID map entry to be marked as unconfirmed + * @param arg A user-provided argument (unused) * - * @param map The LIID map to purge missing LIIDs from + * @return 0 always */ -void purge_missing_liids(liid_map_t *map); +int set_liid_as_unconfirmed(liid_map_entry_t *m, void *arg); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/med_epoll.c b/src/mediator/med_epoll.c index 69198e90..2943f259 100644 --- a/src/mediator/med_epoll.c +++ b/src/mediator/med_epoll.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -30,7 +30,7 @@ #include "med_epoll.h" #include "util.h" -/** Starts an existing timer and adds it to the global epoll event set. +/** Starts an existing timer and adds it to an epoll event set. * * Examples of timers that would use this function: * - sending the next keep alive to a handover @@ -56,6 +56,10 @@ int start_mediator_timer(med_epoll_ev_t *timerev, int timeoutval) { return 0; } + if (timeoutval == 0) { + return 0; + } + if ((sock = epoll_add_timer(timerev->epoll_fd, timeoutval, timerev)) == -1) { return -1; @@ -65,6 +69,47 @@ int start_mediator_timer(med_epoll_ev_t *timerev, int timeoutval) { return 0; } +/** Starts an existing timer and adds it to the global epoll event set. + * + * Examples of timers that would use this function: + * - sending the next keep alive to a handover + * - attempting to reconnect to a lost provisioner + * - deciding that a handover has failed to respond to a keep alive + * + * Only call this on timers that have had their state and epoll_fd + * members already set via a call to create_mediator_timer(). + * + * Use this method for timers where you require millisecond precision. + * + * @param timerev The mediator epoll event for the timer. + * @param timeoutval The number of milliseconds to wait before triggering the + * timer event. + * + * @return -1 if an error occured, 0 otherwise (including not setting + * a timer because the timer is disabled). + */ +int start_mediator_ms_timer(med_epoll_ev_t *timerev, int timeoutval) { + + int sock; + + /* Timer is disabled, ignore */ + if (timerev == NULL) { + return 0; + } + + if (timeoutval == 0) { + return 0; + } + + if ((sock = epoll_add_ms_timer(timerev->epoll_fd, timeoutval, + timerev)) == -1) { + return -1; + } + + timerev->fd = sock; + return 0; +} + /** Halts a timer and removes it from the global epoll event set. * * This function applies to the same timers that start_mediator_timer @@ -86,6 +131,10 @@ int halt_mediator_timer(med_epoll_ev_t *timerev) { return 0; } + if (timerev->fd == -1) { + return 0; + } + if (epoll_ctl(timerev->epoll_fd, EPOLL_CTL_DEL, timerev->fd, &ev) == -1) { return -1; } diff --git a/src/mediator/med_epoll.h b/src/mediator/med_epoll.h index ac18cf0f..b30798de 100644 --- a/src/mediator/med_epoll.h +++ b/src/mediator/med_epoll.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. 
@@ -28,6 +28,7 @@ #define OPENLI_MEDIATOR_EPOLL_H_ #include +#include /** Structure that stores state for a single epoll event */ typedef struct med_epoll_ev { @@ -91,6 +92,16 @@ enum { /** The mediator needs to send heartbeats to the RabbitMQ connections */ MED_EPOLL_RMQCHECK_TIMER, + + /** A timer for regularly cleaning up unused LIIDs in the collector + * receiver threads + */ + MED_EPOLL_QUEUE_EXPIRE_TIMER, + + /** A timer for shutting down unused LEA threads if the provisioner + * has disconnected, or failed to re-announce them after reconnecting + */ + MED_EPOLL_SHUTDOWN_LEA_THREAD, }; /** Starts an existing timer and adds it to the global epoll event set. @@ -112,6 +123,27 @@ enum { */ int start_mediator_timer(med_epoll_ev_t *timerev, int timeoutval); +/** Starts an existing timer and adds it to the global epoll event set. + * + * Examples of timers that would use this function: + * - sending the next keep alive to a handover + * - attempting to reconnect to a lost provisioner + * - deciding that a handover has failed to respond to a keep alive + * + * Only call this on timers that have had their state and epoll_fd + * members already set via a call to create_mediator_timer(). + * + * Use this method for timers where you require millisecond precision. + * + * @param timerev The mediator epoll event for the timer. + * @param timeoutval The number of milliseconds to wait before triggering the + * timer event. + * + * @return -1 if an error occured, 0 otherwise (including not setting + * a timer because the timer is disabled). + */ +int start_mediator_ms_timer(med_epoll_ev_t *timerev, int timeoutval); + /** Halts a timer and removes it from the global epoll event set. * * This function applies to the same timers that start_mediator_timer diff --git a/src/mediator/mediator.c b/src/mediator/mediator.c index ae2c94ee..fc53fa34 100644 --- a/src/mediator/mediator.c +++ b/src/mediator/mediator.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -57,8 +57,11 @@ #include "handover.h" #include "med_epoll.h" #include "pcapthread.h" +#include "coll_recv_thread.h" +#include "lea_send_thread.h" -#define AMPQ_BYTES_FROM(x) (amqp_bytes_t){.len=sizeof(x),.bytes=&x} +/** This file implements the "main" thread for an OpenLI mediator. + */ /** Flag used to indicate that the mediator is being halted, usually due * to a signal or a fatal error. 
@@ -129,6 +132,9 @@ static void clear_med_config(mediator_state_t *state) { if (state->RMQ_conf.pass) { free(state->RMQ_conf.pass); } + if (state->RMQ_conf.internalpass) { + free(state->RMQ_conf.internalpass); + } free_ssl_config(&(state->sslconf)); } @@ -139,21 +145,11 @@ static void clear_med_config(mediator_state_t *state) { */ static void destroy_med_state(mediator_state_t *state) { - /* Remove all known LIIDs */ - purge_liid_map(&(state->liidmap)); - - /* Clean up the list of "unknown" LIIDs */ - purge_missing_liids(&(state->liidmap)); - /* Tear down the connection to the provisioner */ free_provisioner(&(state->provisioner)); - destroy_med_collector_state(&(state->collectors)); - - /* Delete all of the agencies and shut down any active handovers */ - drop_all_agencies(&(state->handover_state)); - - libtrace_list_deinit(state->handover_state.agencies); + destroy_med_collector_config(&(state->collector_threads.config)); + destroy_med_agency_config(&(state->agency_threads.config)); /* Close the main epoll file descriptor */ if (state->epoll_fd != -1) { @@ -178,93 +174,21 @@ static void destroy_med_state(mediator_state_t *state) { free(state->timerev); } - if (state->RMQtimerev) { - if (state->RMQtimerev->fd != -1) { - close(state->RMQtimerev->fd); - } - free(state->RMQtimerev); - } - - pthread_join(state->pcapthread, NULL); - - /* Halt the pcap file rotation timer */ - if (state->pcaptimerev) { - if (state->pcaptimerev->fd != -1) { - close(state->pcaptimerev->fd); - } - free(state->pcaptimerev); - } - - /* Clean up the message queue for packets to be written as pcap */ - libtrace_message_queue_destroy(&(state->pcapqueue)); - - /* Wait for the thread that keeps the handovers up to stop */ - pthread_mutex_lock(state->handover_state.agency_mutex); - if (state->handover_state.connectthread != -1) { - pthread_mutex_unlock(state->handover_state.agency_mutex); - pthread_join(state->handover_state.connectthread, NULL); - } else { - pthread_mutex_unlock(state->handover_state.agency_mutex); - } - - pthread_mutex_destroy(state->handover_state.agency_mutex); - free(state->handover_state.agency_mutex); } -/** Sends the current pcap output configuration to the pcap writing thread +/** Reads the configuration for a mediator instance and sets the relevant + * members of the global state structure accordingly. 
* - * @param medstate The global state for this mediator instance - */ -static inline void update_pcap_msg_thread(mediator_state_t *medstate) { - mediator_pcap_msg_t pcapmsg; - - memset(&pcapmsg, 0, sizeof(pcapmsg)); - pcapmsg.msgtype = PCAP_MESSAGE_CHANGE_DIR; - if (medstate->pcapdirectory != NULL) { - pcapmsg.msgbody = (uint8_t *)strdup(medstate->pcapdirectory); - pcapmsg.msglen = strlen(medstate->pcapdirectory); - } else { - pcapmsg.msgbody = NULL; - pcapmsg.msglen = 0; - } - libtrace_message_queue_put(&(medstate->pcapqueue), &pcapmsg); - - memset(&pcapmsg, 0, sizeof(pcapmsg)); - pcapmsg.msgtype = PCAP_MESSAGE_CHANGE_TEMPLATE; - if (medstate->pcaptemplate != NULL) { - pcapmsg.msgbody = (uint8_t *)strdup(medstate->pcaptemplate); - pcapmsg.msglen = strlen(medstate->pcaptemplate); - } else { - pcapmsg.msgbody = NULL; - pcapmsg.msglen = 0; - } - libtrace_message_queue_put(&(medstate->pcapqueue), &pcapmsg); - - memset(&pcapmsg, 0, sizeof(pcapmsg)); - pcapmsg.msgtype = PCAP_MESSAGE_CHANGE_COMPRESS; - pcapmsg.msgbody = (uint8_t *)&(medstate->pcapcompress); - pcapmsg.msglen = sizeof(medstate->pcapcompress); - libtrace_message_queue_put(&(medstate->pcapqueue), &pcapmsg); -} - -/** Initialises the global state for a mediator instance. - * - * This includes parsing the provided configuration file and setting - * the corresponding fields in the global state structure. - * - * This method is also run whenver a config reload is triggered by the - * user, so some state members are initialised later on to avoid - * unnecessary duplicate allocations -- see prepare_mediator_state() for - * more details. - * - * @param state The global state to be initialised + * @param state The global state to be initialised with configuration * @param configfile The path to the configuration file * * @return -1 if an error occurs, 0 otherwise */ -static int init_med_state(mediator_state_t *state, char *configfile) { - state->mediatorid = 0; +static int init_mediator_config(mediator_state_t *state, + char *configfile) { + state->conffile = configfile; + state->mediatorid = 0; state->listenaddr = NULL; state->listenport = NULL; state->etsitls = 1; @@ -276,6 +200,7 @@ static int init_med_state(mediator_state_t *state, char *configfile) { state->RMQ_conf.name = NULL; state->RMQ_conf.pass = NULL; + state->RMQ_conf.internalpass = NULL; state->RMQ_conf.hostname = NULL; state->RMQ_conf.port = 0; state->RMQ_conf.heartbeatFreq = 0; @@ -287,40 +212,82 @@ static int init_med_state(mediator_state_t *state, char *configfile) { state->pcapdirectory = NULL; state->pcaptemplate = NULL; state->pcapcompress = 1; - state->pcapthread = -1; state->pcaprotatefreq = 30; - state->listenerev = NULL; - state->timerev = NULL; - state->pcaptimerev = NULL; - state->epoll_fd = -1; - state->handover_state.epoll_fd = -1; - state->handover_state.agencies = NULL; - state->handover_state.halt_flag = 0; - state->handover_state.agency_mutex = calloc(1, sizeof(pthread_mutex_t)); - state->handover_state.connectthread = -1; - state->handover_state.next_handover_id = 1; + /* Parse the provided config file */ + if (parse_mediator_config(configfile, state) == -1) { + return -1; + } - pthread_mutex_init(state->handover_state.agency_mutex, NULL); + if (state->RMQ_conf.internalpass == NULL) { + /* First, try to read a password from /etc/openli/rmqinternalpass */ + FILE *f = fopen("/etc/openli/rmqinternalpass", "r"); + char line[2048]; + if (f != NULL) { + if (fgets(line, 2048, f) != NULL) { + if (line[strlen(line) - 1] == '\n') { + line[strlen(line) - 1] = '\0'; + 
} + state->RMQ_conf.internalpass = strdup(line); + } + } + /* If we can't do that, throw an error */ + if (state->RMQ_conf.internalpass == NULL) { + logger(LOG_ERR, "OpenLI mediator: unable to determine password for internal RMQ vhost -- mediator must exit"); + return -1; + } + } + if (state->shortoperatorid == NULL) { + if (state->operatorid != NULL) { + state->shortoperatorid = strndup(state->operatorid, 5); + } else { + state->shortoperatorid = strdup("?????"); + } + } - state->liidmap.liid_array = NULL; - state->liidmap.missing_liids = NULL; + if (state->operatorid == NULL) { + state->operatorid = strdup("unspecified"); + } + return 0; +} - libtrace_message_queue_init(&(state->pcapqueue), - sizeof(mediator_pcap_msg_t)); +/** Initialises the global state for a mediator instance. + * + * This includes parsing the provided configuration file and setting + * the corresponding fields in the global state structure. + * + * @param state The global state to be initialised + * @param configfile The path to the configuration file + * + * @return -1 if an error occurs, 0 otherwise + */ +static int init_med_state(mediator_state_t *state, char *configfile) { + state->listenerev = NULL; + state->timerev = NULL; + state->epoll_fd = -1; init_provisioner_instance(&(state->provisioner), &(state->sslconf.ctx)); - /* Parse the provided config file */ - if (parse_mediator_config(configfile, state) == -1) { + if (init_mediator_config(state, configfile) < 0) { return -1; } if (create_ssl_context(&(state->sslconf)) < 0) { return -1; } + /* Initialise state and config for the LEA send threads */ + state->agency_threads.threads = NULL; + state->agency_threads.next_handover_id = 0; + init_med_agency_config(&(state->agency_threads.config), + &(state->RMQ_conf), state->mediatorid, state->operatorid, + state->shortoperatorid, + state->pcapdirectory, state->pcaptemplate, state->pcapcompress, + state->pcaprotatefreq); - init_med_collector_state(&(state->collectors), &(state->etsitls), + /* Initialise state and config for the collector receive threads */ + state->collector_threads.threads = NULL; + init_med_collector_config(&(state->collector_threads.config), + state->etsitls, &(state->sslconf), &(state->RMQ_conf), state->mediatorid); logger(LOG_DEBUG, "OpenLI Mediator: ETSI TLS encryption %s", @@ -340,18 +307,6 @@ static int init_med_state(mediator_state_t *state, char *configfile) { state->provisioner.provport = strdup("8993"); } - if (state->shortoperatorid == NULL) { - if (state->operatorid != NULL) { - state->shortoperatorid = strndup(state->operatorid, 5); - } else { - state->shortoperatorid = strdup("?????"); - } - } - - if (state->operatorid == NULL) { - state->operatorid = strdup("unspecified"); - } - return 0; } @@ -372,13 +327,8 @@ static void prepare_mediator_state(mediator_state_t *state) { state->epoll_fd = epoll_create1(0); - state->handover_state.agencies = libtrace_list_init(sizeof(mediator_agency_t)); - state->handover_state.epoll_fd = state->epoll_fd; state->provisioner.epoll_fd = state->epoll_fd; - state->collectors.epoll_fd = state->epoll_fd; - state->collectors.collectors = - libtrace_list_init(sizeof(active_collector_t *)); - + /* Use an fd to catch signals during our main epoll loop, so that we * can provide our own signal handling without causing epoll_wait to * return EINTR. @@ -397,135 +347,90 @@ static void prepare_mediator_state(mediator_state_t *state) { return; } -/** Creates a flush or rotate message and sends it to the pcap writing thread. 
- * - * On its own, the pcap trace output would be flushed intermittently - * which often gives the impression that no packets are being captured. - * In reality, they are captured but are sitting in a buffer in memory - * rather than being written to disk. +/** Updates the shared configuration for the LEA send threads and tells those + * threads to update their own local copies of this configuration. * - * This method will also cause pcap files to be closed and rotated on a - * regular basis as pcap tools tend to have issues working with incomplete - * files -- regular file rotation means that only the file with the most - * recent packets will be incomplete; the others can be given to LEAs. + * @param state The global state for this mediator */ -static int trigger_pcap_flush(mediator_state_t *state, med_epoll_ev_t *mev) { +static void update_lea_thread_config(mediator_state_t *state) { + lea_thread_state_t *lea_t, *tmp; - mediator_pcap_msg_t pmsg; - struct timeval tv; + lea_thread_msg_t msg; - memset(&pmsg, 0, sizeof(pmsg)); - gettimeofday(&tv, NULL); + update_med_agency_config(&(state->agency_threads.config), + state->mediatorid, state->operatorid, + state->shortoperatorid, + state->pcapdirectory, state->pcaptemplate, state->pcapcompress, + state->pcaprotatefreq); - /* Check if we should be rotating -- the time check here is fairly coarse - * because we cannot guarantee that this event will be triggered in the - * exact second that the rotation should happen. - */ - if (tv.tv_sec % (60 * state->pcaprotatefreq) < 60) { - pmsg.msgtype = PCAP_MESSAGE_ROTATE; - } else { - /* Otherwise, just get the thread to flush any outstanding output */ - pmsg.msgtype = PCAP_MESSAGE_FLUSH; + /* Send the "reload your config" message to every LEA thread */ + memset(&msg, 0, sizeof(msg)); + + HASH_ITER(hh, state->agency_threads.threads, lea_t, tmp) { + msg.type = MED_LEA_MESSAGE_RELOAD_CONFIG; + msg.data = NULL; + libtrace_message_queue_put(&(lea_t->in_main), &msg); } - pmsg.msgbody = NULL; - pmsg.msglen = 0; - libtrace_message_queue_put(&(state->pcapqueue), (void *)(&pmsg)); +} - /* Restart the timer */ - if (halt_mediator_timer(mev) < 0) { - /* don't care? */ - } +static void update_coll_recv_thread_config(mediator_state_t *state) { + coll_recv_t *col_t, *tmp; + col_thread_msg_t msg; - if (start_mediator_timer(state->pcaptimerev, 60) < 0) { - logger(LOG_INFO, - "OpenLI Mediator: failed to create pcap rotation timer"); - return -1; + update_med_collector_config(&(state->collector_threads.config), + state->etsitls, state->mediatorid); + + /* Send the "reload your config" message to every collector thread */ + memset(&msg, 0, sizeof(msg)); + + HASH_ITER(hh, state->collector_threads.threads, col_t, tmp) { + msg.type = MED_COLL_MESSAGE_RELOAD; + libtrace_message_queue_put(&(col_t->in_main), &msg); } - return 0; } -/** Creates and sends a keep-alive message over a handover + +/** Tells every LEA send thread to start a shutdown timer. * - * @param state The global state for this mediator - * @param mev The epoll event for the keepalive timer that has fired + * This method should be called whenever we lose our connection to the + * provisioner. 
* - * @return -1 if an error occurs, 0 otherwise + * @param state The global state for this mediator + * @param timeout The number of seconds to set the shutdown timer for */ -static int trigger_keepalive(mediator_state_t *state, med_epoll_ev_t *mev) { +static void trigger_lea_thread_shutdown_timers(mediator_state_t *state, + uint16_t timeout) { - handover_t *ho = (handover_t *)(mev->state); - wandder_encoded_result_t *kamsg; - wandder_etsipshdr_data_t hdrdata; - char elemstring[16]; - char liidstring[24]; + lea_thread_state_t *lea_t, *tmp; + lea_thread_msg_t msg; - if (ho->outev == NULL) { - return 0; - } + memset(&msg, 0, sizeof(msg)); - if (ho->ho_state->pending_ka == NULL && - get_buffered_amount(&(ho->ho_state->buf)) == 0) { - /* Only create a new KA message if we have sent the last one we - * had queued up. - * Also only create one if we don't already have data to send. We - * should only be sending keep alives if the socket is idle. - */ - if (ho->ho_state->encoder == NULL) { - ho->ho_state->encoder = init_wandder_encoder(); - } else { - reset_wandder_encoder(ho->ho_state->encoder); - } - - /* Include the OpenLI version in the LIID field, so the LEAs can - * identify which version of the software is being used by the - * sender. - */ - /* PACKAGE_NAME and PACKAGE_VERSION come from config.h */ - snprintf(liidstring, 24, "%s-%s", PACKAGE_NAME, PACKAGE_VERSION); - hdrdata.liid = liidstring; - hdrdata.liid_len = strlen(hdrdata.liid); - - hdrdata.authcc = "NA"; - hdrdata.authcc_len = strlen(hdrdata.authcc); - hdrdata.delivcc = "NA"; - hdrdata.delivcc_len = strlen(hdrdata.delivcc); - - if (state->operatorid) { - hdrdata.operatorid = state->operatorid; - } else { - hdrdata.operatorid = "unspecified"; - } - hdrdata.operatorid_len = strlen(hdrdata.operatorid); + HASH_ITER(hh, state->agency_threads.threads, lea_t, tmp) { + msg.type = MED_LEA_MESSAGE_SHUTDOWN_TIMER; + msg.data = calloc(1, sizeof(uint16_t)); + memcpy(msg.data, &timeout, sizeof(uint16_t)); - /* Stupid 16 character limit... */ - snprintf(elemstring, 16, "med-%u", state->mediatorid); - hdrdata.networkelemid = elemstring; - hdrdata.networkelemid_len = strlen(hdrdata.networkelemid); + libtrace_message_queue_put(&(lea_t->in_main), &msg); + } - hdrdata.intpointid = NULL; - hdrdata.intpointid_len = 0; +} - kamsg = encode_etsi_keepalive(ho->ho_state->encoder, &hdrdata, - ho->ho_state->lastkaseq + 1); - if (kamsg == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: failed to construct a keep-alive."); - return -1; - } +/** Disconnects a provisioner socket and releases local state for that + * connection. + * + * @param currstate The global state for this mediator. + */ +static inline void drop_provisioner(mediator_state_t *currstate) { - ho->ho_state->pending_ka = kamsg; - ho->ho_state->lastkaseq += 1; + disconnect_provisioner(&(currstate->provisioner), 1); - /* Enable the output event for the handover, so that epoll will - * trigger a writable event when we are able to send this message. */ - if (enable_handover_writing(ho) < 0) { - return -1; - } - } + /* Shutdown all handovers if we haven't heard from the provisioner + * again within the next 30 minutes. 
+ */ + trigger_lea_thread_shutdown_timers(currstate, 1800); - /* Reset the keep alive timer */ - return restart_handover_keepalive(ho); } /** Creates and registers an epoll event for the socket that listens for @@ -609,22 +514,24 @@ static int process_signal(mediator_state_t *state, int sigfd) { static int receive_lea_withdrawal(mediator_state_t *state, uint8_t *msgbody, uint16_t msglen) { - liagency_t lea; + liagency_t *lea = calloc(1, sizeof(liagency_t)); /* Call into netcomms.c to decode the message properly */ - if (decode_lea_withdrawal(msgbody, msglen, &lea) == -1) { + if (decode_lea_withdrawal(msgbody, msglen, lea) == -1) { if (state->provisioner.disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: received invalid LEA withdrawal from provisioner."); } + free_liagency(lea); return -1; } if (state->provisioner.disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: received LEA withdrawal for %s.", - lea.agencyid); + lea->agencyid); } - withdraw_agency(&(state->handover_state), lea.agencyid); + mediator_halt_agency_thread(&(state->agency_threads), lea->agencyid); + free_liagency(lea); return 0; } @@ -638,92 +545,39 @@ static int receive_lea_withdrawal(mediator_state_t *state, uint8_t *msgbody, static int receive_lea_announce(mediator_state_t *state, uint8_t *msgbody, uint16_t msglen) { - liagency_t lea; + liagency_t *lea = calloc(1, sizeof(liagency_t)); + lea_thread_state_t *existing = NULL; + int ret = 0; /* Call into netcomms.c to decode the message */ - if (decode_lea_announcement(msgbody, msglen, &lea) == -1) { + if (decode_lea_announcement(msgbody, msglen, lea) == -1) { if (state->provisioner.disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: received invalid LEA announcement from provisioner."); } + free_liagency(lea); return -1; } if (state->provisioner.disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: received LEA announcement for %s.", - lea.agencyid); + lea->agencyid); logger(LOG_INFO, "OpenLI Mediator: HI2 = %s:%s HI3 = %s:%s", - lea.hi2_ipstr, lea.hi2_portstr, lea.hi3_ipstr, lea.hi3_portstr); - } - - return enable_agency(&(state->handover_state), &lea); -} - -/* Given a received ETSI record, determine which agency it should be - * forwarded to by the mediator. - * - * See extract_liid_from_exported_msg() for more information on the meaning - * of the liidlen output parameter. - * - * @param state The global state for this mediator. - * @param etsimsg The start of the message received. - * @param msglen The length of the message received. - * @param liidlen[out] The number of bytes to strip from the front of the - * message to reach the start of the actual ETSI record - * - * @return A pointer to the LIID->agency mapping that this record corresponds - * to, or NULL if the LIID is not known by this mediator. - */ -static liid_map_entry_t *match_etsi_to_agency(mediator_state_t *state, - uint8_t *etsimsg, uint16_t msglen, uint16_t *liidlen) { - - unsigned char liidstr[65536]; - liid_map_entry_t *found = NULL; - - /* Figure out the LIID for this ETSI record */ - extract_liid_from_exported_msg(etsimsg, msglen, liidstr, 65536, liidlen); - - /* Is this an LIID that we have a suitable agency mapping for? */ - found = lookup_liid_agency_mapping(&(state->liidmap), (char *)liidstr); - if (!found) { - if (add_missing_liid(&(state->liidmap), (char *)liidstr) < 0) { - exit(-2); - } - return NULL; - } - - return found; -} - -/** Append an ETSI record to the outgoing queue for the appropriate handover. 
- * - * @param state The global state for this mediator - * @param ho The handover that will send this record - * @param etsimsg Pointer to the start of the ETSI record - * @param msglen Length of the ETSI record, in bytes. - * - * @return -1 if an error occurs, 0 otherwise. - */ -static int enqueue_etsi(mediator_state_t *state, handover_t *ho, - uint8_t *etsimsg, uint16_t msglen) { - - if (append_etsipdu_to_buffer(&(ho->ho_state->buf), etsimsg, - (uint32_t)msglen, 0) == 0) { - - if (ho->disconnect_msg == 0) { - logger(LOG_INFO, - "OpenLI Mediator: was unable to enqueue ETSI PDU for handover %s:%s HI%d", - ho->ipstr, ho->portstr, ho->handover_type); - } - return -1; + lea->hi2_ipstr, lea->hi2_portstr, lea->hi3_ipstr, + lea->hi3_portstr); } - /* Got something to send, so make sure we are enable EPOLLOUT */ - if (enable_handover_writing(ho) < 0) { - return -1; + HASH_FIND(hh, state->agency_threads.threads, lea->agencyid, + strlen(lea->agencyid), existing); + if (!existing) { + ret = mediator_start_agency_thread(&(state->agency_threads), lea); + free_liagency(lea); + } else { + ret = mediator_update_agency_thread(existing, lea); + /* Don't free lea -- it will get sent to the LEA thread */ } - return 0; + return ret; } /** Parse and action an instruction from a provisioner to publish an HI1 @@ -738,17 +592,16 @@ static int enqueue_etsi(mediator_state_t *state, handover_t *ho, static int receive_hi1_notification(mediator_state_t *state, uint8_t *msgbody, uint16_t msglen) { - hi1_notify_data_t ndata; - wandder_encoded_result_t *encoded_hi1 = NULL; - mediator_agency_t *agency; - int ret = -1; + hi1_notify_data_t *ndata = calloc(1, sizeof(hi1_notify_data_t)); + lea_thread_msg_t msg; + lea_thread_state_t *lea_t; char *nottype_strings[] = { "INVALID", "Activated", "Deactivated", "Modified", "ALARM" }; /** See netcomms.c for this method */ - if (decode_hi1_notification(msgbody, msglen, &ndata) == -1) { + if (decode_hi1_notification(msgbody, msglen, ndata) == -1) { if (state->provisioner.disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: received invalid HI1 notification from provisioner."); @@ -756,70 +609,58 @@ static int receive_hi1_notification(mediator_state_t *state, uint8_t *msgbody, goto freehi1; } - if (ndata.notify_type < 0 || ndata.notify_type > HI1_ALARM) { + if (ndata->notify_type < 0 || ndata->notify_type > HI1_ALARM) { if (state->provisioner.disable_log == 0) { logger(LOG_INFO, - "OpenLI Mediator: invalid HI1 notification type %u received from provisioner.", ndata.notify_type); + "OpenLI Mediator: invalid HI1 notification type %u received from provisioner.", ndata->notify_type); } goto freehi1; } - if (state->provisioner.disable_log == 0) { - logger(LOG_INFO, - "OpenLI Mediator: received \"%s\" HI1 Notification from provisioner for LIID %s (target agency = %s)", - nottype_strings[ndata.notify_type], ndata.liid, - ndata.agencyid); - } - - agency = lookup_agency(&(state->handover_state), ndata.agencyid); - if (agency == NULL) { - /* We don't know about this supposed agency, but maybe that's - * because they only talk to another mediator -- silently ignore - * until we've got code that doesn't just broadcast these - * notifications to all mediators. 
- */ - ret = 0; + /* Forward the notification on to the appropriate LEA thread, which will + * encode the notification and forward it to the agency via HI2 + */ + HASH_FIND(hh, state->agency_threads.threads, ndata->agencyid, + strlen(ndata->agencyid), lea_t); + if (lea_t == NULL) { + if (state->provisioner.disable_log == 0) { + logger(LOG_INFO, + "OpenLI Mediator: received \"%s\" HI1 Notification from provisioner for LIID %s, but target agency '%s' is not recognisable?", + nottype_strings[ndata->notify_type], ndata->liid, + ndata->agencyid); + } goto freehi1; } - if (agency->hi2->ho_state->encoder == NULL) { - agency->hi2->ho_state->encoder = init_wandder_encoder(); - } else { - reset_wandder_encoder(agency->hi2->ho_state->encoder); + if (state->provisioner.disable_log == 0) { + logger(LOG_INFO, + "OpenLI Mediator: received \"%s\" HI1 Notification from provisioner for LIID %s (target agency = %s)", + nottype_strings[ndata->notify_type], ndata->liid, + ndata->agencyid); } - encoded_hi1 = encode_etsi_hi1_notification(agency->hi2->ho_state->encoder, - &ndata, state->operatorid, state->shortoperatorid); - if (encoded_hi1 == NULL) { - logger(LOG_INFO, "OpenLI Mediator: failed to construct HI1 Notifcation message"); - goto freehi1; - } + memset(&msg, 0, sizeof(msg)); + msg.type = MED_LEA_MESSAGE_SEND_HI1_NOTIFICATION; + msg.data = (void *)ndata; + libtrace_message_queue_put(&(lea_t->in_main), &msg); - if (enqueue_etsi(state, agency->hi2, encoded_hi1->encoded, - encoded_hi1->len) < 0) { - wandder_release_encoded_result(agency->hi2->ho_state->encoder, - encoded_hi1); - goto freehi1; - } - - wandder_release_encoded_result(agency->hi2->ho_state->encoder, - encoded_hi1); - ret = 0; + return 0; freehi1: - if (ndata.agencyid) { - free(ndata.agencyid); + if (ndata->agencyid) { + free(ndata->agencyid); } - if (ndata.liid) { - free(ndata.liid); + if (ndata->liid) { + free(ndata->liid); } - if (ndata.authcc) { - free(ndata.authcc); + if (ndata->authcc) { + free(ndata->authcc); } - if (ndata.delivcc) { - free(ndata.delivcc); + if (ndata->delivcc) { + free(ndata->delivcc); } - return ret; + free(ndata); + return -1; } /** Parse and action an instruction from a provisioner to remove an @@ -835,8 +676,8 @@ static int receive_cease(mediator_state_t *state, uint8_t *msgbody, uint16_t msglen) { char *liid = NULL; - liid_map_entry_t *m; - mediator_pcap_msg_t pcapmsg; + lea_thread_msg_t msg; + lea_thread_state_t *lea_t, *tmp; /** See netcomms.c for this method */ if (decode_cease_mediation(msgbody, msglen, &liid) == -1) { @@ -852,73 +693,21 @@ static int receive_cease(mediator_state_t *state, uint8_t *msgbody, return -1; } - /* Is this LIID in our existing LIID->agency map */ - m = lookup_liid_agency_mapping(&(state->liidmap), liid); - if (m == NULL) { - /* If not, ceasing is pretty straightforward */ - free(liid); - return 0; - } - - /* end any pcap trace for this LIID */ - if (m->agency == NULL) { - memset(&pcapmsg, 0, sizeof(pcapmsg)); - - pcapmsg.msgtype = PCAP_MESSAGE_DISABLE_LIID; - pcapmsg.msgbody = (unsigned char *)strdup(liid); - pcapmsg.msglen = strlen(liid) + 1; - libtrace_message_queue_put(&(state->pcapqueue), &pcapmsg); - } - - /* We cease mediation on a time-wait basis, i.e. we wait 15 seconds - * after receiving the cease instruction before removing the LIID mapping. - * This allows any remaining records that were actually intercepted - * before the cease was issued, but are sitting in a buffer somewhere - * (either on the mediator or the collector), to be forwarded to the - * agencies. 
+ /* Send the remove message to all LEA threads -- we don't keep a global + * map of LIIDs to agencies, but this shouldn't be a huge workload for + * the LEA threads to deal with. + * + * Note: this will include the pcap output thread. */ + memset(&msg, 0, sizeof(msg)); + HASH_ITER(hh, state->agency_threads.threads, lea_t, tmp) { + msg.type = MED_LEA_MESSAGE_REMOVE_LIID; + msg.data = strdup(liid); - if (m->ceasetimer != NULL) { - /* This LIID has already been scheduled to cease? */ - free(liid); - return 0; - } - - logger(LOG_INFO, - "OpenLI Mediator: scheduling removal of agency mapping for LIID %s.", - m->liid); - - m->ceasetimer = create_mediator_timer(state->epoll_fd, (void *)m, - MED_EPOLL_CEASE_LIID_TIMER, 15); - - if (m->ceasetimer == NULL) { - logger(LOG_INFO, "OpenLI Mediator: warning -- cease timer was not able to be set for LIID %s: %s", liid, strerror(errno)); + libtrace_message_queue_put(&(lea_t->in_main), &msg); } - return 0; -} - -/** Removes an entry from the LIID->agency map, following the expiry of - * a "cease mediation" timer. - * - * @param state The global state for this mediator - * @param mev The epoll event for the cease mediation timer that - * has triggered. - * - * @return -1 if an error occurs, 0 otherwise. - */ -static inline int remove_mediator_liid_mapping(mediator_state_t *state, - med_epoll_ev_t *mev) { - - liid_map_entry_t *m = (liid_map_entry_t *)(mev->state); - - remove_liid_agency_mapping(&(state->liidmap), m->liid); - - /* Make sure that the timer event is removed from epoll */ - halt_mediator_timer(mev); - free(m->ceasetimer); - free(m->liid); - free(m); + free(liid); return 0; } @@ -936,8 +725,11 @@ static int receive_liid_mapping(mediator_state_t *state, uint8_t *msgbody, uint16_t msglen) { char *agencyid, *liid; - mediator_agency_t *agency; - int err; + int found = 0; + lea_thread_msg_t msg; + lea_thread_state_t *target; + added_liid_t *added; + lea_thread_state_t *tmp; agencyid = NULL; liid = NULL; @@ -952,72 +744,49 @@ static int receive_liid_mapping(mediator_state_t *state, uint8_t *msgbody, return -1; } - /* "Special" agency ID for intercepts that need to be written to a - * PCAP file instead of sent to an agency... + /* + * Include agencyid and LIID in msg.data and send msg to ALL LEA + * threads. + * + * LEA threads who have the LIID in their active LIID map but do + * NOT match the agencyid in the message must immediately deregister + * any RMQs for the LIID and remove it from their map. This ensures + * that any LIID that changes agencies is properly transitioned + * (although this should ideally never happen). + * + * The LEA thread that does match the agencyid obviously adds the + * mapping as per usual. */ - if (strcmp((char *)agencyid, "pcapdisk") == 0) { - agency = NULL; - } else { - /* Try to find the agency in our agency list */ - agency = lookup_agency(&(state->handover_state), agencyid); + found = 0; - /* We *could* consider waiting for an LEA announcement that will resolve - * this discrepancy, but any relevant announcement should have been sent - * before the LIID mapping. - * - * Also, what are we going to do with any records matching that LIID? - * Buffer them? Our buffers are tied to handovers, so we'd need - * somewhere else to store them. Drop them? 
- */ - if (agency == NULL) { - logger(LOG_INFO, "OpenLI Mediator: agency %s is not recognised by the mediator, yet LIID %s is intended for it?", - agencyid, liid); - return -1; + HASH_ITER(hh, state->agency_threads.threads, target, tmp) { + if (strcmp(target->agencyid, agencyid) == 0) { + found = 1; } - } - free(agencyid); - - err = add_liid_agency_mapping(&(state->liidmap), liid, agency); - if (err < 0) { - return -1; - } + added = calloc(1, sizeof(added_liid_t)); + added->liid = strdup(liid); + added->agencyid = strdup(agencyid); - if (err == 1) { - /* tell pcap thread that it no longer gets this LIID */ - mediator_pcap_msg_t pcapmsg; - memset(&pcapmsg, 0, sizeof(pcapmsg)); + msg.type = MED_LEA_MESSAGE_ADD_LIID; + msg.data = (void *)added; - pcapmsg.msgtype = PCAP_MESSAGE_DISABLE_LIID; - pcapmsg.msgbody = (unsigned char *)strdup(liid); - pcapmsg.msglen = strlen(liid) + 1; - libtrace_message_queue_put(&(state->pcapqueue), &pcapmsg); + libtrace_message_queue_put(&(target->in_main), &msg); } - return 0; -} - -/** React to a handover's failure to respond to a keep alive before the - * response timer expired. - * - * @param mev The epoll event for the keep alive response timer - * - * @return -1 to force the epoll loop to restart, rather than try to - * continue processing events (in case the handover that we've - * just disconnected is one of the upcoming events). - */ -static int trigger_ka_failure(med_epoll_ev_t *mev) { - handover_t *ho = (handover_t *)(mev->state); - - if (ho->disconnect_msg == 0) { - logger(LOG_INFO, "OpenLI Mediator: failed to receive KA response from LEA on handover %s:%s HI%d, dropping connection.", - ho->ipstr, ho->portstr, ho->handover_type); + if (found == 0) { + logger(LOG_INFO, "OpenLI Mediator: agency %s is not recognised by the mediator, yet LIID %s is intended for it?", + agencyid, liid); + return -1; } - disconnect_handover(ho); - - /* Return -1 here to force a fresh call to epoll_wait() */ - return -1; + if (liid) { + free(liid); + } + if (agencyid) { + free(agencyid); + } + return 0; } /** Receives and actions one or more messages received from the provisioner. @@ -1099,137 +868,6 @@ static int receive_provisioner(mediator_state_t *state, med_epoll_ev_t *mev) { return 0; } -#define MAX_COLL_RECV (10 * 1024 * 1024) - -/** Receives and actions a message from a collector, which can include - * an encoded ETSI CC or IRI. - * - * @param state The global state for this mediator. - * @param mev The epoll event for the collector socket. - * - * @return -1 if an error occurs, 0 otherwise. 
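/* Illustrative sketch of the consuming side of MED_LEA_MESSAGE_ADD_LIID, as
 * described by the comment in receive_liid_mapping() above -- not part of this
 * patch. The map helpers (sketch_lea_thread_has_liid, _add_liid,
 * _withdraw_liid) are hypothetical names; the ownership rule (each thread
 * receives its own strdup'd copies and must free them) follows from the
 * producer code, which allocates a fresh added_liid_t per thread.
 */
static void sketch_handle_add_liid(lea_thread_state_t *lea_t,
        added_liid_t *added) {

    if (strcmp(added->agencyid, lea_t->agencyid) == 0) {
        /* The mapping is for this agency -- register the LIID (a no-op if it
         * is already known) so its queues will be consumed by this thread. */
        sketch_lea_thread_add_liid(lea_t, added->liid);
    } else if (sketch_lea_thread_has_liid(lea_t, added->liid)) {
        /* The LIID has moved to another agency: stop consuming its queues and
         * drop it from the local map so records are not delivered twice. */
        sketch_lea_thread_withdraw_liid(lea_t, added->liid);
    }

    /* These strings were strdup'd by the main thread for this message only */
    free(added->liid);
    free(added->agencyid);
    free(added);
}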
- */ -static int receive_collector(mediator_state_t *state, med_epoll_ev_t *mev) { - - uint8_t *msgbody = NULL; - uint16_t msglen = 0; - uint64_t internalid; - liid_map_entry_t *thisint; - single_coll_state_t *cs = (single_coll_state_t *)(mev->state); - openli_proto_msgtype_t msgtype; - mediator_pcap_msg_t pcapmsg; - uint16_t liidlen; - uint32_t total_recvd = 0; - - do { - if (mev->fdtype == MED_EPOLL_COL_RMQ) { - msgtype = receive_RMQ_buffer(cs->incoming_rmq, cs->amqp_state, - &msgbody, &msglen, &internalid); - } else { - msgtype = receive_net_buffer(cs->incoming, &msgbody, - &msglen, &internalid); - } - - if (msgtype < 0) { - if (cs->disabled_log == 0) { - nb_log_receive_error(msgtype); - logger(LOG_INFO, - "OpenLI Mediator: error receiving message from collector."); - } - return -1; - } - - total_recvd += msglen; - - switch(msgtype) { - case OPENLI_PROTO_DISCONNECT: - logger(LOG_INFO, - "OpenLI Mediator: error receiving message from collector."); - return -1; - case OPENLI_PROTO_NO_MESSAGE: - break; - case OPENLI_PROTO_HEARTBEAT: - break; - case OPENLI_PROTO_RAWIP_SYNC: - /* This is a raw IP packet capture, rather than a properly - * encoded ETSI CC. */ - /* msgbody should be an LIID + an IP packet */ - thisint = match_etsi_to_agency(state, msgbody, msglen, - &liidlen); - if (thisint == NULL) { - break; - } - if (cs->disabled_log == 1) { - reenable_collector_logging(&(state->collectors), cs); - } - - if (thisint->agency == NULL) { - /* Write IP packet directly to pcap */ - pcapmsg.msgtype = PCAP_MESSAGE_RAWIP; - pcapmsg.msgbody = (uint8_t *)malloc(msglen); - memcpy(pcapmsg.msgbody, msgbody, msglen); - pcapmsg.msglen = msglen; - libtrace_message_queue_put(&(state->pcapqueue), &pcapmsg); - } - - break; - case OPENLI_PROTO_ETSI_CC: - /* msgbody should contain an LIID + a full ETSI CC record */ - thisint = match_etsi_to_agency(state, msgbody, msglen, - &liidlen); - if (thisint == NULL) { - break; - } - if (cs->disabled_log == 1) { - reenable_collector_logging(&(state->collectors), cs); - } - if (thisint->agency == NULL) { - /* Destined for a pcap file rather than an agency */ - /* TODO freelist rather than repeated malloc/free */ - pcapmsg.msgtype = PCAP_MESSAGE_PACKET; - pcapmsg.msgbody = (uint8_t *)malloc(msglen - liidlen); - memcpy(pcapmsg.msgbody, msgbody + liidlen, - msglen - liidlen); - pcapmsg.msglen = msglen - liidlen; - libtrace_message_queue_put(&(state->pcapqueue), &pcapmsg); - } else if (enqueue_etsi(state, thisint->agency->hi3, - msgbody + liidlen, msglen - liidlen) == -1) { - return -1; - } - break; - case OPENLI_PROTO_ETSI_IRI: - /* msgbody should contain an LIID + a full ETSI IRI record */ - thisint = match_etsi_to_agency(state, msgbody, msglen, - &liidlen); - if (thisint == NULL) { - break; - } - if (cs->disabled_log == 1) { - reenable_collector_logging(&(state->collectors), cs); - } - if (thisint->agency == NULL) { - /* Destined for a pcap file rather than an agency */ - /* IRIs don't make sense for a pcap, so just ignore it */ - break; - } - if (enqueue_etsi(state, thisint->agency->hi2, msgbody + liidlen, - msglen - liidlen) == -1) { - return -1; - } - break; - default: - if (cs->disabled_log == 0) { - logger(LOG_INFO, - "OpenLI Mediator: unexpected message type %d received from collector.", - msgtype); - } - return -1; - } - } while (msgtype != OPENLI_PROTO_NO_MESSAGE && total_recvd < MAX_COLL_RECV); - - return 0; -} - /** React to an event on a file descriptor reported by our epoll loop. 
* * @param state The global state for the mediator @@ -1252,44 +890,19 @@ static int check_epoll_fd(mediator_state_t *state, struct epoll_event *ev) { logger(LOG_INFO, "OpenLI Mediator: main epoll timer has failed."); return -1; - case MED_EPOLL_RMQCHECK_TIMER: - halt_mediator_timer(mev); - service_RMQ_connections(&(state->collectors)); - if (start_mediator_timer(state->RMQtimerev, - state->RMQ_conf.heartbeatFreq) < 0) { - logger(LOG_INFO, "OpenLI Mediator: unable to reset RMQ heartbeat timer: %s", strerror(errno)); - return -1; - } - return 1; - case MED_EPOLL_PCAP_TIMER: - /* pcap timer has fired, flush or rotate any pcap output */ - assert(ev->events == EPOLLIN); - ret = trigger_pcap_flush(state, mev); - break; case MED_EPOLL_SIGNAL: /* we got a signal that needs to be handled */ ret = process_signal(state, mev->fd); break; case MED_EPOLL_COLL_CONN: /* a connection is occuring on our listening socket */ - ret = mediator_accept_collector(&(state->collectors), - state->listenerev->fd); + ret = mediator_accept_collector_connection( + &(state->collector_threads), state->listenerev->fd); break; case MED_EPOLL_CEASE_LIID_TIMER: /* an LIID->agency mapping can now be safely removed */ assert(ev->events == EPOLLIN); - ret = remove_mediator_liid_mapping(state, mev); - break; - case MED_EPOLL_KA_TIMER: - /* a handover is due to send a keep alive message */ - assert(ev->events == EPOLLIN); - ret = trigger_keepalive(state, mev); - break; - case MED_EPOLL_KA_RESPONSE_TIMER: - /* a handover target has not responded to a keep alive message - * and is due to be disconnected */ - assert(ev->events == EPOLLIN); - ret = trigger_ka_failure(mev); + //ret = remove_mediator_liid_mapping(state, mev); break; case MED_EPOLL_PROVRECONNECT: /* we're due to try reconnecting to a lost provisioner */ @@ -1298,25 +911,6 @@ static int check_epoll_fd(mediator_state_t *state, struct epoll_event *ev) { state->provisioner.tryconnect = 1; break; - case MED_EPOLL_LEA: - /* the handover is available for writing or reading */ - if (ev->events & EPOLLRDHUP) { - ret = -1; - } else if (ev->events & EPOLLIN) { - /* message from LEA -- hopefully a keep-alive response */ - ret = receive_handover(mev); - } else if (ev->events & EPOLLOUT) { - /* handover is able to send buffered records */ - ret = xmit_handover(mev); - } else { - ret = -1; - } - if (ret == -1) { - handover_t *ho = (handover_t *)(mev->state); - disconnect_handover(ho); - } - break; - case MED_EPOLL_PROVISIONER: /* the provisioner socket is available for reading or writing */ if (ev->events & EPOLLRDHUP) { @@ -1333,32 +927,14 @@ static int check_epoll_fd(mediator_state_t *state, struct epoll_event *ev) { state->provisioner.provaddr, state->provisioner.provport); state->provisioner.disable_log = 0; + } } else { ret = -1; } if (ret == -1) { - disconnect_provisioner(&(state->provisioner), 1); - } - break; - case MED_EPOLL_COLLECTOR_HANDSHAKE: - /* socket with an incomplete SSL handshake is available */ - ret = continue_collector_handshake(&(state->collectors), mev); - if (ret == -1) { - drop_collector(&(state->collectors), mev, 1); - } - break; - case MED_EPOLL_COLLECTOR: - case MED_EPOLL_COL_RMQ: - /* a collector is sending us some data */ - if (ev->events & EPOLLRDHUP) { - ret = -1; - } else if (ev->events & EPOLLIN) { - ret = receive_collector(state, mev); - } - if (ret == -1) { - drop_collector(&(state->collectors), mev, 1); + drop_provisioner(state); } break; default: @@ -1422,27 +998,6 @@ static int send_mediator_listen_details(mediator_state_t *state, &meddeets, 
justcreated); } -/** Disconnects a provisioner socket and releases local state for that - * connection. - * - * @param currstate The global state for this mediator. - */ -static inline void drop_provisioner(mediator_state_t *currstate) { - - /* Disconnect from provisioner and reset all state received - * from the old provisioner (just to be safe). */ - - /* Purge the LIID->agency mappings */ - purge_liid_map(&(currstate->liidmap)); - - disconnect_provisioner(&(currstate->provisioner), 1); - - /* Dump all known agencies -- we'll get new ones when we get a usable - * provisioner again */ - drop_all_agencies(&(currstate->handover_state)); - -} - /** Closes the socket that is listening for collector connections and * drops any collectors that are connected through it. * @@ -1451,20 +1006,28 @@ static inline void drop_provisioner(mediator_state_t *currstate) { static inline void halt_listening_socket(mediator_state_t *currstate) { /* Disconnect all collectors */ - drop_all_collectors(&(currstate->collectors)); - currstate->collectors.collectors = libtrace_list_init( - sizeof(active_collector_t *)); - + mediator_disconnect_all_collectors(&(currstate->collector_threads)); /* Close listen socket and disable epoll event */ remove_mediator_fdevent(currstate->listenerev); currstate->listenerev = NULL; } +/** Updates the global state with any modified values for any of the + * config options that are related to pcap file output. + * + * @param currstate The current global state for the mediator + * @param newstate The state as derived from a recent re-read of + * the config file. + * + * @return 0 if no pcap config has unchanged, 1 if at least one option has + * changed value. + */ static int reload_pcap_config(mediator_state_t *currstate, mediator_state_t *newstate) { int changed = 0; + char *tmp; if (newstate->pcapdirectory == NULL && currstate->pcapdirectory != NULL) { free(currstate->pcapdirectory); @@ -1500,9 +1063,22 @@ static int reload_pcap_config(mediator_state_t *currstate, changed = 1; } + if (currstate->pcaprotatefreq != newstate->pcaprotatefreq) { + changed = 1; + } + + tmp = currstate->pcapdirectory; currstate->pcapdirectory = newstate->pcapdirectory; + newstate->pcapdirectory = tmp; + + tmp = currstate->pcaptemplate; currstate->pcaptemplate = newstate->pcaptemplate; + newstate->pcaptemplate = tmp; + currstate->pcapcompress = newstate->pcapcompress; + currstate->pcaprotatefreq = newstate->pcaprotatefreq; + + return changed; } @@ -1574,9 +1150,15 @@ static int reload_mediator_config(mediator_state_t *currstate) { int provchanged = 0; int tlschanged = 0; int pcapchanged = 0; + int rmqchanged = 0; + int medidchanged = 0; + int opidchanged = 0; + /* TODO the logic in here is horrible to try and follow! */ + + init_provisioner_instance(&(newstate.provisioner), NULL); /* Load the updated config into a spare "global state" instance */ - if (init_med_state(&newstate, currstate->conffile) == -1) { + if (init_mediator_config(&newstate, currstate->conffile) == -1) { logger(LOG_INFO, "OpenLI Mediator: error reloading config file for mediator."); return -1; @@ -1590,12 +1172,77 @@ static int reload_mediator_config(mediator_state_t *currstate) { return -1; } + /* Has the mediator ID changed? */ + if (newstate.mediatorid != currstate->mediatorid) { + medidchanged = 1; + logger(LOG_INFO, + "OpenLI Mediator: mediator ID has changed from %u to %u.", + currstate->mediatorid, newstate.mediatorid); + currstate->mediatorid = newstate.mediatorid; + } + + /* Has the operator ID changed? 
*/ + if (strcmp(newstate.operatorid, currstate->operatorid) != 0) { + char *tmp = currstate->operatorid; + opidchanged = 1; + logger(LOG_INFO, + "OpenLI Mediator: operator ID has changed from %s to %s.", + currstate->operatorid, newstate.operatorid); + currstate->operatorid = newstate.operatorid; + newstate.operatorid = tmp; + } + if (strcmp(newstate.shortoperatorid, currstate->shortoperatorid) != 0) { + char *tmp = currstate->shortoperatorid; + logger(LOG_INFO, + "OpenLI Mediator: short operator ID has changed from %s to %s.", + currstate->shortoperatorid, newstate.shortoperatorid); + opidchanged = 1; + currstate->shortoperatorid = newstate.shortoperatorid; + newstate.shortoperatorid = tmp; + } + + /* Has the RMQ internal password changed? */ + lock_med_collector_config(&(currstate->collector_threads.config)); + if (strcmp(currstate->RMQ_conf.internalpass, + newstate.RMQ_conf.internalpass) != 0) { + + char *tmp = currstate->RMQ_conf.internalpass; + logger(LOG_INFO, + "OpenLI Mediator: RMQ internal password has changed."); + rmqchanged = 1; + currstate->RMQ_conf.internalpass = newstate.RMQ_conf.internalpass; + newstate.RMQ_conf.internalpass = tmp; + } + unlock_med_collector_config(&(currstate->collector_threads.config)); + + /* Has the RMQ heartbeat frequency changed? */ + if (currstate->RMQ_conf.heartbeatFreq != newstate.RMQ_conf.heartbeatFreq) + { + logger(LOG_INFO, "OpenLI Mediator: RMQ heartbeat check frequency changed from %u to %u seconds.", + currstate->RMQ_conf.heartbeatFreq, + newstate.RMQ_conf.heartbeatFreq); + rmqchanged = 1; + currstate->RMQ_conf.heartbeatFreq = newstate.RMQ_conf.heartbeatFreq; + } + + /* Have any pcap-related config options changed? */ + pcapchanged = reload_pcap_config(currstate, &newstate); + if (pcapchanged == -1) { + return -1; + } + + /* RabbitMQ heartbeat, mediator ID or operator ID has changed? + * Tell LEA threads to update their local copies of this config... + */ + if (medidchanged || opidchanged || rmqchanged || pcapchanged) { + update_lea_thread_config(currstate); + } + if (provchanged) { /* The provisioner is supposedly listening on a different IP and/or * port to before, so we should definitely not be talking to * whoever is on the old IP+port. */ - drop_provisioner(currstate); } @@ -1611,36 +1258,51 @@ static int reload_mediator_config(mediator_state_t *currstate) { /* Check if our TLS configuration has changed. If so, we'll need to * drop all connections to other OpenLI components and create them anew. */ + lock_med_collector_config(&(currstate->collector_threads.config)); tlschanged = reload_ssl_config(&(currstate->sslconf), &(newstate.sslconf)); + unlock_med_collector_config(&(currstate->collector_threads.config)); if (tlschanged == -1) { return -1; } - pcapchanged = reload_pcap_config(currstate, &newstate); - if (pcapchanged == -1) { - return -1; - } else if (pcapchanged == 1) { - update_pcap_msg_thread(currstate); - } + if (tlschanged != 0 || newstate.etsitls != currstate->etsitls || + medidchanged || rmqchanged) { + /* Something has changed that will affect our collector receive + * threads and therefore we may need to drop them and force them + * to reconnect. 
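/* Minimal sketch of the pointer-swap idiom used by the reload code above --
 * not part of this patch. Rather than freeing the old string and duplicating
 * the new one, the current and newly-parsed state structures exchange
 * pointers, so the superseded value ends up owned by the temporary newstate
 * and is released by the single clear_med_config(&newstate) call at the end
 * of the reload. The helper name here is illustrative only.
 */
static int sketch_swap_if_changed(char **current, char **incoming) {
    char *tmp;

    if (*current && *incoming && strcmp(*current, *incoming) == 0) {
        return 0;               /* unchanged, nothing to do */
    }
    tmp = *current;
    *current = *incoming;       /* adopt the newly parsed value */
    *incoming = tmp;            /* old value is freed with newstate later */
    return 1;
}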
+ */ - if (tlschanged != 0 || newstate.etsitls != currstate->etsitls) { - currstate->etsitls = newstate.etsitls; + if (newstate.etsitls != currstate->etsitls) { + currstate->etsitls = newstate.etsitls; + tlschanged = 1; + } + update_coll_recv_thread_config(currstate); + } + + if (tlschanged || medidchanged) { + /* If TLS changed, then our existing connections are no longer + * valid. + * + * If the mediator ID changed, then we also need to rejoin the + * collectors -- if we are using RMQ, the queue ID that we are + * supposed to read from is based on our mediator ID number so + * it's just easiest to reset our connections. + */ if (!listenchanged) { /* Disconnect all collectors */ - drop_all_collectors(&(currstate->collectors)); - currstate->collectors.collectors = libtrace_list_init( - sizeof(active_collector_t *)); - + mediator_disconnect_all_collectors(&(currstate->collector_threads)); listenchanged = 1; } + } + if (tlschanged) { if (!provchanged) { drop_provisioner(currstate); provchanged = 1; } } - if (listenchanged && !provchanged) { + if ((listenchanged || medidchanged) && !provchanged) { /* Need to re-announce our listen socket (or mediator ID) details */ if (send_mediator_listen_details(currstate, 0) < 0) { return -1; @@ -1648,7 +1310,8 @@ static int reload_mediator_config(mediator_state_t *currstate) { } - /* newstate was just temporary, so we can tidy it up now */ + /* newstate was just temporary and should only contain config, + * so we can tidy it up now using clear_med_config() */ clear_med_config(&newstate); return 0; @@ -1671,8 +1334,6 @@ static void run(mediator_state_t *state) { int timerexpired = 0; struct epoll_event evs[64]; int provfail = 0; - struct timeval tv; - uint32_t firstflush; med_epoll_ev_t *signalev; /* Register the epoll event for received signals */ @@ -1683,19 +1344,6 @@ static void run(mediator_state_t *state) { "OpenLI Mediator: pcap output file rotation frequency is set to %d minutes.", state->pcaprotatefreq); - gettimeofday(&tv, NULL); - - /* Set our first pcap file flush timer */ - firstflush = (((tv.tv_sec / 60) * 60) + 60) - tv.tv_sec; - - state->pcaptimerev = create_mediator_timer(state->epoll_fd, NULL, - MED_EPOLL_PCAP_TIMER, firstflush); - - if (state->pcaptimerev == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: failed to create pcap rotation timer"); - } - state->timerev = create_mediator_timer(state->epoll_fd, NULL, MED_EPOLL_SIGCHECK_TIMER, 0); @@ -1704,9 +1352,6 @@ static void run(mediator_state_t *state) { goto runfailure; } - state->RMQtimerev = create_mediator_timer(state->epoll_fd, NULL, - MED_EPOLL_RMQCHECK_TIMER, state->RMQ_conf.heartbeatFreq); - while (!mediator_halt) { /* If we've had a SIGHUP recently, reload the config file */ if (reload_config) { @@ -1726,9 +1371,20 @@ static void run(mediator_state_t *state) { if (!provfail) { if (send_mediator_listen_details(state, 1) < 0) { - disconnect_provisioner(&(state->provisioner), 1); + drop_provisioner(state); continue; } + + /* Any LEA threads for LEAs that the provisioner does + * not announce within the next 60 seconds should be + * halted, as presumably those agencies were removed + * from the provisioner config while we were not + * connected to the provisioner. + */ + if (state->provisioner.just_connected) { + trigger_lea_thread_shutdown_timers(state, 60); + state->provisioner.just_connected = 0; + } } /* This timer will force us to stop checking epoll and go back * to the start of this loop (i.e. 
checking if we should halt the @@ -1789,11 +1445,6 @@ static void run(mediator_state_t *state) { */ mediator_halt = true; - /* Tell our agency connection thread to stop when it can */ - pthread_mutex_lock(state->handover_state.agency_mutex); - state->handover_state.halt_flag = 1; - pthread_mutex_unlock(state->handover_state.agency_mutex); - if (signalev) { remove_mediator_fdevent(signalev); } @@ -1803,7 +1454,6 @@ static void run(mediator_state_t *state) { * * Tasks: * - parses user configuration and initialises global state - * - starts supporting threads (pcap output thread, listener thread) * - enters main loop via run() * - once loop exits, wait for supporting threads to exit * - free remaining global state @@ -1815,7 +1465,6 @@ int main(int argc, char *argv[]) { char *pidfile = NULL; mediator_state_t medstate; - mediator_pcap_msg_t pcapmsg; while (1) { int optind; @@ -1880,13 +1529,10 @@ int main(int argc, char *argv[]) { logger(LOG_INFO, "OpenLI Mediator: '%u' has started.", medstate.mediatorid); - update_pcap_msg_thread(&medstate); - - /* Start the pcap output thread */ - pthread_create(&(medstate.pcapthread), NULL, start_pcap_thread, - &(medstate.pcapqueue)); + /* Start the pcap output thread (which behaves like an LEA thread) */ + mediator_start_pcap_thread(&(medstate.agency_threads)); - /* Start the thread that listens for connections from collectors */ + /* Open the socket that listens for connections from collectors */ if (start_collector_listener(&medstate) == -1) { logger(LOG_INFO, "OpenLI Mediator: could not start collector listener socket."); @@ -1898,12 +1544,11 @@ int main(int argc, char *argv[]) { */ run(&medstate); - /* Tell the pcap thread to halt */ - memset(&pcapmsg, 0, sizeof(pcapmsg)); - pcapmsg.msgtype = PCAP_MESSAGE_HALT; - pcapmsg.msgbody = NULL; - pcapmsg.msglen = 0; - libtrace_message_queue_put(&(medstate.pcapqueue), &pcapmsg); + /* Halt all LEA and collector threads that we have started, including the + * pcap output thread. + */ + mediator_disconnect_all_collectors(&(medstate.collector_threads)); + mediator_disconnect_all_leas(&(medstate.agency_threads)); /* Clean up */ destroy_med_state(&medstate); diff --git a/src/mediator/mediator.h b/src/mediator/mediator.h index 55621f0b..04592656 100644 --- a/src/mediator/mediator.h +++ b/src/mediator/mediator.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. 
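/* Illustrative sketch of the teardown invoked at the end of main() above,
 * where all LEA send threads (including the pcap thread) are halted -- not
 * part of this patch. The shutdown message type, the tid field and the
 * per-thread cleanup are hypothetical assumptions; the uthash iteration over
 * agency_threads.threads matches the pattern used elsewhere in this diff.
 * Requires <pthread.h>.
 */
static void sketch_disconnect_all_leas(mediator_lea_t *agency_threads) {
    lea_thread_state_t *lea_t, *tmp;
    lea_thread_msg_t msg;

    HASH_ITER(hh, agency_threads->threads, lea_t, tmp) {
        memset(&msg, 0, sizeof(msg));
        msg.type = MED_LEA_MESSAGE_SHUTDOWN;        /* hypothetical */
        libtrace_message_queue_put(&(lea_t->in_main), &msg);

        pthread_join(lea_t->tid, NULL);             /* field name assumed */
        HASH_DELETE(hh, agency_threads->threads, lea_t);
        /* per-thread state would be freed here */
    }
}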
@@ -40,7 +40,8 @@ #include "pcapthread.h" #include "liidmapping.h" #include "mediator_prov.h" -#include "mediator_coll.h" +#include "coll_recv_thread.h" +#include "lea_send_thread.h" /** Global state variables for a mediator instance */ typedef struct med_state { @@ -79,12 +80,6 @@ typedef struct med_state { /** Compression level to use when writing pcap files */ uint8_t pcapcompress; - /** State for managing all connected handovers */ - handover_state_t handover_state; - - /** A map of LIIDs to their corresponding agencies */ - liid_map_t liidmap; - /** The global epoll file descriptor for this mediator */ int epoll_fd; @@ -97,29 +92,22 @@ typedef struct med_state { /** The epoll event for the epoll loop timer */ med_epoll_ev_t *timerev; - /** The epoll event for the pcap file rotation timer */ - med_epoll_ev_t *pcaptimerev; - - /** The epoll event for the RabbitMQ heartbeat check timer */ - med_epoll_ev_t *RMQtimerev; - /** State for managing the connection back to the provisioner */ mediator_prov_t provisioner; - /** State for managing the connections from collectors */ - mediator_collector_t collectors; + /** The collector receive threads that have been spawned */ + mediator_collector_t collector_threads; + + /** The LEA send threads that have been spawned */ + mediator_lea_t agency_threads; /** The frequency to rotate the pcap files (in minutes) */ uint32_t pcaprotatefreq; - /** The pthread ID for the pcap file writing thread */ - pthread_t pcapthread; - - /** The queue for pushing packets to the pcap file writing thread */ - libtrace_message_queue_t pcapqueue; - /** The SSL configuration for the mediator */ openli_ssl_config_t sslconf; + + /** The RabbitMQ configuration for the mediator */ openli_RMQ_config_t RMQ_conf; } mediator_state_t; diff --git a/src/mediator/mediator_coll.c b/src/mediator/mediator_coll.c deleted file mode 100644 index 3bdefbfe..00000000 --- a/src/mediator/mediator_coll.c +++ /dev/null @@ -1,456 +0,0 @@ -/* - * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. - * All rights reserved. - * - * This file is part of OpenLI. - * - * This code has been developed by the University of Waikato WAND - * research group. For further information please see http://www.wand.net.nz/ - * - * OpenLI is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3 of the License, or - * (at your option) any later version. - * - * OpenLI is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * - */ - -#include "openli_tls.h" -#include "mediator_coll.h" -#include "util.h" -#include "logger.h" -#include -#include - -/** Initialises the state for the collectors managed by a mediator. - * - * @param medcol The global state for the collectors that is to be - * initialised. - * @param usetls A pointer to the global flag that indicates whether - * new collector connections must use TLS. - * @param sslconf A pointer to the SSL configuration for this mediator. - * @param rmqconf A pointer to the RabbitMQ configuration for this - * mediator. 
- */ -void init_med_collector_state(mediator_collector_t *medcol, uint8_t *usetls, - openli_ssl_config_t *sslconf, openli_RMQ_config_t *rmqconf, - uint32_t mediatorid) { - - medcol->usingtls = usetls; - medcol->sslconf = sslconf; - medcol->lastsslerror = 0; - medcol->disabledcols = NULL; - medcol->collectors = NULL; - medcol->epoll_fd = -1; - medcol->rmqconf = rmqconf; - medcol->parent_mediatorid = mediatorid; -} - -/** Destroys the state for the collectors managed by mediator, including - * dropping any remaining collector connections. - * - * @param medcol The global state for the collectors that is to be - * destroyed. - */ -void destroy_med_collector_state(mediator_collector_t *medcol) { - - disabled_collector_t *discol, *dtmp; - - /* Purge the disabled collector list */ - HASH_ITER(hh, medcol->disabledcols, discol, dtmp) { - HASH_DELETE(hh, medcol->disabledcols, discol); - free(discol->ipaddr); - free(discol); - } - - /* Dump all connected collectors */ - drop_all_collectors(medcol); - -} - -/** Accepts a connection from a collector and prepares to receive encoded - * ETSI records from that collector. - * - * @param medcol The global state for the collectors seen by this - * mediator. - * @param listenfd The file descriptor that the connection attempt - * was seen on. - * - * @return -1 if an error occurs, otherwise the file descriptor for the - * collector connection. - */ -int mediator_accept_collector(mediator_collector_t *medcol, int listenfd) { - - int newfd = -1, rmqfd = -1; - struct sockaddr_storage saddr; - socklen_t socklen = sizeof(saddr); - char strbuf[INET6_ADDRSTRLEN]; - active_collector_t *col = NULL; - single_coll_state_t *mstate; - disabled_collector_t *discol = NULL; - int fdtype; - int r = OPENLI_SSL_CONNECT_NOSSL; - char stringspace[32]; - - /* TODO check for EPOLLHUP or EPOLLERR */ - - /* Accept, then add to list of collectors. Push all active intercepts - * out to the collector. 
*/ - newfd = accept(listenfd, (struct sockaddr *)&saddr, &socklen); - fd_set_nonblock(newfd); - - if (getnameinfo((struct sockaddr *)&saddr, socklen, strbuf, sizeof(strbuf), - 0, 0, NI_NUMERICHOST) != 0) { - logger(LOG_INFO, "OpenLI Mediator: getnameinfo error in mediator: %s.", - strerror(errno)); - } - - if (newfd < 0) { - return newfd; - } - - col = (active_collector_t *)calloc(1, sizeof(active_collector_t)); - col->ssl = NULL; - - if (*(medcol->usingtls)) { - /* We're using TLS so create an OpenSSL socket */ - r = listen_ssl_socket(medcol->sslconf, &(col->ssl), newfd); - - if (r == OPENLI_SSL_CONNECT_FAILED) { - close(newfd); - SSL_free(col->ssl); - col->ssl = NULL; - - if (r != medcol->lastsslerror) { - logger(LOG_INFO, - "OpenLI: SSL Handshake failed for collector %s", - strbuf); - } - medcol->lastsslerror = r; - return -1; - } - - if (r == OPENLI_SSL_CONNECT_WAITING) { - /* Handshake is not yet complete, so we need to wait for that */ - fdtype = MED_EPOLL_COLLECTOR_HANDSHAKE; - } else { - /* Handshake completed, go straight to "Ready" mode */ - fdtype = MED_EPOLL_COLLECTOR; - medcol->lastsslerror = 0; - } - } else { - /* Not using TLS, we're good to go right away */ - fdtype = MED_EPOLL_COLLECTOR; - } - - mstate = (single_coll_state_t *)calloc(1, sizeof(single_coll_state_t)); - mstate->ipaddr = strdup(strbuf); - mstate->iplen = strlen(strbuf); - - mstate->rmq_queueid.len = snprintf(stringspace, sizeof(stringspace), "ID%d", - medcol->parent_mediatorid); - mstate->rmq_queueid.bytes = (void *)strdup(stringspace); - - col->rmqev = NULL; - col->colev = NULL; - - if (fdtype == MED_EPOLL_COLLECTOR && medcol->rmqconf->enabled) { - rmqfd = receive_rmq_invite(medcol, mstate); - if (rmqfd < 0) { - logger(LOG_INFO, - "OpenLI Mediator: error while joining RMQ for collector %s", - strbuf); - goto acceptfail; - } - col->rmqev = create_mediator_fdevent(medcol->epoll_fd, mstate, - MED_EPOLL_COL_RMQ, rmqfd, EPOLLIN | EPOLLRDHUP); - if (col->rmqev == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: unable to add collector RMQ fd to epoll: %s.", - strerror(errno)); - goto acceptfail; - } - } - - /* Add fd to epoll */ - col->colev = create_mediator_fdevent(medcol->epoll_fd, mstate, fdtype, - newfd, EPOLLIN | EPOLLRDHUP); - - if (col->colev == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: unable to add collector fd to epoll: %s.", - strerror(errno)); - goto acceptfail; - } - mstate->ssl = col->ssl; - mstate->owner = col; - if (!mstate->incoming) { - mstate->incoming = create_net_buffer(NETBUF_RECV, newfd, col->ssl); - } - - /* Check if this is a reconnection case */ - HASH_FIND(hh, medcol->disabledcols, mstate->ipaddr, - strlen(mstate->ipaddr), discol); - - if (discol) { - mstate->disabled_log = 1; - } else { - logger(LOG_INFO, - "OpenLI Mediator: accepted connection from collector %s.", - strbuf); - mstate->disabled_log = 0; - } - - /* Add this collector to the set of active collectors */ - libtrace_list_push_back(medcol->collectors, &col); - - return newfd; - -acceptfail: - if (newfd != -1) { - close(newfd); - } - if (rmqfd != -1) { - close(rmqfd); - } - if (col) { - remove_mediator_fdevent(col->colev); - remove_mediator_fdevent(col->rmqev); - free(col); - } - - free(mstate->ipaddr); - free(mstate); - return -1; -} - -/** Attempts to complete an ongoing TLS handshake with a collector. 
- * - * @param medcol The global state for the collectors seen by the mediator - * @param mev The epoll event for the collector socket - * - * @return -1 if an error occurs, 0 if the handshake is not yet complete, - * 1 if the handshake has now completed. - */ -int continue_collector_handshake(mediator_collector_t *medcol, - med_epoll_ev_t *mev) { - - single_coll_state_t *cs = (single_coll_state_t *)(mev->state); - - //either keep running handshake or return when error - int ret = SSL_accept(cs->ssl); - - if (ret <= 0){ - ret = SSL_get_error(cs->ssl, ret); - if(ret == SSL_ERROR_WANT_READ || ret == SSL_ERROR_WANT_WRITE){ - //keep trying - return 0; - } - else { - //fail out - logger(LOG_INFO, - "OpenLI: Pending SSL Handshake for collector failed"); - return -1; - } - } - logger(LOG_INFO, "OpenLI: Pending SSL Handshake for collector accepted"); - medcol->lastsslerror = 0; - - //handshake has finished - if (medcol->rmqconf->enabled) { - int rmqfd = receive_rmq_invite(medcol, cs); - if (rmqfd < 0) { - logger(LOG_INFO, - "OpenLI Mediator: error while joining RMQ for collector %s", - cs->ipaddr); - return -1; - } - assert(cs->owner); - cs->owner->rmqev = create_mediator_fdevent(medcol->epoll_fd, cs, - MED_EPOLL_COL_RMQ, rmqfd, EPOLLIN | EPOLLRDHUP); - if (cs->owner->rmqev == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: unable to add collector RMQ fd to epoll: %s.", - strerror(errno)); - return -1; - } - } - mev->fdtype = MED_EPOLL_COLLECTOR; - return 1; -} - -/** Drops the connection to a collector and moves the collector to the - * disabled collector list. - * - * @param medcol The global state for collectors seen by the mediator - * @param colev The epoll event for this collection connection - * @param disablelog A flag that indicates whether we should log about - * this incident - */ -void drop_collector(mediator_collector_t *medcol, - med_epoll_ev_t *colev, int disablelog) { - single_coll_state_t *mstate; - - if (!colev) { - return; - } - - mstate = (single_coll_state_t *)(colev->state); - if (mstate->disabled_log == 0 && colev->fd != -1) { - logger(LOG_INFO, - "OpenLI Mediator: disconnecting from collector %d.", - colev->fd); - } - - if (mstate && disablelog) { - disabled_collector_t *discol; - - /* Add this collector to the disabled collectors list. */ - HASH_FIND(hh, medcol->disabledcols, mstate->ipaddr, - strlen(mstate->ipaddr), discol); - if (discol == NULL) { - discol = (disabled_collector_t *)calloc(1, - sizeof(disabled_collector_t)); - discol->ipaddr = mstate->ipaddr; - mstate->ipaddr = NULL; - - HASH_ADD_KEYPTR(hh, medcol->disabledcols, discol->ipaddr, - strlen(discol->ipaddr), discol); - } - } - - if (mstate && mstate->incoming) { - destroy_net_buffer(mstate->incoming); - mstate->incoming = NULL; - } - - if (mstate && mstate->incoming_rmq) { - destroy_net_buffer(mstate->incoming_rmq); - mstate->incoming_rmq = NULL; - } - - if (mstate->ipaddr) { - free(mstate->ipaddr); - mstate->ipaddr = NULL; - } - - if (mstate->amqp_state) { - amqp_destroy_connection(mstate->amqp_state); - mstate->amqp_state = NULL; - } - - if (mstate->rmq_queueid.bytes) { - free(mstate->rmq_queueid.bytes); - } - - remove_mediator_fdevent(colev); - if (mstate->owner) { - remove_mediator_fdevent(mstate->owner->rmqev); - if (mstate->owner->ssl) { - SSL_free(mstate->owner->ssl); - } - mstate->owner->rmqev = NULL; - mstate->owner->colev = NULL; - } - - free(mstate); -} - -/** Drops *all* currently connected collectors. 
- * - * @param medcol The set of collectors for this mediator - */ -void drop_all_collectors(mediator_collector_t *medcol) { - - /* TODO send disconnect messages to all collectors? */ - libtrace_list_node_t *n; - active_collector_t *col; - - n = medcol->collectors->head; - while (n) { - col = *((active_collector_t **)(n->data)); - - /* No need to log every collector we're dropping, so we pass in 0 - * as the last parameter */ - drop_collector(medcol, col->colev, 0); - free(col); - n = n->next; - } - - libtrace_list_deinit(medcol->collectors); -} - -/** Re-enables log messages for a collector that has re-connected. - * - * @param medcol The global state for collectors seen by this mediator - * @param cs The collector that has re-connected - * - */ -void reenable_collector_logging(mediator_collector_t *medcol, - single_coll_state_t *cs) { - - disabled_collector_t *discol = NULL; - - cs->disabled_log = 0; - HASH_FIND(hh, medcol->disabledcols, cs->ipaddr, strlen(cs->ipaddr), discol); - if (discol) { - HASH_DELETE(hh, medcol->disabledcols, discol); - free(discol->ipaddr); - free(discol); - logger(LOG_INFO, "collector %s has successfully re-connected", - cs->ipaddr); - } -} - -void service_RMQ_connections(mediator_collector_t *medcol) { - - libtrace_list_node_t *curr; - int ret; - single_coll_state_t *cs; - - if (medcol == NULL) { - return; - } - curr = medcol->collectors->head; - - while (curr) { - active_collector_t *col = *((active_collector_t **)(curr->data)); - cs = (single_coll_state_t *)(col->colev->state); - - if (col->rmqev == NULL || col->rmqev->fdtype != MED_EPOLL_COL_RMQ) { - curr = curr->next; - continue; - } - - ret = check_rmq_status(medcol, col); - if (ret == -1) { - drop_collector(medcol, col->colev, 0); - } else if (ret == 0) { - if (receive_rmq_invite(medcol, cs) < 0) { - if (cs->disabled_log == 0) { - logger(LOG_INFO, - "OpenLI mediator: failed to reconnect to RMQ socket: %s", - strerror(errno)); - } - cs->disabled_log = 1; - } else { - logger(LOG_INFO, "OpenLI mediator: reconnected to RMQ at %s", - cs->ipaddr); - cs->disabled_log = 0; - } - } - curr = curr->next; - } -} - -// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : - diff --git a/src/mediator/mediator_coll.h b/src/mediator/mediator_coll.h deleted file mode 100644 index a6b9bf5a..00000000 --- a/src/mediator/mediator_coll.h +++ /dev/null @@ -1,207 +0,0 @@ -/* - * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. - * All rights reserved. - * - * This file is part of OpenLI. - * - * This code has been developed by the University of Waikato WAND - * research group. For further information please see http://www.wand.net.nz/ - * - * OpenLI is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3 of the License, or - * (at your option) any later version. - * - * OpenLI is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- * - * - */ - -#ifndef OPENLI_MEDIATOR_COLL_H_ -#define OPENLI_MEDIATOR_COLL_H_ - -#include -#include -#include -#include "med_epoll.h" -#include "netcomms.h" -#include "openli_tls.h" - -typedef struct active_collector active_collector_t; - -/** Describes a collector which has been temporarily disabled, e.g. due to - * a connection breaking down. - */ -typedef struct disabled_collector { - /** The IP address that the collector connected from */ - char *ipaddr; - UT_hash_handle hh; -} disabled_collector_t; - -/** State associated with a single collector connection */ -typedef struct single_coll_state { - - /** The IP address that the collector has connected from */ - char *ipaddr; - - /** The length of the IP address string */ - int iplen; - - /** The buffer used to store ETSI records received from the collector via - * a network connection */ - net_buffer_t *incoming; - - /** The buffer used to store ETSI records received from the collector via - * RabbitMQ */ - net_buffer_t *incoming_rmq; - - /** A flag indicating whether error logging is disabled for this - * collector. - */ - int disabled_log; - - /** The SSL socket for this collector connection, if not using RMQ */ - SSL *ssl; - - /** The AMQP connection state for this collector connection, if using RMQ */ - amqp_connection_state_t amqp_state; - - amqp_bytes_t rmq_queueid; - - active_collector_t *owner; -} single_coll_state_t; - -/** An instance of an active collector */ -struct active_collector { - /** The epoll event for the collector connection socket */ - med_epoll_ev_t *colev; - - /** The epoll event for the collector RMQ socket */ - med_epoll_ev_t *rmqev; - - /** The SSL socket for this collector connection, if required */ - SSL *ssl; -}; - -/** Structure for storing global state for all collectors managed by a - * mediator instance. - */ -typedef struct mediator_collector_glob_state { - /* The error code for the most recent SSL error when accepting a collector - * connection. - */ - int lastsslerror; - - /** Points to the flag that indicates whether collector connections are - * using TLS. - */ - uint8_t *usingtls; - - /** The global epoll fd for this mediator instance. */ - int epoll_fd; - - /** The list of currently active collector connections. */ - libtrace_list_t *collectors; - - /** A map containing all collectors that are currently disconnected. */ - disabled_collector_t *disabledcols; - - /** The SSL configuration for this mediator instance. */ - openli_ssl_config_t *sslconf; - - /** The RabbitMQ configuration for this mediator instance */ - openli_RMQ_config_t *rmqconf; - - /** The ID of the mediator instance */ - uint32_t parent_mediatorid; -} mediator_collector_t; - -/** Initialises the state for the collectors managed by a mediator. - * - * @param medcol The global state for the collectors that is to be - * initialised. - * @param usetls A pointer to the global flag that indicates whether - * new collector connections must use TLS. - * @param sslconf A pointer to the SSL configuration for this mediator. - * @param rmqconf A pointer to the RabbitMQ configuration for this - * mediator. - * @param mediatorid The ID of the mediator that is managing the collectors. - */ -void init_med_collector_state(mediator_collector_t *medcol, uint8_t *usetls, - openli_ssl_config_t *sslconf, openli_RMQ_config_t *rmqconf, - uint32_t mediatorid); - -/** Destroys the state for the collectors managed by mediator, including - * dropping any remaining collector connections. 
- * - * @param medcol The global state for the collectors that is to be - * destroyed. - */ -void destroy_med_collector_state(mediator_collector_t *medcol); - -/** Accepts a connection from a collector and prepares to receive encoded - * ETSI records from that collector. - * - * @param medcol The global state for the collectors seen by this - * mediator. - * @param listenfd The file descriptor that the connection attempt - * was seen on. - * - * @return -1 if an error occurs, otherwise the file descriptor for the - * collector connection. - */ -int mediator_accept_collector(mediator_collector_t *medcol, int listenfd); - -/** Attempts to complete an ongoing TLS handshake with a collector. - * - * @param medcol The global state for the collectors seen by the mediator - * @param mev The epoll event for the collector socket - * - * @return -1 if an error occurs, 0 if the handshake is not yet complete, - * 1 if the handshake has now completed. - */ -int continue_collector_handshake(mediator_collector_t *medcol, - med_epoll_ev_t *mev); - -/** Drops the connection to a collector and moves the collector to the - * disabled collector list. - * - * @param medcol The global state for collectors seen by the mediator - * @param colev The epoll event for this collection connection - * @param disablelog A flag that indicates whether we should log about - * this incident - */ -void drop_collector(mediator_collector_t *medcol, - med_epoll_ev_t *colev, int disablelog); - -/** Drops *all* currently connected collectors. - * - * @param medcol The set of collectors for this mediator - */ -void drop_all_collectors(mediator_collector_t *medcol); - -/** Re-enables log messages for a collector that has re-connected. - * - * @param medcol The global state for collectors seen by this mediator - * @param cs The collector that has re-connected - * - */ -void reenable_collector_logging(mediator_collector_t *medcol, - single_coll_state_t *cs); - -int receive_rmq_invite(mediator_collector_t *medcol, - single_coll_state_t *mstate); - -int check_rmq_status(mediator_collector_t *medcol, active_collector_t *col); - -void service_RMQ_connections(mediator_collector_t *medcol); - -#endif -// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/mediator_prov.c b/src/mediator/mediator_prov.c index 82133f87..a9bf0065 100644 --- a/src/mediator/mediator_prov.c +++ b/src/mediator/mediator_prov.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -33,6 +33,15 @@ #include "logger.h" #include "util.h" +/** This file implements the methods used by the communication channel between + * an OpenLI mediator and an OpenLI provisioner (on the mediator side). 
+ */ + +/** Initialises the state for a provisioner instance + * + * @param prov The provisioner instance + * @param ctx The SSL context object for the mediator + */ void init_provisioner_instance(mediator_prov_t *prov, SSL_CTX **ctx) { prov->provreconnect = NULL; prov->provev = NULL; @@ -40,6 +49,7 @@ void init_provisioner_instance(mediator_prov_t *prov, SSL_CTX **ctx) { prov->outgoing = NULL; prov->disable_log = 0; prov->tryconnect = 1; + prov->just_connected = 0; prov->ssl = NULL; prov->epoll_fd = -1; prov->sslctxt = ctx; @@ -66,6 +76,13 @@ static inline void setup_provisioner_reconnect_timer(mediator_prov_t *prov) { start_mediator_timer(prov->provreconnect, 1); } +/** Disconnects the TCP session to a provisioner and resets any state + * associated with that communication channel. + * + * @param prov The provisioner instance to disconnect + * @param enable_reconnect If not zero, we will set a timer to try and + * reconnect to the provisioner in 1 second. + */ void disconnect_provisioner(mediator_prov_t *prov, int enable_reconnect) { if (prov->disable_log == 0) { @@ -108,6 +125,7 @@ void disconnect_provisioner(mediator_prov_t *prov, int enable_reconnect) { prov->incoming = NULL; } + /* Set the reconnection timer, if requested */ if (enable_reconnect) { setup_provisioner_reconnect_timer(prov); } @@ -132,6 +150,17 @@ void free_provisioner(mediator_prov_t *prov) { } } +/** Sends the mediator details message to a connected provisioner. + * Mediator details include the port and IP that it is listening on for + * collector connections. + * + * @param prov The provisioner that is to receive the message. + * @param meddeets The details to be included in the message. + * @param justcreated A flag indicating whether the socket for the + * provisioner connection has just been created. + * + * @return -1 if an error occurs, 0 otherwise. + */ int send_mediator_details_to_provisioner(mediator_prov_t *prov, openli_mediator_t *meddeets, int justcreated) { @@ -220,6 +249,9 @@ static int init_provisioner_connection(mediator_prov_t *prov, int sock) { prov->outgoing = create_net_buffer(NETBUF_SEND, sock, prov->ssl); prov->incoming = create_net_buffer(NETBUF_RECV, sock, prov->ssl); + /* The AUTH message indicates to the provisioner that we are an OpenLI + * mediator and they can safely start sending us intercept information. + */ if (push_auth_onto_net_buffer(prov->outgoing, OPENLI_PROTO_MEDIATOR_AUTH) == -1) { if (prov->disable_log == 0) { @@ -277,10 +309,10 @@ int attempt_provisioner_connect(mediator_prov_t *prov, int provfail) { provfail = 0; if (s == -1) { - // if (prov->disable_log == 0) { + if (prov->disable_log == 0) { logger(LOG_INFO, "OpenLI Mediator: Error - Unable to connect to provisioner."); - // } + } setup_provisioner_reconnect_timer(prov); provfail = 1; } else if (s == 0) { @@ -303,7 +335,7 @@ int attempt_provisioner_connect(mediator_prov_t *prov, int provfail) { "OpenLI mediator has connected to provisioner at %s:%s", prov->provaddr, prov->provport); } - + prov->just_connected = 1; } } return provfail; diff --git a/src/mediator/mediator_prov.h b/src/mediator/mediator_prov.h index 844c6f9d..47c17f3c 100644 --- a/src/mediator/mediator_prov.h +++ b/src/mediator/mediator_prov.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. 
@@ -58,6 +58,12 @@ typedef struct mediator_provisioner { */ uint8_t tryconnect; + /** Flag indicating if the provisioner connection has just been + * re-established, so that the mediator can let other threads + * know that the provisioner is back. + */ + uint8_t just_connected; + /** The SSL socket for the connection to the provisioner */ SSL *ssl; @@ -79,18 +85,19 @@ typedef struct mediator_provisioner { char *provport; } mediator_prov_t; -/** Initialises a provisioner instance with an OpenLI mediator +/** Initialises the state for a provisioner instance * - * @param prov The reference to the provisioner that is to be - * initialised + * @param prov The provisioner instance + * @param ctx The SSL context object for the mediator */ void init_provisioner_instance(mediator_prov_t *prov, SSL_CTX **ctx); -/** Disconnects the currently connected provisioner. +/** Disconnects the TCP session to a provisioner and resets any state + * associated with that communication channel. * - * @param prov The provisioner to disconnect. - * @param enable_reconnect Flag that indicates whether the mediator should - * try to reconnect to the provisioner. + * @param prov The provisioner instance to disconnect + * @param enable_reconnect If not zero, we will set a timer to try and + * reconnect to the provisioner in 1 second. */ void disconnect_provisioner(mediator_prov_t *prov, int enable_reconnect); diff --git a/src/mediator/mediator_rmq.c b/src/mediator/mediator_rmq.c index 77c65fa4..07dbb4b5 100644 --- a/src/mediator/mediator_rmq.c +++ b/src/mediator/mediator_rmq.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -27,240 +27,1239 @@ #include #include #include -#include "mediator_coll.h" +#include "mediator_rmq.h" +#include #include "logger.h" +#include "coll_recv_thread.h" -static amqp_connection_state_t join_RMQ(mediator_collector_t *medcol, - uint8_t *msgbody, uint16_t msglen, int logDisabled, - single_coll_state_t *mstate) { +/** This file implements the interactions between various elements of the + * OpenLI mediator (e.g. LEA send threads, collector receive threads) and + * the RabbitMQ API. + */ + +/** Declares a RabbitMQ queue on a specified channel + * + * @param state The RMQ connection to use to declare the queue + * @param queueid The name to assign to the queue + * @param channel The channel to declare the queue on + * + * @return -1 if an error occurs, 0 otherwise + */ +static int declare_RMQ_queue(amqp_connection_state_t state, + char *queueid, int channel) { + + amqp_bytes_t rmq_queueid; + amqp_table_t queueargs; + + queueargs.num_entries = 0; + queueargs.entries = NULL; + + rmq_queueid.len = strlen(queueid); + rmq_queueid.bytes = (void *)queueid; + + amqp_queue_declare(state, channel, rmq_queueid, 0, 1, 0, 0, queueargs); + if (amqp_get_rpc_reply(state).reply_type != AMQP_RESPONSE_NORMAL) { + logger(LOG_INFO, "OpenLI Mediator: unable to declare RMQ queue for %s on channel %d: %u", queueid, channel, amqp_get_rpc_reply(state).reply_type); + return -1; + } + + return 0; +} + +/** Checks if a particular RabbitMQ queue is empty (i.e. contains zero + * unconsumed messages). 
+ * + * @param state The RMQ connection to use to access the queue + * @param queueid The name of the queue to check + * @param channel The channel that the queue exists on + * + * @return -1 if an error occurs, 0 if the queue is not empty, 1 if the queue + * is empty. + */ +static int is_RMQ_queue_empty(amqp_connection_state_t state, char *queueid, + int channel) { + + amqp_bytes_t rmq_queueid; + amqp_table_t queueargs; + amqp_queue_declare_ok_t *r; + + queueargs.num_entries = 0; + queueargs.entries = NULL; + + rmq_queueid.len = strlen(queueid); + rmq_queueid.bytes = (void *)queueid; + + r = amqp_queue_declare(state, channel, rmq_queueid, 0, 1, 0, 0, queueargs); + if (amqp_get_rpc_reply(state).reply_type != AMQP_RESPONSE_NORMAL) { + logger(LOG_INFO, "OpenLI Mediator: unable to declare passive RMQ queue for %s on channel %d: %u", queueid, channel, amqp_get_rpc_reply(state).reply_type); + return -1; + } + + /* TODO this is wrong! message_count doesn't include pre-fetched + * messages so is often 0 when there are still unacked messages :( + * + * Need a way to get the number that rabbitmqctl reports for the + * queue -- I don't think rabbitmq-c provides a useful API for this. + */ + printf("queueid %s -- message count %u\n", queueid, r->message_count); + if (r->message_count == 0) { + return 1; + } + + return 0; +} + +/** Registers a RabbitMQ queue for consumption by an existing connection + * + * @param state The RMQ connection to register the queue on + * @param queueid The name of the queue to consume from + * @param channel The channel that the queue should be on + * + * @return -1 if an error occurs, 0 otherwise. + */ +static int register_RMQ_consumer(amqp_connection_state_t state, + char *queueid, int channel) { + + amqp_bytes_t rmq_queueid; + + rmq_queueid.len = strlen(queueid); + rmq_queueid.bytes = (void *)queueid; + + amqp_basic_consume(state, channel, rmq_queueid, amqp_cstring_bytes(queueid), + 0, 0, 0, amqp_empty_table); + + if (amqp_get_rpc_reply(state).reply_type != AMQP_RESPONSE_NORMAL ) { + return -1; + } + + return 0; +} + +/** Disables consumption from a RabbitMQ queue by an existing connection + * + * @param state The RMQ connection to disassociate the queue from + * @param queueid The name of the queue to disable + * @param channel The channel that the queue should be on + * + * @return -1 if an error occurs, 0 otherwise. + */ +static int cancel_RMQ_consumer(amqp_connection_state_t state, + char *queueid, int channel) { + + if (state == NULL) { + return 0; + } + + amqp_basic_cancel(state, channel, amqp_cstring_bytes(queueid)); + if (amqp_get_rpc_reply(state).reply_type != AMQP_RESPONSE_NORMAL) { + return -1; + } + return 0; +} + +/** Declares the CC and IRI queues in RabbitMQ for a particular LIID + * + * If the queues are already declared, this should be a no-op. + * + * @param state The RMQ connection to use to declare the queues + * @param liid The LIID to declare queues for + * @param liidlen The length of the LIID (in bytes) + * + * @return -1 if an error occurs, 0 otherwise. 
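/* Illustrative sketch -- not part of this patch. It shows how a consumer could
 * start reading the per-LIID queues using the channel convention established
 * by the helpers above (channel 2 = IRI, channel 3 = CC, channel 4 = raw IP)
 * and queue names of the form "<LIID>-iri" / "<LIID>-cc". declare_RMQ_queue()
 * and register_RMQ_consumer() are static to this file, so a real caller would
 * go through exported wrappers; error handling is reduced to a single check.
 */
static int sketch_consume_liid(amqp_connection_state_t state, char *liid) {
    char queuename[1024];

    snprintf(queuename, sizeof(queuename), "%s-iri", liid);
    if (declare_RMQ_queue(state, queuename, 2) < 0 ||
            register_RMQ_consumer(state, queuename, 2) < 0) {
        return -1;
    }

    snprintf(queuename, sizeof(queuename), "%s-cc", liid);
    if (declare_RMQ_queue(state, queuename, 3) < 0 ||
            register_RMQ_consumer(state, queuename, 3) < 0) {
        return -1;
    }
    return 0;
}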
+ */
+int declare_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        char *liid, int liidlen) {
+
+    char cc_queuename[1024];
+    char iri_queuename[1024];
+
+    snprintf(cc_queuename, 1024, "%s-%s", liid, "cc");
+    snprintf(iri_queuename, 1024, "%s-%s", liid, "iri");
+
+    if (declare_RMQ_queue(state, iri_queuename, 2) < 0) {
+        return -1;
+    }
+    return declare_RMQ_queue(state, cc_queuename, 3);
+}
+
+/** Declares the Raw IP queue in RabbitMQ for a particular LIID
+ *
+ * Only required for LIIDs that are being written to pcap files.
+ *
+ * @param state         The RMQ connection to use to declare the queue
+ * @param liid          The LIID to declare a raw IP queue for
+ * @param liidlen       The length of the LIID (in bytes)
+ *
+ * @return -1 if an error occurs, 0 otherwise.
+ */
+int declare_mediator_rawip_RMQ_queue(amqp_connection_state_t state,
+        char *liid, int liidlen) {
+
+    char queuename[1024];
+    snprintf(queuename, 1024, "%s-rawip", liid);
+    return declare_RMQ_queue(state, queuename, 4);
+}
+
+/** Publishes a message onto a mediator RMQ queue.
+ *
+ * A message can be an encoded CC, an encoded IRI, or a raw IP packet body.
+ *
+ * @param state         The RMQ connection to use to publish the message
+ * @param msg           A pointer to the start of the message content
+ * @param msglen        The length of the message content, in bytes
+ * @param liid          The LIID that the message belongs to
+ * @param channel       The channel to publish the message to
+ * @param queuetype     The message type (one of "iri", "cc", or "rawip")
+ * @param expiry        The TTL of the message in seconds -- if set to 0,
+ *                      the message will not be expired by RMQ
+ *
+ * @return 0 if an error occurs, 1 if the message is published successfully
+ */
+static int produce_mediator_RMQ(amqp_connection_state_t state,
+        uint8_t *msg, uint16_t msglen, char *liid, int channel,
+        char *queuetype, uint32_t expiry) {
+    amqp_bytes_t message_bytes;
+    amqp_basic_properties_t props;
+    int pub_ret;
+    char queuename[1024];
+    char expirystr[1024];
+
+    snprintf(queuename, 1024, "%s-%s", liid, queuetype);
+    message_bytes.len = msglen;
+    message_bytes.bytes = msg;
+
+    props._flags = AMQP_BASIC_DELIVERY_MODE_FLAG;
+    props.delivery_mode = 2;        /* persistent mode */
+
+    if (expiry != 0) {
+        snprintf(expirystr, 1024, "%u", expiry * 1000);
+        /* The expiration flag must also be set in _flags, otherwise the
+         * per-message TTL is not encoded into the published message.
+         */
+        props._flags |= AMQP_BASIC_EXPIRATION_FLAG;
+        props.expiration = amqp_cstring_bytes(expirystr);
+    }
+
+    pub_ret = amqp_basic_publish(state, channel, amqp_cstring_bytes(""),
+            amqp_cstring_bytes(queuename), 0, 0, &props, message_bytes);
+    if (pub_ret != 0) {
+        logger(LOG_INFO, "OpenLI Mediator: error publishing to internal RMQ for LIID %s: %d", liid, pub_ret);
+        return 0;
+    }
+
+    return 1;
+}
+
+/** Publishes a raw IP packet onto a mediator RMQ queue.
+ *
+ * @param state         The RMQ connection to use to publish the message
+ * @param msg           A pointer to the start of the packet body
+ * @param msglen        The length of the packet body, in bytes
+ * @param liid          The LIID that the message belongs to
+ *
+ * @return 0 if an error occurs, 1 if the message is published successfully
+ */
+int publish_rawip_on_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        uint8_t *msg, uint16_t msglen, char *liid) {
+    /* If we haven't managed to write this to a pcap file within 60 seconds,
+     * expire the message from the RMQ.
+ * + * This should mitigate any potential issues in the rare case where an + * intercept switches from pcapdisk to a regular agency then back to + * pcapdisk -- old raw IP from the first pcapdisk phase that was + * lingering in the queue won't be included in the "new" pcapdisk + * output (assuming 60 seconds have passed since the first pcapdisk + * output was halted). + */ + return produce_mediator_RMQ(state, msg, msglen, liid, 4, "rawip", 60); +} + +/** Publishes an encoded IRI onto a mediator RMQ queue. + * + * @param state The RMQ connection to use to publish the message + * @param msg A pointer to the start of the encoded IRI + * @param msglen The length of the encoded IRI, in bytes + * @param liid The LIID that the message belongs to + * + * @return 0 if an error occurs, 1 if the message is published successfully + */ +int publish_iri_on_mediator_liid_RMQ_queue(amqp_connection_state_t state, + uint8_t *msg, uint16_t msglen, char *liid) { + + return produce_mediator_RMQ(state, msg, msglen, liid, 2, "iri", 0); +} + +/** Publishes an encoded CC onto a mediator RMQ queue. + * + * @param state The RMQ connection to use to publish the message + * @param msg A pointer to the start of the encoded CC + * @param msglen The length of the encoded CC, in bytes + * @param liid The LIID that the message belongs to + * + * @return 0 if an error occurs, 1 if the message is published successfully + */ +int publish_cc_on_mediator_liid_RMQ_queue(amqp_connection_state_t state, + uint8_t *msg, uint16_t msglen, char *liid) { + return produce_mediator_RMQ(state, msg, msglen, liid, 3, "cc", 0); +} + +void remove_mediator_liid_RMQ_queue(amqp_connection_state_t state, + char *liid) { + int err = 0; + amqp_queue_delete_ok_t *reply; + char queuename[1024]; + + snprintf(queuename, 1024, "%s-iri", liid); + + reply = amqp_queue_delete(state, 2, amqp_cstring_bytes(queuename), 0, 0); + if (reply == NULL) { + err = 1; + } + + snprintf(queuename, 1024, "%s-cc", liid); + reply = amqp_queue_delete(state, 3, amqp_cstring_bytes(queuename), 0, 0); + if (reply == NULL) { + err = 1; + } + + if (err) { + /* I guess this is bad, not sure what to do though... */ + logger(LOG_ERR, "Error while deleting internal RMQ for LIID %s", + liid); + } + +} + +void remove_mediator_rawip_RMQ_queue(amqp_connection_state_t state, + char *liid) { + + amqp_queue_delete_ok_t *reply; + char queuename[1024]; + + snprintf(queuename, 1024, "%s-rawip", liid); + reply = amqp_queue_delete(state, 4, amqp_cstring_bytes(queuename), 0, 0); + if (reply == NULL) { + logger(LOG_ERR, "Error while deleting internal rawip RMQ for LIID %s", + liid); + } +} + +/** Creates a connection to the internal RMQ instance for the purposes of + * consuming intercept records for intercepts headed for a particular agency. + * + * Intended to be called by LEA send threads to establish their RMQ + * connection session. + * + * @param agencyid The ID of the agency that this connection is for. + * @param logfailure Flag indicating whether to write a log message if + * an error occurs. Set to zero to avoid log spam + * if the connection attempt repeatedly fails. + * @param password The password to use to authenticate with RMQ. + * + * @return NULL if the connection fails, otherwise the newly created + * connection object. 
+ */ +amqp_connection_state_t join_mediator_RMQ_as_consumer(char *agencyid, + int logfailure, char *password) { + + amqp_connection_state_t state; + amqp_socket_t *amqp_sock; + + state = amqp_new_connection(); + amqp_sock = amqp_tcp_socket_new(state); + + if (amqp_socket_open(amqp_sock, "localhost", 5672)) { + if (logfailure) { + logger(LOG_INFO, "OpenLI Mediator: unable to open amqp consumer socket for internal RMQ in agency thread %s", agencyid); + } + goto consfailed; + } + + /* Hard-coded username and vhost */ + if ((amqp_login(state, "OpenLI-med", 0, 131072, 0, + AMQP_SASL_METHOD_PLAIN, "openli.nz", password)) + .reply_type != AMQP_RESPONSE_NORMAL) { + if (logfailure) { + logger(LOG_ERR, "OpenLI Mediator: failed to log into RMQ broker using plain auth in agency thread %s", agencyid); + } + goto consfailed; + } + + /* TODO replace with loop */ + amqp_channel_open(state, 2); + if ((amqp_get_rpc_reply(state).reply_type) != AMQP_RESPONSE_NORMAL) { + if (logfailure) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 2 in agency thread %s", agencyid); + } + goto consfailed; + } + + amqp_channel_open(state, 3); + if ((amqp_get_rpc_reply(state).reply_type) != AMQP_RESPONSE_NORMAL) { + if (logfailure) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 3 in agency thread %s", agencyid); + } + goto consfailed; + } + + amqp_channel_open(state, 4); + if ((amqp_get_rpc_reply(state).reply_type) != AMQP_RESPONSE_NORMAL) { + if (logfailure) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 4 in agency thread %s", agencyid); + } + goto consfailed; + } + + return state; + +consfailed: + if (state) { + amqp_destroy_connection(state); + } + return NULL; +} + +/** Creates a connection to the internal RMQ instance for the purposes of + * writing intercept records received from a collector + * + * Intended to be called by collector receive threads to establish their RMQ + * connection session. + * + * @param col The state for the collector receive thread that + * is calling this function + * + * @return NULL if the connection fails, otherwise the newly created + * connection object. + */ +amqp_connection_state_t join_mediator_RMQ_as_producer(coll_recv_t *col) { + + if (col->amqp_producer_state) { + return col->amqp_producer_state; + } + + if (col->internalpass == NULL) { + logger(LOG_INFO, "OpenLI Mediator: collector thread for %s cannot log into RMQ broker because no suitable password has been configured.", col->ipaddr); + goto prodfailed; + } + + col->amqp_producer_state = amqp_new_connection(); + col->amqp_producer_sock = amqp_tcp_socket_new(col->amqp_producer_state); + + if (amqp_socket_open(col->amqp_producer_sock, "localhost", 5672)) { + if (col->disabled_log == 0) { + logger(LOG_INFO, "OpenLI Mediator: collector thread for %s could not open amqp producer socket", col->ipaddr); + } + goto prodfailed; + } + + /* Hard-coded username and password -- not ideal, but the RMQ instance + * should only be accessible via localhost. 
+ */ + if ((amqp_login(col->amqp_producer_state, "OpenLI-med", 0, 131072, 0, + AMQP_SASL_METHOD_PLAIN, "openli.nz", col->internalpass)) + .reply_type != AMQP_RESPONSE_NORMAL) { + if (col->disabled_log == 0) { + logger(LOG_ERR, "OpenLI Mediator: failed to log into RMQ broker using plain auth in collector thread %s", col->ipaddr); + } + goto prodfailed; + } + + /* TODO replace with loop */ + /* TODO some of this stuff could be moved into a separate inline + * function that is called by both the producer and consumer functions + */ + amqp_channel_open(col->amqp_producer_state, 2); + if ((amqp_get_rpc_reply(col->amqp_producer_state).reply_type) != + AMQP_RESPONSE_NORMAL) { + if (col->disabled_log == 0) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 2 in collector thread for %s", col->ipaddr); + } + goto prodfailed; + } + + amqp_channel_open(col->amqp_producer_state, 3); + if ((amqp_get_rpc_reply(col->amqp_producer_state).reply_type) != + AMQP_RESPONSE_NORMAL) { + if (col->disabled_log == 0) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 3 in collector thread for %s", col->ipaddr); + } + goto prodfailed; + } + + amqp_channel_open(col->amqp_producer_state, 4); + if ((amqp_get_rpc_reply(col->amqp_producer_state).reply_type) != + AMQP_RESPONSE_NORMAL) { + if (col->disabled_log == 0) { + logger(LOG_ERR, "OpenLI Mediator: failed to open RMQ channel 4 in collector thread for %s", col->ipaddr); + } + goto prodfailed; + } + + logger(LOG_INFO, "OpenLI Mediator: collector thread for %s has connected to local RMQ instance", col->ipaddr); + + return col->amqp_producer_state; + +prodfailed: + if (col->amqp_producer_state) { + amqp_destroy_connection(col->amqp_producer_state); + col->amqp_producer_state = NULL; + } + return NULL; +} + +/** Connect to the RMQ instance that is running on an OpenLI collector. + * + * This connection would be used by a collector receive thread to + * consume intercept records from its corresponding collector + * (assuming that the collector is exporting via RMQ, of course). 
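+ *
+ * The queue that is consumed from is named after this mediator's ID
+ * (i.e. "ID<mediatorid>" -- see the queue declaration below), so the
+ * collector must be exporting records intended for this particular
+ * mediator.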
+ * + * @param col The state for the collector receive thread that + * is requesting the connection to the collector RMQ + * + * @return NULL if the connection fails, otherwise the newly created + * connection object + */ +amqp_connection_state_t join_collector_RMQ(coll_recv_t *col) { + + char stringspace[32]; struct timeval tv; int status; - tv.tv_usec = 0; - tv.tv_sec = 1; + amqp_connection_state_t amqp_state; + amqp_socket_t *amqp_sock = NULL; + mediator_collector_config_t *conf = col->parentconfig; + uint16_t useport; + uint32_t medid; - //try connect to RMQ server at address(msgbody) - //and join the appropiate queue (medID) + /* Try to connect to RMQ service at col->ipaddr and join the appropiate + * queue (which should be named after our mediator ID) + */ amqp_set_initialize_ssl_library(0); - amqp_connection_state_t amqp_state = amqp_new_connection(); + amqp_state = amqp_new_connection(); - amqp_socket_t *ampq_sock = NULL; + lock_med_collector_config(conf); /* LOCK SHARED CONFIGURATION */ - if (medcol->rmqconf->SSLenabled && - medcol->sslconf->cacertfile && - medcol->sslconf->certfile && - medcol->sslconf->keyfile) { + medid = conf->parent_mediatorid; - ampq_sock = amqp_ssl_socket_new(amqp_state); + //If no port is supplied use default + if (conf->rmqconf->port == 0) { + if (conf->rmqconf->SSLenabled) { + useport = 5671; + } else { + useport = 5672; + } + } else { + useport = conf->rmqconf->port; + } - if (!ampq_sock) { - if (!logDisabled) - logger(LOG_INFO, "OpenLI Mediator: RMQ Faild creating new SSL socket"); + tv.tv_usec = 0; + tv.tv_sec = 1; + + if (conf->rmqconf->SSLenabled && conf->sslconf->cacertfile && + conf->sslconf->certfile && conf->sslconf->keyfile) { + /* SSL connection is required */ + amqp_sock = amqp_ssl_socket_new(amqp_state); + if (!amqp_sock) { + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: RMQ failed to create new SSL socket"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - amqp_ssl_socket_set_verify_peer(ampq_sock, 0); - amqp_ssl_socket_set_verify_hostname(ampq_sock, 0); + amqp_ssl_socket_set_verify_peer(amqp_sock, 0); + amqp_ssl_socket_set_verify_hostname(amqp_sock, 0); - if (amqp_ssl_socket_set_cacert(ampq_sock, medcol->sslconf->cacertfile) + if (amqp_ssl_socket_set_cacert(amqp_sock, conf->sslconf->cacertfile) != AMQP_STATUS_OK) { - if (!logDisabled) - logger(LOG_INFO, "OpenLI Mediator: RMQ Failed to load cacert"); + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: RMQ failed to load cacert"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - if (amqp_ssl_socket_set_key( - ampq_sock, - medcol->sslconf->certfile, - medcol->sslconf->keyfile - ) != AMQP_STATUS_OK ) { - if (!logDisabled) + if (amqp_ssl_socket_set_key(amqp_sock, conf->sslconf->certfile, + conf->sslconf->keyfile) != AMQP_STATUS_OK ) { + if (!col->disabled_log) { logger(LOG_INFO, - "OpenLI Mediator: RMQ Failed to load SSL cert/key"); + "OpenLI Mediator: RMQ failed to load SSL cert/key"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - //If no port is supplied use defualt - if (medcol->rmqconf->port == 0) { - medcol->rmqconf->port = 5671; + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: attempting to connect to RMQ using SSL on %s:%u", + col->ipaddr, useport); } - logger(LOG_INFO, "OpenLI Mediator: attempting to connect to RMQ using SSL on port %u", medcol->rmqconf->port); - - if ((status = 
amqp_socket_open_noblock(ampq_sock, (const char *)msgbody, - medcol->rmqconf->port, &tv))){ - if (!logDisabled) + if ((status = amqp_socket_open_noblock(amqp_sock, + (const char *)col->ipaddr, + useport, &tv))){ + if (!col->disabled_log) { logger(LOG_INFO, "OpenLI Mediator: RMQ failed to open AMQP socket: %d", status); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - if ( (amqp_login(amqp_state, "OpenLI", 0, 131072, - medcol->rmqconf->heartbeatFreq, + if ( (amqp_login(amqp_state, "OpenLI", 0, 131072, + conf->rmqconf->heartbeatFreq, AMQP_SASL_METHOD_EXTERNAL, "EXTERNAL") ).reply_type != AMQP_RESPONSE_NORMAL ) { - if (!logDisabled) - logger(LOG_INFO, - "OpenLI Mediator: RMQ Failed to login using EXTERNAL auth"); + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: RMQ failed to login using EXTERNAL auth"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - } else if (medcol->rmqconf->name && medcol->rmqconf->pass) { + } else if (conf->rmqconf->name && conf->rmqconf->pass) { //start up socket with non SSL auth - ampq_sock = amqp_tcp_socket_new(amqp_state); + amqp_sock = amqp_tcp_socket_new(amqp_state); - if (!ampq_sock) { - if (!logDisabled) - logger(LOG_INFO, - "OpenLI Mediator: RMQ Error opening new TCP socket"); + if (!amqp_sock) { + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: RMQ error opening new TCP socket"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - //If no port is supplied use defualt - if (medcol->rmqconf->port == 0) { - medcol->rmqconf->port = 5672; + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: attempting to connect to RMQ using PLAIN auth at %s:%u", col->ipaddr, useport); } - if (amqp_socket_open_noblock(ampq_sock, (const char *)msgbody, - 5672, &tv)){ - if (!logDisabled) - logger(LOG_INFO, + if (amqp_socket_open_noblock(amqp_sock, (const char *)col->ipaddr, + useport, &tv)){ + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: RMQ failed to open AMQP socket"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } - if (amqp_login(amqp_state, "OpenLI", 0, 131072, - medcol->rmqconf->heartbeatFreq, AMQP_SASL_METHOD_PLAIN, - medcol->rmqconf->name, - medcol->rmqconf->pass).reply_type != AMQP_RESPONSE_NORMAL) { - if (!logDisabled) - logger(LOG_INFO, - "OpenLI Mediator: RMQ Failed to login using PLAIN auth"); + if (amqp_login(amqp_state, "OpenLI", 0, 131072, + conf->rmqconf->heartbeatFreq, AMQP_SASL_METHOD_PLAIN, + conf->rmqconf->name, + conf->rmqconf->pass).reply_type != AMQP_RESPONSE_NORMAL) { + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: RMQ failed to login using PLAIN auth"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } } else { - if (!logDisabled) - logger(LOG_INFO, - "OpenLI Mediator: RMQ no valid login was provided"); + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: no valid RMQ login was provided"); + } amqp_destroy_connection(amqp_state); + unlock_med_collector_config(conf); return NULL; } + unlock_med_collector_config(conf); /* UNLOCK SHARED CONFIGURATION */ + + /* Use channel 1 for exchanging intercept records */ amqp_channel_open(amqp_state, 1); - + if ( (amqp_get_rpc_reply(amqp_state).reply_type) != AMQP_RESPONSE_NORMAL ) { - if (!logDisabled) - logger(LOG_ERR, "OpenLI Mediator: RMQ Failed to open channel"); + if (!col->disabled_log) { + 
logger(LOG_ERR, "OpenLI Mediator: RMQ failed to open channel"); + } amqp_destroy_connection(amqp_state); return NULL; } - amqp_queue_declare( - amqp_state, - 1, - mstate->rmq_queueid, - 0, - 1, - 0, - 0, - amqp_empty_table); - - if (amqp_get_rpc_reply(amqp_state).reply_type != AMQP_RESPONSE_NORMAL ) { - if (!logDisabled) - logger(LOG_INFO, "OpenLI Mediator: RMQ Failed to declare queue"); + /* Make sure we have a declared instance of the queue for this mediator */ + snprintf(stringspace, sizeof(stringspace), "ID%d", medid); + if (declare_RMQ_queue(amqp_state, stringspace, 1) < 0) { + if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: RMQ failed to declare queue %s on collector %s", stringspace, col->ipaddr); + } amqp_destroy_connection(amqp_state); return NULL; } - amqp_basic_consume(amqp_state, - 1, - mstate->rmq_queueid, - amqp_empty_bytes, - 0, - 0, - 0, - amqp_empty_table); - - if (amqp_get_rpc_reply(amqp_state).reply_type != AMQP_RESPONSE_NORMAL ) { - if (!logDisabled) - logger(LOG_INFO, "OpenLI Mediator: RMQ Failed to register consumer"); + /* Add the queue to our list of consumable queues for this connection */ + if (register_RMQ_consumer(amqp_state, stringspace, 1) < 0) { + if (!col->disabled_log) { + logger(LOG_INFO, + "OpenLI Mediator: RMQ failed to register consumer"); + } amqp_destroy_connection(amqp_state); return NULL; - } else { - if (!logDisabled) - logger(LOG_INFO, "OpenLI Mediator: RMQ Registered consumer %s", - (char *)(mstate->rmq_queueid.bytes)); + } else if (!col->disabled_log) { + logger(LOG_INFO, "OpenLI Mediator: RMQ Registered consumer %s", + stringspace); } return amqp_state; } -int receive_rmq_invite(mediator_collector_t *medcol, - single_coll_state_t *mstate) { - amqp_connection_state_t amqp_state = join_RMQ(medcol, - (uint8_t *)mstate->ipaddr, mstate->iplen, 0, mstate); - int sock_fd; - if (!amqp_state) return -1; +/** Stop consuming IRI messages for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming IRI records for + * + * @return -1 if an error occurs, 0 otherwise. + */ +int deregister_mediator_iri_RMQ_consumer(amqp_connection_state_t state, + char *liid) { + + char iri_queuename[1024]; + snprintf(iri_queuename, 1024, "%s-%s", liid, "iri"); - sock_fd = amqp_get_sockfd(amqp_state); - if (sock_fd < 0) { - return sock_fd; - } + return cancel_RMQ_consumer(state, iri_queuename, 2); +} - mstate->amqp_state = amqp_state; - if (mstate->incoming_rmq == NULL) { - mstate->incoming_rmq = create_net_buffer(NETBUF_RECV, 0, NULL); - } +/** Stop consuming CC messages for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming CC records for + * + * @return -1 if an error occurs, 0 otherwise. + */ +int deregister_mediator_cc_RMQ_consumer(amqp_connection_state_t state, + char *liid) { - return sock_fd; + char cc_queuename[1024]; + snprintf(cc_queuename, 1024, "%s-%s", liid, "cc"); + + return cancel_RMQ_consumer(state, cc_queuename, 3); } -int check_rmq_status(mediator_collector_t *medcol, active_collector_t *col) { +/** Stop consuming raw IP packets for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming raw IP packets for + * + * @return -1 if an error occurs, 0 otherwise. 
+ */ +int deregister_mediator_rawip_RMQ_consumer(amqp_connection_state_t state, + char *liid) { + + char cc_queuename[1024]; + snprintf(cc_queuename, 1024, "%s-%s", liid, "rawip"); + + return cancel_RMQ_consumer(state, cc_queuename, 4); +} + +/** Consume (and potentially act upon) a non-standard frame seen by an + * RMQ consuming connection. + * + * Such frames usually indicate that the connection is in an error state or + * communicate some information from the server back to the consumer. + * + * @param state The RMQ connection with a pending non-standard + * message. + * + * @return -1 if the connection has failed and needs to be reset, 0 otherwise. + */ +static int consume_other_frame(amqp_connection_state_t state) { + amqp_frame_t frame; + amqp_rpc_reply_t ret; + + if (AMQP_STATUS_OK != amqp_simple_wait_frame(state, &frame)) { + return 0; + } + + if (AMQP_FRAME_METHOD == frame.frame_type) { + switch (frame.payload.method.id) { + case AMQP_BASIC_ACK_METHOD: + /* if we've turned publisher confirms on, and we've published a + * message here is a message being confirmed. + */ + return 0; + case AMQP_BASIC_RETURN_METHOD: + /* if a published message couldn't be routed and the mandatory + * flag was set this is what would be returned. The message then + * needs to be read. + */ + { + amqp_message_t message; + ret = amqp_read_message(state, frame.channel, &message, 0); + if (AMQP_RESPONSE_NORMAL != ret.reply_type) { + return -1; + } + amqp_destroy_message(&message); + } + return 0; + + case AMQP_CHANNEL_CLOSE_METHOD: + /* a channel.close method happens when a channel exception + * occurs, this can happen by publishing to an exchange that + * doesn't exist for example. + * + * In this case you would need to open another channel, + * redeclare any queues that were declared auto-delete, and + * restart any consumers that were attached to the previous + * channel. + */ + return -1; + + case AMQP_CONNECTION_CLOSE_METHOD: + /* a connection.close method happens when a connection exception + * occurs, this can happen by trying to use a channel that isn't + * open for example. + * + * In this case the whole connection must be restarted. + */ + return -1; + + default: + return -1; + } + } + /* If we get here, something really weird is going on -- usually this + * means we've consumed the "method" portion of a message (i.e. the + * first frame) without subsequently reading the header and body that + * follow the method. + * + * Best way to resolve this is to reset the RMQ connection and try + * again, hopefully without doing a partial message read next time. + * + * Note: this often happens if you mess around with + * amqp_simple_wait_frame_noblock(), so don't do that unless you know + * what you are doing. + */ + + return -1; +} + +#define MAX_CONSUMER_REJECTIONS 10 + +/** Consumes messages from an internal RMQ connection and writes them into + * an export buffer. 
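+ *
+ * This is the shared implementation behind consume_mediator_iri_messages(),
+ * consume_mediator_cc_messages() and consume_mediator_rawip_messages();
+ * each of those wrappers simply selects the appropriate channel (and, for
+ * raw IP, requests that the message length be prepended).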
+ * + * @param state The RMQ connection to consume messages from + * @param buf The export buffer to write the messages into + * @param maxread The maximum number of messages to read before + * returning from this function + * @param channel The channel to consume from + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * @param prependlength Flag to indicate whether the message length + * should be written into the export buffer ahead + * of writing the message itself + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no messages were consumed, or + * 1 if at least one message was consumed successfully. + */ +static int consume_mediator_liid_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, int channel, uint64_t *last_deliv, + uint8_t prependlength) { - amqp_frame_t frame; + int msgread = 0; + int rejects = 0; struct timeval tv; - int ret; - single_coll_state_t *cs = NULL; + uint32_t len; + amqp_envelope_t envelope; + amqp_rpc_reply_t ret; - tv.tv_usec = 1; tv.tv_sec = 0; + tv.tv_usec = 100000; + + if (state == NULL) { + usleep(10000); + return 0; + } + + while (msgread < maxread) { + /* Let the connection free any unused internal state / buffers */ + amqp_maybe_release_buffers(state); + + /* Grab the next message */ + ret = amqp_consume_message(state, &envelope, &tv, 0); + if (ret.reply_type != AMQP_RESPONSE_NORMAL) { + if (ret.reply_type == AMQP_RESPONSE_LIBRARY_EXCEPTION && + ret.library_error == AMQP_STATUS_TIMEOUT) { + /* No messages available */ + usleep(10000); + return (msgread > 0); + } + + if (ret.reply_type == AMQP_RESPONSE_LIBRARY_EXCEPTION && + ret.library_error == AMQP_STATUS_HEARTBEAT_TIMEOUT) { + /* Connection has timed out because we didn't respond to + * a heartbeat in time? + */ + return -2; + } + + if (ret.reply_type == AMQP_RESPONSE_LIBRARY_EXCEPTION && + ret.library_error == AMQP_STATUS_UNEXPECTED_STATE) { + /* Non-standard frame, probably an error or internal RMQ + * message. + */ + if (consume_other_frame(state) < 0) { + return -1; + } + } else { + return -1; + } + amqp_destroy_envelope(&envelope); + continue; + } + + if (envelope.channel == 0) { + /* Probably a heartbeat or some other internal admin message */ + amqp_destroy_envelope(&envelope); + continue; + } + + if (envelope.channel != channel) { + /* Message has an unexpected channel, reject it. If we + * have to reject too many messages, break out of the consuming + * loop + */ + if (amqp_basic_reject(state, envelope.channel, + envelope.delivery_tag, true) != 0) { + return -1; + } + amqp_destroy_envelope(&envelope); + rejects += 1; + if (rejects >= MAX_CONSUMER_REJECTIONS) { + return (msgread > 0); + } + continue; + } + + msgread += 1; + + /* Raw IP messages need to be prepended with their length as we have + * no other reliable indicator of their length in the message + * itself. 
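+         *
+         * write_rawip_to_pcap() in the pcap thread relies on this, as it
+         * reads the four-byte length back off the front of each raw IP
+         * record before converting the packet for libtrace output.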
+ */ + if (prependlength) { + len = envelope.message.body.len; + if (append_etsipdu_to_buffer(buf, (uint8_t *)(&len), + sizeof(len), 0) == 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to enqueue ETSI PDU length into export buffer"); + return -1; + } + } + + if (append_etsipdu_to_buffer(buf, envelope.message.body.bytes, + envelope.message.body.len, 0) == 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to enqueue ETSI PDU into export buffer"); + return -1; + } + + *last_deliv = envelope.delivery_tag; + amqp_destroy_envelope(&envelope); + } + + return 1; +} + +/** Consumes IRI records using an RMQ connection, writing them into the + * provided export buffer. + * + * Note that only IRIs for LIIDs that are registered with this connection + * will be consumed. + * + * @param state The RMQ connection to consume IRIs from + * @param buf The export buffer to write the IRIs into + * @param maxread The maximum number of IRIs to read before + * returning from this function + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no IRIs were consumed, or + * 1 if at least one IRI was consumed successfully. + */ +int consume_mediator_iri_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, uint64_t *last_deliv) { + + return consume_mediator_liid_messages(state, buf, maxread, 2, last_deliv, + 0); +} + +/** Consumes CC records using an RMQ connection, writing them into the + * provided export buffer. + * + * Note that only CCs for LIIDs that are registered with this connection + * will be consumed. + * + * @param state The RMQ connection to consume CCs from + * @param buf The export buffer to write the CCs into + * @param maxread The maximum number of CCs to read before + * returning from this function + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no CCs were consumed, or + * 1 if at least one CC was consumed successfully. + */ +int consume_mediator_cc_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, uint64_t *last_deliv) { + + return consume_mediator_liid_messages(state, buf, maxread, 3, last_deliv, + 0); +} + +/** Consumes raw IP packets using an RMQ connection, writing them into the + * provided export buffer. + * + * Note that only raw IP packets for LIIDs that are registered with this + * connection will be consumed. + * + * @param state The RMQ connection to consume packets from + * @param buf The export buffer to write the packets into + * @param maxread The maximum number of packets to read before + * returning from this function + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no packets were consumed, or + * 1 if at least one packet was consumed successfully. + */ +int consume_mediator_rawip_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, uint64_t *last_deliv) { + + return consume_mediator_liid_messages(state, buf, maxread, 4, last_deliv, + 1); +} + +/** Acknowledges messages for an RMQ connection, up to the provided + * delivery tag number. 
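+ *
+ * The acknowledgement is sent with the "multiple" flag set, so all
+ * outstanding messages on the channel up to and including deliv_tag are
+ * acknowledged at once.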
+ * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * @param channel The channel to apply the acknowledgement to + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +static inline int ack_mediator_liid_messages(amqp_connection_state_t state, + uint64_t deliv_tag, int channel) { + + int x; + + if (state == NULL) { + return 0; + } + + if ((x = amqp_basic_ack(state, channel, deliv_tag, 1)) != 0) { + return x; + } + return 0; +} + +/** Acknowledges IRI messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_iri_messages(amqp_connection_state_t state, + uint64_t deliv_tag) { + return ack_mediator_liid_messages(state, deliv_tag, 2); +} + +/** Acknowledges CC messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_cc_messages(amqp_connection_state_t state, + uint64_t deliv_tag) { + return ack_mediator_liid_messages(state, deliv_tag, 3); +} + +/** Acknowledges raw IP messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_rawip_messages(amqp_connection_state_t state, + uint64_t deliv_tag) { + return ack_mediator_liid_messages(state, deliv_tag, 4); +} + +/** Declares the IRI queue for a given LIID and registers it with an + * RMQ connection for consumption. + * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_iri_RMQ_consumer(amqp_connection_state_t state, + char *liid) { + + char iri_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(iri_queuename, 1024, "%s-%s", liid, "iri"); + + if (declare_RMQ_queue(state, iri_queuename, 2) < 0) { + return -1; + } + + if (register_RMQ_consumer(state, iri_queuename, 2) < 0) { + return -2; + } + + return 1; + +} + +/** Declares the CC queue for a given LIID and registers it with an + * RMQ connection for consumption. 
+ * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_cc_RMQ_consumer(amqp_connection_state_t state, + char *liid) { + + char cc_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(cc_queuename, 1024, "%s-%s", liid, "cc"); + + if (declare_RMQ_queue(state, cc_queuename, 3) < 0) { + return -1; + } + + if (register_RMQ_consumer(state, cc_queuename, 3) < 0) { + return -2; + } + + return 1; + +} + +/** Declares the raw IP packet queue for a given LIID and registers it with an + * RMQ connection for consumption. + * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_rawip_RMQ_consumer(amqp_connection_state_t state, + char *liid) { + + char raw_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(raw_queuename, 1024, "%s-%s", liid, "rawip"); + + if (declare_RMQ_queue(state, raw_queuename, 4) < 0) { + return -1; + } + + if (register_RMQ_consumer(state, raw_queuename, 4) < 0) { + return -2; + } + + return 1; + +} + +/** Indicates whether the IRI queue for a given LIID is empty or not. + * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose IRI queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. + */ +int check_empty_mediator_iri_RMQ(amqp_connection_state_t state, char *liid) { + + char iri_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(iri_queuename, 1024, "%s-%s", liid, "iri"); + + return is_RMQ_queue_empty(state, iri_queuename, 2); +} + +/** Indicates whether the CC queue for a given LIID is empty or not. + * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose CC queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. + */ +int check_empty_mediator_cc_RMQ(amqp_connection_state_t state, char *liid) { + + char cc_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(cc_queuename, 1024, "%s-%s", liid, "cc"); + + return is_RMQ_queue_empty(state, cc_queuename, 3); +} + +/** Indicates whether the raw IP packet queue for a given LIID is empty or not. + * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose raw IP queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. 
+ */ +int check_empty_mediator_rawip_RMQ(amqp_connection_state_t state, char *liid) { + + char raw_queuename[1024]; + + if (state == NULL || liid == NULL) { + return 0; + } + snprintf(raw_queuename, 1024, "%s-%s", liid, "rawip"); - cs = (single_coll_state_t *)(col->colev->state); - - if (cs->amqp_state == NULL) { - return 0; - } - - ret = amqp_simple_wait_frame_noblock(cs->amqp_state, &frame, &tv); - switch(ret) { - case AMQP_STATUS_HEARTBEAT_TIMEOUT: - logger(LOG_INFO, - "OpenLI Mediator: RMQ Heartbeat timer expired for %s", - cs->ipaddr); - - return -1; - case AMQP_STATUS_INVALID_PARAMETER: - case AMQP_STATUS_NO_MEMORY: - case AMQP_STATUS_BAD_AMQP_DATA: - case AMQP_STATUS_UNKNOWN_METHOD: - case AMQP_STATUS_UNKNOWN_CLASS: - case AMQP_STATUS_TIMER_FAILURE: - case AMQP_STATUS_SOCKET_ERROR: - case AMQP_STATUS_SSL_ERROR: - logger(LOG_INFO, - "OpenLI Mediator: RMQ connection error, closing"); - return -1; - } - return 1; + return is_RMQ_queue_empty(state, raw_queuename, 4); } // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/mediator_rmq.h b/src/mediator/mediator_rmq.h new file mode 100644 index 00000000..b6001efc --- /dev/null +++ b/src/mediator/mediator_rmq.h @@ -0,0 +1,344 @@ +/* + * + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. + * All rights reserved. + * + * This file is part of OpenLI. + * + * This code has been developed by the University of Waikato WAND + * research group. For further information please see http://www.wand.net.nz/ + * + * OpenLI is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * OpenLI is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + */ + +#ifndef OPENLI_MEDIATOR_RMQ_H_ +#define OPENLI_MEDIATOR_RMQ_H_ + +#include "coll_recv_thread.h" +#include "lea_send_thread.h" + +/** Creates a connection to the internal RMQ instance for the purposes of + * writing intercept records received from a collector + * + * Intended to be called by collector receive threads to establish their RMQ + * connection session. + * + * @param col The state for the collector receive thread that + * is calling this function + * + * @return NULL if the connection fails, otherwise the newly created + * connection object. + */ +amqp_connection_state_t join_mediator_RMQ_as_producer(coll_recv_t *col); + +/** Connect to the RMQ instance that is running on an OpenLI collector. + * + * This connection would be used by a collector receive thread to + * consume intercept records from its corresponding collector + * (assuming that the collector is exporting via RMQ, of course). + * + * @param col The state for the collector receive thread that + * is requesting the connection to the collector RMQ + * + * @return NULL if the connection fails, otherwise the newly created + * connection object + */ +amqp_connection_state_t join_collector_RMQ(coll_recv_t *col); + +/** Creates a connection to the internal RMQ instance for the purposes of + * consuming intercept records for intercepts headed for a particular agency. 
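+ *
+ * The returned connection has channels 2, 3 and 4 open, which are used
+ * for the IRI, CC and raw IP queues respectively by the register, consume
+ * and acknowledge functions declared below.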
+ * + * Intended to be called by LEA send threads to establish their RMQ + * connection session. + * + * @param agencyid The ID of the agency that this connection is for. + * @param logfailure Flag indicating whether to write a log message if + * an error occurs. Set to zero to avoid log spam + * if the connection attempt repeatedly fails. + * @param password The password to use to authenticate with RMQ. + * + * @return NULL if the connection fails, otherwise the newly created + * connection object. + */ +amqp_connection_state_t join_mediator_RMQ_as_consumer(char *agencyid, + int logfailure, char *password); + +/** Declares the IRI queue for a given LIID and registers it with an + * RMQ connection for consumption. + * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_iri_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Stop consuming IRI messages for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming IRI records for + * + * @return -1 if an error occurs, 0 otherwise. + */ +int deregister_mediator_iri_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Declares the CC queue for a given LIID and registers it with an + * RMQ connection for consumption. + * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_cc_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Stop consuming CC messages for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming CC records for + * + * @return -1 if an error occurs, 0 otherwise. + */ +int deregister_mediator_cc_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Declares the raw IP packet queue for a given LIID and registers it with an + * RMQ connection for consumption. + * + * @param state The RMQ connection to register the queue on + * @param liid The LIID of the intercept to register + * + * @return -1 if the queue declaration fails, -2 if the registration + * fails, 0 if either parameter is bad, 1 if everything was + * successful + */ +int register_mediator_rawip_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Stop consuming raw IP packets for a given LIID + * + * @param state The RMQ connection to stop consuming on + * @param liid The LIID to stop consuming raw IP packets for + * + * @return -1 if an error occurs, 0 otherwise. + */ + +int deregister_mediator_rawip_RMQ_consumer(amqp_connection_state_t state, + char *liid); + +/** Declares the CC and IRI queues in RabbitMQ for a particular LIID + * + * If the queues are already declared, this should be a no-op. + * + * @param state The RMQ connection to use to declare the queues + * @param liid The LIID to declare queues for + * @param liidlen The length of the LIID (in bytes) + * + * @return -1 if an error occurs, 0 otherwise. 
+ */
+int declare_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        char *liid, int liidlen);
+
+/** Declares the Raw IP queue in RabbitMQ for a particular LIID
+ *
+ * Only required for LIIDs that are being written to pcap files.
+ *
+ * @param state         The RMQ connection to use to declare the queue
+ * @param liid          The LIID to declare a raw IP queue for
+ * @param liidlen       The length of the LIID (in bytes)
+ *
+ * @return -1 if an error occurs, 0 otherwise.
+ */
+int declare_mediator_rawip_RMQ_queue(amqp_connection_state_t state,
+        char *liid, int liidlen);
+
+/** Deletes the CC and IRI queues in RabbitMQ for a particular LIID */
+void remove_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        char *liid);
+
+/** Deletes the raw IP queue in RabbitMQ for a particular LIID */
+void remove_mediator_rawip_RMQ_queue(amqp_connection_state_t state,
+        char *liid);
+
+/** Publishes an encoded IRI onto a mediator RMQ queue.
+ *
+ * @param state         The RMQ connection to use to publish the message
+ * @param msg           A pointer to the start of the encoded IRI
+ * @param msglen        The length of the encoded IRI, in bytes
+ * @param liid          The LIID that the message belongs to
+ *
+ * @return 0 if an error occurs, 1 if the message is published successfully
+ */
+int publish_iri_on_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        uint8_t *msg, uint16_t msglen, char *liid);
+
+/** Publishes an encoded CC onto a mediator RMQ queue.
+ *
+ * @param state         The RMQ connection to use to publish the message
+ * @param msg           A pointer to the start of the encoded CC
+ * @param msglen        The length of the encoded CC, in bytes
+ * @param liid          The LIID that the message belongs to
+ *
+ * @return 0 if an error occurs, 1 if the message is published successfully
+ */
+int publish_cc_on_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        uint8_t *msg, uint16_t msglen, char *liid);
+
+/** Publishes a raw IP packet onto a mediator RMQ queue.
+ *
+ * @param state         The RMQ connection to use to publish the message
+ * @param msg           A pointer to the start of the packet body
+ * @param msglen        The length of the packet body, in bytes
+ * @param liid          The LIID that the message belongs to
+ *
+ * @return 0 if an error occurs, 1 if the message is published successfully
+ */
+int publish_rawip_on_mediator_liid_RMQ_queue(amqp_connection_state_t state,
+        uint8_t *msg, uint16_t msglen, char *liid);
+
+/** Consumes CC records using an RMQ connection, writing them into the
+ * provided export buffer.
+ *
+ * Note that only CCs for LIIDs that are registered with this connection
+ * will be consumed.
+ *
+ * @param state         The RMQ connection to consume CCs from
+ * @param buf           The export buffer to write the CCs into
+ * @param maxread       The maximum number of CCs to read before
+ *                      returning from this function
+ * @param last_deliv    The delivery tag of the most recent consumed
+ *                      message (updated by this function)
+ *
+ * @return -1 if an error occurs, -2 if the RMQ connection has timed out
+ *         due to a heartbeat failure, 0 if no CCs were consumed, or
+ *         1 if at least one CC was consumed successfully.
+ */
+int consume_mediator_cc_messages(amqp_connection_state_t state,
+        export_buffer_t *buf, int maxread, uint64_t *last_deliv);
+
+/** Consumes IRI records using an RMQ connection, writing them into the
+ * provided export buffer.
+ *
+ * Note that only IRIs for LIIDs that are registered with this connection
+ * will be consumed.
+ * + * @param state The RMQ connection to consume IRIs from + * @param buf The export buffer to write the IRIs into + * @param maxread The maximum number of IRIs to read before + * returning from this function + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no IRIs were consumed, or + * 1 if at least one IRI was consumed successfully. + */ +int consume_mediator_iri_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, uint64_t *last_deliv); + +/** Consumes raw IP packets using an RMQ connection, writing them into the + * provided export buffer. + * + * Note that only raw IP packets for LIIDs that are registered with this + * connection will be consumed. + * + * @param state The RMQ connection to consume packets from + * @param buf The export buffer to write the packets into + * @param maxread The maximum number of packets to read before + * returning from this function + * @param last_deliv The delivery tag of the most recent consumed + * message (updated by this function) + * + * @return -1 if an error occurs, -2 if the RMQ connection has timed out + * due to a heartbeat failure, 0 if no packets were consumed, or + * 1 if at least one packet was consumed successfully. + */ +int consume_mediator_rawip_messages(amqp_connection_state_t state, + export_buffer_t *buf, int maxread, uint64_t *last_deliv); + +/** Acknowledges IRI messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_iri_messages(amqp_connection_state_t state, + uint64_t deliv_tag); + +/** Acknowledges CC messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_cc_messages(amqp_connection_state_t state, + uint64_t deliv_tag); + +/** Acknowledges raw IP messages for an RMQ connection, up to the provided + * delivery tag number. + * + * @param state The RMQ connection to acknowledge messages on + * @param deliv_tag The delivery tag to acknowledge + * + * @return AMQP_STATUS_OK if the acknowledgement was successful, otherwise + * will return the corresponding AMQP error code + */ +int ack_mediator_rawip_messages(amqp_connection_state_t state, + uint64_t deliv_tag); + +/** Indicates whether the IRI queue for a given LIID is empty or not. + * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose IRI queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. + */ +int check_empty_mediator_iri_RMQ(amqp_connection_state_t state, char *liid); + +/** Indicates whether the CC queue for a given LIID is empty or not. 
+ * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose CC queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. + */ +int check_empty_mediator_cc_RMQ(amqp_connection_state_t state, char *liid); + +/** Indicates whether the raw IP packet queue for a given LIID is empty or not. + * + * @param state The RMQ connection to use to undertake the check + * @param liid The LIID whose raw IP queue needs to be checked + * + * @return -1 if an error occurs, 0 if the queue is not empty or the + * parameters are invalid, 1 if the queue is empty. + */ +int check_empty_mediator_rawip_RMQ(amqp_connection_state_t state, char *liid); + +#endif +// vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/pcapthread.c b/src/mediator/pcapthread.c index 4ddae6b3..ae43e22d 100644 --- a/src/mediator/pcapthread.c +++ b/src/mediator/pcapthread.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -25,13 +25,33 @@ */ #include +#include #include "logger.h" -#include "mediator.h" +#include "lea_send_thread.h" #include "util.h" +#include "pcapthread.h" +#include "mediator_rmq.h" #include #include +/** This source file implements the "pcap output" thread that allows + * users to write an intercept to disk as a series of pcaps, instead of + * delivering it to an agency via the conventional handovers. + * + * This thread is implemented as a special type of LEA send thread that + * just happens to use libtrace to write files, so there are a lot of + * references to lea_thread_state_t instances etc. Hopefully it is not + * too confusing -- just try to think of the pcap thread as like a + * inheriting class from the LEA send thread, which overrides or extends + * certain functionality to suit its intended purpose. + * + * Because of this, a pcap thread has two state "objects" -- one is an + * LEA send thread state instance, which includes all of the state that is + * common to both LEA send threads and the pcap thread, the other is the + * pcap specific thread state that is never required by an LEA send thread. + */ + /** Halt all ongoing pcap outputs and close their respective files. * * @param pstate The state for the pcap output thread @@ -48,6 +68,17 @@ static void halt_pcap_outputs(pcap_thread_state_t *pstate) { } } +/** Concatenates a string onto another, using the provided pointer as + * the infered end of the "front" string. + * + * @param str The string to "add" to the current string + * @param bufp Pointer to the null byte of the string that is being + * added to + * @param buflim Pointer to the end of the allocated space for the + * string being added to + * + * @return the null byte of the newly concatenated string + */ static char *stradd(const char *str, char *bufp, char *buflim) { while (bufp < buflim && (*bufp = *str++) != '\0') { ++bufp; @@ -55,21 +86,39 @@ static char *stradd(const char *str, char *bufp, char *buflim) { return bufp; } -static int populate_pcap_uri(pcap_thread_state_t *pstate, char *urispace, +/** Constructs the pcap filename URI for an output file. 
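+ *
+ * Within the configured template, '%L' expands to the LIID of the
+ * intercept and '%s' to the current Unix timestamp in seconds; any other
+ * '%' sequences are passed through to strftime().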
+ * + * @param state The LEA send thread state for this pcap thread + * @param pstate The pcap specific state for this pcap thread + * @param urispace The string that the URI is to be written into + * @param urispacelen The number of bytes allocated for the urispace + * string. + * @param act The intercept that this output file will belong to + * + * @return 0 if the URI could not fit in the provided string space, 1 + * otherwise. + */ + +static int populate_pcap_uri(lea_thread_state_t *state, + pcap_thread_state_t *pstate, char *urispace, int urispacelen, active_pcap_output_t *act) { - char *ptr = pstate->outtemplate; + char *ptr = state->pcap_outtemplate; struct timeval tv; char tsbuf[12]; char scratch[9500]; char *w = scratch; char *end = scratch + urispacelen; + /* Build the URI in 'scratch', then copy it into urispace only if we + * manage to build it successfully + */ assert(ptr); gettimeofday(&tv, NULL); w = stradd("pcapfile:", w, end); - w = stradd(pstate->dir, w, end); + w = stradd(state->pcap_dir, w, end); + w = stradd("/", w, end); for (; *ptr; ++ptr) { if (*ptr == '%') { @@ -78,9 +127,11 @@ static int populate_pcap_uri(pcap_thread_state_t *pstate, char *urispace, --ptr; break; case 'L': + /* '%L' is replaced with the LIID for the intercept */ w = stradd(act->liid, w, end); continue; case 's': + /* '%s' is replaced with the unix timestamp in seconds */ snprintf(tsbuf, sizeof(tsbuf), "%ld", tv.tv_sec); w = stradd(tsbuf, w, end); continue; @@ -95,8 +146,9 @@ static int populate_pcap_uri(pcap_thread_state_t *pstate, char *urispace, *w++ = *ptr; } + /* Make sure we put an appropriate suffix on the file name */ w = stradd(".pcap", w, end); - if (pstate->compresslevel > 0) { + if (state->pcap_compress_level > 0) { w = stradd(".gz", w, end); } @@ -105,27 +157,59 @@ static int populate_pcap_uri(pcap_thread_state_t *pstate, char *urispace, } *w = '\0'; + /* All other '%' formatting is handled by strftime() */ strftime(urispace, urispacelen, scratch, gmtime(&(tv.tv_sec))); return 1; } +/** De-registers the "rawip" queues for any LIIDs that we have disabled + * pcap output for due to being unconfirmed by a reconnecting provisioner. + * + * This method is designed to be called using foreach_liid_agency_mapping() + * + * @param m The LIID map entry that is being considered + * @param arg The pcap thread state object (requires casting) + * + * @return 0 always + */ +static int deregister_unconfirmed_pcap_liids(liid_map_entry_t *m, void *arg) { + pcap_thread_state_t *pstate = (pcap_thread_state_t *)arg; + + if (m->unconfirmed == 0) { + return 0; + } + + /* If the LIID has not been confirmed as a "pcap" output by now, let's + * assume it has been removed or re-assigned to an LEA instead. + * + * The only thing we really need to do though is deregister the + * raw IP RMQ queue for that intercept. + */ + if (pstate->rawip_handover->rmq_consumer) { + deregister_mediator_rawip_RMQ_consumer( + pstate->rawip_handover->rmq_consumer, m->liid); + } + return 0; +} + /** Opens a pcap output file using libtrace, named after the current time. * - * @param pstate The state for the pcap output thread + * @param state The LEA thread state for this thread + * @param pstate The pcap-specific state for the pcap output thread * @param act The intercept that requires a new pcap file * * @return -1 if an error occurs, 0 otherwise. 
*/ -static int open_pcap_output_file(pcap_thread_state_t *pstate, - active_pcap_output_t *act) { +static int open_pcap_output_file(lea_thread_state_t *state, + pcap_thread_state_t *pstate, active_pcap_output_t *act) { char uri[4096]; int compressmethod = TRACE_OPTION_COMPRESSTYPE_ZLIB; - int compresslevel = pstate->compresslevel; + int compresslevel = state->pcap_compress_level; struct timeval tv; /* Make sure the user configured a directory for us to put files into */ - if (pstate->dir == NULL) { + if (state->pcap_dir == NULL) { if (!pstate->dirwarned) { logger(LOG_INFO, "OpenLI Mediator: pcap directory is not configured so will not write any pcap files."); @@ -140,7 +224,7 @@ static int open_pcap_output_file(pcap_thread_state_t *pstate, return -1; } - if (pstate->outtemplate == NULL) { + if (state->pcap_outtemplate == NULL) { /* Name the file after the LIID and current timestamp -- this ensures we * will have files that have unique and meaningful names, even if we @@ -148,26 +232,23 @@ static int open_pcap_output_file(pcap_thread_state_t *pstate, */ gettimeofday(&tv, NULL); - if (pstate->compresslevel > 0) { + if (state->pcap_compress_level > 0) { snprintf(uri, 4096, "pcapfile:%s/openli_%s_%lu.pcap.gz", - pstate->dir, act->liid, tv.tv_sec); + state->pcap_dir, act->liid, tv.tv_sec); } else { snprintf(uri, 4096, "pcapfile:%s/openli_%s_%lu.pcap", - pstate->dir, act->liid, tv.tv_sec); + state->pcap_dir, act->liid, tv.tv_sec); } } else { - if (populate_pcap_uri(pstate, uri, 4096, act) == 0) { + if (populate_pcap_uri(state, pstate, uri, 4096, act) == 0) { logger(LOG_INFO, "OpenLI Mediator: unable to create pcap output file name from template '%s'", - pstate->outtemplate); + state->pcap_outtemplate); return -1; } } - /* Libtrace boiler-plate for creating an output file - we use zlib - * compression level 1 here for a good balance between compression ratio - * and inter-operability with other software. - */ + /* Libtrace boiler-plate for creating an output file */ act->out = trace_create_output(uri); if (trace_is_err_output(act->out)) { libtrace_err_t err; @@ -178,7 +259,7 @@ static int open_pcap_output_file(pcap_thread_state_t *pstate, goto pcaptraceerr; } - if (pstate->compresslevel > 0) { + if (state->pcap_compress_level > 0) { if (trace_config_output(act->out, TRACE_OPTION_OUTPUT_COMPRESSTYPE, &compressmethod) == -1) { libtrace_err_t err; @@ -228,21 +309,28 @@ static int open_pcap_output_file(pcap_thread_state_t *pstate, /** Start a new pcap output for a particular LIID * - * @param pstate The state for the pcap output thread + * @param state The LEA thread state for this thread + * @param pstate The pcap-specific state for the pcap output thread * @param liid The LIID to create a pcap output for, as a string. * * @return a pointer to a new pcap output structure, or NULL if an error * occurred. 
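+ *
+ * If an output already exists for the given LIID, the existing entry is
+ * returned rather than a duplicate being created.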
*/ -static active_pcap_output_t *create_new_pcap_output(pcap_thread_state_t *pstate, +static active_pcap_output_t *create_new_pcap_output( + lea_thread_state_t *state, pcap_thread_state_t *pstate, char *liid) { active_pcap_output_t *act; + HASH_FIND(hh, pstate->active, liid, strlen(liid), act); + if (act) { + return act; + } + act = (active_pcap_output_t *)malloc(sizeof(active_pcap_output_t)); act->liid = strdup(liid); - if (open_pcap_output_file(pstate, act) == -1) { + if (open_pcap_output_file(state, pstate, act) == -1) { free(act->liid); free(act); return NULL; @@ -251,164 +339,291 @@ static active_pcap_output_t *create_new_pcap_output(pcap_thread_state_t *pstate, return act; } -/** Writes a raw captured IP packet to a pcap trace file. +/** Converts a raw IP packet record into a libtrace packet and writes it + * to the appropriate pcap output file. * - * The IP packet must be prepended with the LIID of the intercept that - * triggered this packet's capture. + * @param nextrec Pointer to the start of the raw IP packet record + * @param bufrem The amount of readable bytes in the buffer where + * the raw IP packet record is stored + * @param pstate The pcap-specific state for this thread * - * @param pstate The state for the pcap output thread - * @param pcapmsg The message containing the captured packet, as - * received from the collector. + * @return the number of bytes to advance the buffer to move past the + * raw IP packet record that we just wrote to disk. */ -static void write_rawpcap_packet(pcap_thread_state_t *pstate, - mediator_pcap_msg_t *pcapmsg) { +static uint32_t write_rawip_to_pcap(uint8_t *nextrec, uint64_t bufrem, + pcap_thread_state_t *pstate) { active_pcap_output_t *pcapout; - uint16_t liidlen; + uint32_t pdulen; unsigned char liidspace[2048]; - uint8_t *rawip; + uint16_t liidlen; - if (pcapmsg->msgbody == NULL) { - return; - } + /* The raw IP packet record begins with a four-byte size field, which is + * the size of the record (not including the size field itself) + */ + pdulen = *(uint32_t *)nextrec; - /* Strip off the LIID that is at the start of the message */ - extract_liid_from_exported_msg(pcapmsg->msgbody, pcapmsg->msglen, - liidspace, 2048, &liidlen); + nextrec += sizeof(uint32_t); + bufrem -= sizeof(uint32_t); - if (liidlen == pcapmsg->msglen) { - return; + if (pdulen == 0) { + return sizeof(uint32_t); } - /* The IP header starts immediately after the LIID */ - rawip = pcapmsg->msgbody + liidlen; + /* Next is the LIID, which is encoded as a 2 byte size field followed + * by the LIID string itself (not null-terminated) + */ + extract_liid_from_exported_msg(nextrec, bufrem, liidspace, 2048, &liidlen); - /* Have we seen this LIID before? -- if not, create a new pcap output */ - HASH_FIND(hh, pstate->active, liidspace, strlen((char *)liidspace), - pcapout); - if (!pcapout) { - pcapout = create_new_pcap_output(pstate, (char *)liidspace); + nextrec += liidlen; + bufrem -= liidlen; + + if (bufrem > 65535) { + logger(LOG_INFO, "OpenLI Mediator: raw IP packet is too large to write as a pcap packet, possibly corrupt"); + return pdulen + sizeof(uint32_t); } + HASH_FIND(hh, pstate->active, liidspace, + strlen((const char *)liidspace), pcapout); - if (pcapout) { + /* Hopefully, we already know about this LIID and have a pcap output + * handle all set up and ready for it. If not, let's just skip past it. 
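+ * Either way, the full record length is returned so that the caller
+ * advances past this record instead of stalling on it.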
+ */
+ if (pcapout && pcapout->out) {
 if (!pstate->packet) {
 pstate->packet = trace_create_packet();
 }

- /* To use the libtrace API to write this packet and construct an
- * appropriate pcap header for it, we'll need to use
- * trace_construct_packet() to turn our buffer containing the IP
- * packet into a libtrace packet.
+ /* Thankfully, libtrace will let us "construct" a packet object
+ * from a buffer containing the raw packet contents.
 */
 trace_construct_packet(pstate->packet, TRACE_TYPE_NONE,
- (const void *)rawip, (uint16_t)pcapmsg->msglen - liidlen);
+ (const void *)nextrec,
+ (uint16_t)(pdulen - liidlen - sizeof(uint32_t)));

- /* write resulting packet to libtrace output */
+ /* Now we can have libtrace write the packet using the pcap format */
 if (trace_write_packet(pcapout->out, pstate->packet) < 0) {
 libtrace_err_t err = trace_get_err_output(pcapout->out);
- logger(LOG_INFO,
- "OpenLI mediator: error while writing packet to pcap trace file: %s",
- err.problem);
+ logger(LOG_INFO, "OpenLI Mediator: failed to write raw IP to pcap for LIID %s: %s", liidspace, err.problem);
 trace_destroy_output(pcapout->out);
- HASH_DELETE(hh, pstate->active, pcapout);
- free(pcapout->liid);
- free(pcapout);
+ pcapout->out = NULL;
+ } else {
+ pcapout->pktwritten += 1;
 }
- pcapout->pktwritten = 1;
 }

- free(pcapmsg->msgbody);
+ return pdulen + sizeof(uint32_t);
 }

-/** Writes the IP packet contents of an encoded ETSI record to a pcap trace
- * file.
+
+/** Converts an ETSI CC record into a libtrace packet and writes it
+ * to the appropriate pcap output file.
 *
- * The IP packet must be prepended with the LIID of the intercept that
- * triggered this packet's capture.
+ * @param nextrec Pointer to the start of the ETSI CC record
+ * @param bufrem The amount of readable bytes in the buffer where
+ * the ETSI CC record is stored
+ * @param pstate The pcap-specific state for this thread
 *
- * @param pstate The state for the pcap output thread
- * @param pcapmsg The message containing the captured packet, as
- * received from the collector.
+ * @return the number of bytes to advance the buffer to move past the
+ * ETSI CC record that we just wrote to disk. Returns 0 if there
+ * is a problem with the ETSI CC record that prevents it from
+ * being written to disk.
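+ *
+ * A zero return causes the caller to stop processing the buffer, since
+ * the position of the next record can no longer be determined reliably.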
*/ -static void write_pcap_packet(pcap_thread_state_t *pstate, - mediator_pcap_msg_t *pcapmsg) { +static uint32_t write_etsicc_to_pcap(uint8_t *nextrec, uint64_t bufrem, + pcap_thread_state_t *pstate) { - uint32_t pdulen; - char liidspace[1024]; - char ccname[128]; active_pcap_output_t *pcapout; + uint32_t pdulen; + unsigned char liidspace[2048]; - if (pcapmsg->msgbody == NULL) { - return; - } - - /* First, we're going to need to decode the ETSI encoding */ if (pstate->decoder == NULL) { pstate->decoder = wandder_create_etsili_decoder(); } - wandder_attach_etsili_buffer(pstate->decoder, pcapmsg->msgbody, - pcapmsg->msglen, false); + /* Using the ETSI decoder, grab the record length and the LIID from + * within the record itself + */ + wandder_attach_etsili_buffer(pstate->decoder, nextrec, bufrem, false); pdulen = wandder_etsili_get_pdu_length(pstate->decoder); - if (pdulen == 0 || pcapmsg->msglen < pdulen) { - logger(LOG_INFO, - "OpenLI Mediator: pcap thread received incomplete ETSI CC?"); - return; - } - /* Use the decoder to figure out the LIID for this record */ - if (wandder_etsili_get_liid(pstate->decoder, liidspace, 1024) == NULL) { - logger(LOG_INFO, - "OpenLI Mediator: unable to find LIID for ETSI CC in pcap thread"); - return; + if (pdulen == 0 || pdulen > bufrem) { + logger(LOG_INFO, "OpenLI Mediator: pcap thread received an incomplete ETSI CC"); + return 0; } - /* Have we seen this LIID before? -- if not, create a new pcap output */ - HASH_FIND(hh, pstate->active, liidspace, strlen(liidspace), pcapout); - if (!pcapout) { - pcapout = create_new_pcap_output(pstate, liidspace); + if (wandder_etsili_get_liid(pstate->decoder, (char *)liidspace, + 2048) == NULL) { + logger(LOG_INFO, "OpenLI Mediator: unable to find LIID in ETSI CC received by pcap thread"); + return 0; } + HASH_FIND(hh, pstate->active, liidspace, strlen((const char *)liidspace), + pcapout); + /* Hopefully, we already know about this LIID and have a pcap output + * handle all set up and ready for it. If not, let's just skip past it. + */ if (pcapout && pcapout->out) { uint8_t *rawip; uint32_t cclen; + char ccname[128]; if (!pstate->packet) { pstate->packet = trace_create_packet(); } - /* Turn the ETSI CC into a libtrace pcap packet */ + /* Convert CC to pcap and write to trace file using libtrace. 
+ * We don't need the ETSI headers, so we can jump straight to
+ * the CC contents using libwandder
+ */
 rawip = wandder_etsili_get_cc_contents(pstate->decoder, &cclen,
 ccname, 128);
+
+ if (rawip == NULL) {
+ logger(LOG_INFO, "OpenLI Mediator: unable to find CC contents from ETSI CC seen by pcap thread for LIID %s", liidspace);
+ goto exitpcapwrite;
+ }
 if (cclen > 65535) {
-logger(LOG_INFO,
- "OpenLI Mediator: ETSI CC record is too large to write as a pcap packet -- possibly corrupt.");
+ logger(LOG_INFO, "OpenLI Mediator: ETSI CC record is too large to write as a pcap packet, possibly corrupt");
+ goto exitpcapwrite;
+ }
+
+ trace_construct_packet(pstate->packet, TRACE_TYPE_NONE,
+ (const void *)rawip, (uint16_t)cclen);
+
+ if (trace_write_packet(pcapout->out, pstate->packet) < 0) {
+ libtrace_err_t err = trace_get_err_output(pcapout->out);
+ logger(LOG_INFO, "OpenLI Mediator: failed to write ETSI CC to pcap for LIID %s: %s", liidspace, err.problem);
+ trace_destroy_output(pcapout->out);
+ pcapout->out = NULL;
 } else {
- /* To use the libtrace API to write this packet and construct an
- * appropriate pcap header for it, we'll need to use
- * trace_construct_packet() to turn our buffer containing the IP
- * packet into a libtrace packet.
- */
- trace_construct_packet(pstate->packet, TRACE_TYPE_NONE,
- (const void *)rawip, (uint16_t)cclen);
+ pcapout->pktwritten += 1;
+ }
+ }

- /* write resulting packet to libtrace output */
- if (trace_write_packet(pcapout->out, pstate->packet) < 0) {
- libtrace_err_t err = trace_get_err_output(pcapout->out);
- logger(LOG_INFO,
- "OpenLI Mediator: error while writing packet to pcap trace file: %s",
- err.problem);
- trace_destroy_output(pcapout->out);
- pcapout->out = NULL;
- HASH_DELETE(hh, pstate->active, pcapout);
- free(pcapout->liid);
- free(pcapout);
+exitpcapwrite:
+ return pdulen;
+}
+
+/** Reads intercept records from the export buffer, converts them into the
+ * pcap format and writes them into their corresponding pcap output file(s).
+ *
+ * @param ho The handover which owns the export buffer
+ * @param state The LEA send thread state for this pcap thread
+ * @param pstate The pcap-specific thread state for this thread
+ *
+ * @return -1 if an error occurs while writing to disk, -2 if an error
+ * occurs while acknowledging the written data in RMQ, 0 if
+ * the writing was successful.
+ */
+static int write_pcap_from_buffered_rmq(handover_t *ho,
+ lea_thread_state_t *state, pcap_thread_state_t *pstate) {
+ uint64_t bufrem;
+ uint8_t *nextrec = NULL;
+ uint32_t advance = 0;
+ static int tally = 0;
+
+ bufrem = get_buffered_amount(&(ho->ho_state->buf));
+ while ((nextrec = get_buffered_head(&(ho->ho_state->buf), &bufrem))) {
+ /* TODO consider limiting the number of records written, so we
+ * don't get stuck in here for a long time?
+ */ + tally ++; + if (ho->handover_type == HANDOVER_HI3) { + if ((advance = write_etsicc_to_pcap(nextrec, bufrem, pstate)) + == 0) { + return -1; } - pcapout->pktwritten = 1; + } else if (ho->handover_type == HANDOVER_HI2) { + /* TODO */ + assert(0); + } else if (ho->handover_type == HANDOVER_RAWIP) { + if ((advance = write_rawip_to_pcap(nextrec, bufrem, pstate)) + == 0) { + return -1; + } + } else { + logger(LOG_INFO, "OpenLI Mediator: handover is corrupted in pcap thread"); + return -1; + } + + advance_export_buffer_head(&(ho->ho_state->buf), advance); + } + + if (!ho->ho_state->valid_rmq_ack) { + return 0; + } + + /* acknowledge RMQ messages */ + if (ho->handover_type == HANDOVER_HI3) { + if (ack_mediator_cc_messages(ho->rmq_consumer, + ho->ho_state->next_rmq_ack) != 0) { + logger(LOG_INFO, "OpenLI Mediator: error while acknowledging sent data from internal CC queue by pcapdisk thread"); + return -2; + } + } else if (ho->handover_type == HANDOVER_HI2) { + if (ack_mediator_iri_messages(ho->rmq_consumer, + ho->ho_state->next_rmq_ack) != 0) { + logger(LOG_INFO, "OpenLI Mediator: error while acknowledging sent data from internal IRI queue by pcapdisk thread"); + return -2; + } + } else if (ho->handover_type == HANDOVER_RAWIP) { + if (ack_mediator_rawip_messages(ho->rmq_consumer, + ho->ho_state->next_rmq_ack) != 0) { + logger(LOG_INFO, "OpenLI Mediator: error while acknowledging sent data from internal rawip queue by pcapdisk thread"); + return -2; } } - free(pcapmsg->msgbody); + ho->ho_state->valid_rmq_ack = 0; + + return 0; +} + +static int consume_pcap_packets(handover_t *ho, lea_thread_state_t *state, + pcap_thread_state_t *pstate) { + + int r; + + if ((r = write_pcap_from_buffered_rmq(ho, state, pstate)) == 1) { + return 0; + } else if (r == -2) { + reset_handover_rmq(ho); + return 0; + } else if (r == -1) { + /* pcap writing error */ + return -1; + } + + /* if we get here, the buffer is empty so read more messages from RMQ */ + if (ho->handover_type == HANDOVER_HI3) { + r = consume_mediator_cc_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 32, &(ho->ho_state->next_rmq_ack)); + } else if (ho->handover_type == HANDOVER_RAWIP) { + r = consume_mediator_rawip_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 32, &(ho->ho_state->next_rmq_ack)); + } else if (ho->handover_type == HANDOVER_HI2) { + r = consume_mediator_iri_messages(ho->rmq_consumer, + &(ho->ho_state->buf), 32, &(ho->ho_state->next_rmq_ack)); + } else { + reset_handover_rmq(ho); + return 0; + } + + if (r < 0) { + reset_handover_rmq(ho); + return 1; + } else if (r > 0) { + ho->ho_state->valid_rmq_ack = 1; + } + + r = write_pcap_from_buffered_rmq(ho, state, pstate); + if (r == -2) { + reset_handover_rmq(ho); + return 0; + } else if (r == -1) { + /* pcap writing error */ + } + return r; + } /** Flush any outstanding packets for each active pcap output. @@ -419,7 +634,7 @@ logger(LOG_INFO, * flushing of the pcap outputs to ensure that the file on disk is more * representative of what has been intercepted thus far. * - * @param pstate The state for the pcap output thread + * @param pstate The pcap-specific state for the thread */ static void pcap_flush_traces(pcap_thread_state_t *pstate) { active_pcap_output_t *pcapout, *tmp; @@ -450,9 +665,11 @@ static void pcap_flush_traces(pcap_thread_state_t *pstate) { * (i.e. with no half-written packets and proper footers) available for the * user to hand over to LEAs, if they accept pcap output. 
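+ * Rotation happens on the schedule set by pcap_rotate_frequency, which
+ * is expressed in minutes.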
* - * @param pstate The state for the pcap output thread + * @param state The LEA thread state for this thread + * @param pstate The pcap-specific state for the pcap output thread */ -static void pcap_rotate_traces(pcap_thread_state_t *pstate) { +static void pcap_rotate_traces(lea_thread_state_t *state, + pcap_thread_state_t *pstate) { active_pcap_output_t *pcapout, *tmp; HASH_ITER(hh, pstate->active, pcapout, tmp) { @@ -463,7 +680,7 @@ static void pcap_rotate_traces(pcap_thread_state_t *pstate) { pcapout->out = NULL; /* Open a new file, which will be named using the current time */ - if (open_pcap_output_file(pstate, pcapout) == -1) { + if (open_pcap_output_file(state, pstate, pcapout) == -1) { logger(LOG_INFO, "OpenLI Mediator: error while rotating pcap trace file"); @@ -478,17 +695,22 @@ static void pcap_rotate_traces(pcap_thread_state_t *pstate) { } } -static void pcap_disable_liid(pcap_thread_state_t *pstate, char *liid, - uint16_t liidlen) { +/** Disables pcap output for a particular LIID, closing any existing open + * file handle. + * + * @param pstate The pcap-specific state for the thread + * @param liid The LIID to disable pcap output for + */ +static void pcap_disable_liid(pcap_thread_state_t *pstate, char *liid) { active_pcap_output_t *pcapout; - logger(LOG_INFO, "OpenLI mediator: disabling pcap output for '%s'", - liid); HASH_FIND(hh, pstate->active, liid, strlen(liid), pcapout); if (!pcapout) { return; } + logger(LOG_INFO, "OpenLI Mediator: disabling pcap output for LIID '%s'", + liid); if (pcapout->out) { trace_destroy_output(pcapout->out); @@ -497,147 +719,407 @@ static void pcap_disable_liid(pcap_thread_state_t *pstate, char *liid, HASH_DELETE(hh, pstate->active, pcapout); free(pcapout->liid); free(pcapout); - free(liid); } -/** Main loop for the pcap output thread. +/** Flush the pcap output file handle for all active pcap intercepts. If + * the files are due to be rotated, then do the rotation instead. + * + * @param state The LEA send thread state for this thread + * @param pstate The pcap-specific state for this thread + */ +static void flush_pcap_outputs(lea_thread_state_t *state, + pcap_thread_state_t *pstate) { + + struct timeval tv; + + gettimeofday(&tv, NULL); + if (tv.tv_sec % (60 * state->pcap_rotate_frequency) < 60) { + /* Rotation is due */ + pcap_rotate_traces(state, pstate); + return; + } + pcap_flush_traces(pstate); + +} + +/** Adds a new LIID to our set of pcap outputs and opens a libtrace file + * handle for writing output for that intercept. + * + * @param state The LEA send thread state for this thread + * @param pstate The pcap-specific state for this thread + * @param added The LIID that is to be added + */ +static void add_new_pcapdisk_liid(lea_thread_state_t *state, + pcap_thread_state_t *pstate, added_liid_t *added) { + + /* Check if this LIID is actually being added to pcapdisk, or if + * we are just getting an announcement for a different agency. + */ + if (strcmp(added->agencyid, state->agencyid) != 0) { + /* This LIID has switched to another agency, so close any + * existing pcap output and disable the pcap-specific RMQs */ + pcap_disable_liid(pstate, added->liid); + if (purge_lea_liid_mapping(state, added->liid) > 0) { + if (deregister_mediator_rawip_RMQ_consumer( + pstate->rawip_handover->rmq_consumer, + added->liid) < 0) { + logger(LOG_INFO, + "OpenLI Mediator: WARNING failed to deregister rawip RMQ for LIID %s -> %s", + added->liid, state->agencyid); + } + } + } else { + /* This is an announcement for the pcap thread! 
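+ * Register a rawip RMQ consumer for the LIID (if it is new to us) and
+ * make sure that an open pcap output exists for it.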
*/ + int r = insert_lea_liid_mapping(state, added->liid); + if (r > 0) { + /* Only register with RMQ if this LIID is "new" */ + if (register_mediator_rawip_RMQ_consumer( + pstate->rawip_handover->rmq_consumer, added->liid) < 0) { + logger(LOG_INFO, + "OpenLI Mediator: WARNING failed to register rawip RMQ for LIID %s in pcap thread", + added->liid); + } + } + if (create_new_pcap_output(state, pstate, added->liid) + == NULL) { + logger(LOG_INFO, "OpenLI Mediator: failed to create new pcap output entity for LIID %s", added->liid); + } + } + + free(added->liid); + free(added->agencyid); + free(added); +} + +/** Parse and action a message received from the main thread. + * + * @param state The LEA send thread state for this thread + * @param pstate The pcap-specific state for this thread + * + * @return 1 if the pcap thread needs to be halted immediately, 0 otherwise. + */ +int handle_pcap_thread_messages(lea_thread_state_t *state, + pcap_thread_state_t *pstate) { + lea_thread_msg_t msg; + + while (libtrace_message_queue_try_get(&(state->in_main), (void *)&msg) + != LIBTRACE_MQ_FAILED) { + + if (msg.type == MED_LEA_MESSAGE_HALT) { + /* Main thread wants us to shutdown now */ + return 1; + } + + if (msg.type == MED_LEA_MESSAGE_SHUTDOWN_TIMER) { + /* Ignore this -- no need to shutdown the pcap thread */ + } + + if (msg.type == MED_LEA_MESSAGE_RECONNECT) { + /* Ignore */ + } + + if (msg.type == MED_LEA_MESSAGE_DISCONNECT) { + /* Ignore */ + } + + if (msg.type == MED_LEA_MESSAGE_RELOAD_CONFIG) { + /* Config has potentially changed, so re-read it */ + if (read_parent_config(state) == 1) { + reset_handover_rmq(state->agency.hi3); + reset_handover_rmq(state->agency.hi2); + reset_handover_rmq(pstate->rawip_handover); + } + } + + if (msg.type == MED_LEA_MESSAGE_UPDATE_AGENCY) { + /* Set a timer which upon expiry will declare any + * remaining unconfirmed LIIDs to be withdrawn. + */ + halt_mediator_timer(state->cleanse_liids); + if (start_mediator_timer(state->cleanse_liids, 30) < 0) { + logger(LOG_INFO, "OpenLI Mediator: failed to add timer to remove unconfirmed LIID mappings in pcap output thread"); + } + + } + + if (msg.type == MED_LEA_MESSAGE_REMOVE_LIID) { + /* An LIID has been withdrawn */ + char *liid = (char *)(msg.data); + + if (state->agency.hi2->rmq_consumer != NULL) { + deregister_mediator_iri_RMQ_consumer( + state->agency.hi2->rmq_consumer, liid); + } + if (state->agency.hi3->rmq_consumer != NULL) { + deregister_mediator_cc_RMQ_consumer( + state->agency.hi3->rmq_consumer, liid); + } + if (pstate->rawip_handover->rmq_consumer != NULL) { + deregister_mediator_rawip_RMQ_consumer( + pstate->rawip_handover->rmq_consumer, liid); + } + + withdraw_liid_agency_mapping(&(state->active_liids), liid); + free(liid); + } + + if (msg.type == MED_LEA_MESSAGE_ADD_LIID) { + /* An LIID has been assigned to an agency (or pcap) thread */ + added_liid_t *added = (added_liid_t *)msg.data; + add_new_pcapdisk_liid(state, pstate, added); + } + + } + return 0; +} + +/** Acts upon a file descriptor or timer event that has been triggered + * by this thread's epoll handler. * - * This thread handles any intercepted packets that the user has requested - * to be written to pcap files on disk, instead of mediated over the - * network using the ETSI LI handovers. 
+ * @param state The LEA send thread state for this thread + * @param pstate The pcap-specific state for this thread + * @param ev The epoll event that had triggered * - * @param params The message queue on which the main thread will - * be sending packets and instructions to this - * thread. + * @return -1 if an error occurs, 1 if the caller needs to break out of its + * current loop, 0 otherwise. */ -void *start_pcap_thread(void *params) { +static int pcap_thread_epoll_event(lea_thread_state_t *state, + pcap_thread_state_t *pstate, struct epoll_event *ev) { + + med_epoll_ev_t *mev = (med_epoll_ev_t *)(ev->data.ptr); + int ret = 0; + + switch (mev->fdtype) { + case MED_EPOLL_SIGCHECK_TIMER: + if (ev->events & EPOLLIN) { + /* Time to check the message queue again */ + ret = 1; + } else { + logger(LOG_INFO, "OpenLI Mediator: main epoll timer has failed in pcapdisk thread"); + ret = 0; + } + break; + case MED_EPOLL_RMQCHECK_TIMER: + /* This should never fire in this thread, but just in case... */ + ret = agency_thread_action_rmqcheck_timer(state, mev); + break; + case MED_EPOLL_CEASE_LIID_TIMER: + /* Clean up any unconfirmed LIIDs */ + ret = agency_thread_action_cease_liid_timer(state); + foreach_liid_agency_mapping(&(state->active_liids), pstate, + deregister_unconfirmed_pcap_liids); + break; + case MED_EPOLL_PCAP_TIMER: + /* halt the timer + * for each active pcap output: + * check if we need to rotate the file + * otherwise, flush pending output to the file + * restart the timer + */ + halt_mediator_timer(mev); + flush_pcap_outputs(state, pstate); + if (start_mediator_timer(mev, 60) < 0) { + logger(LOG_INFO, "OpenLI Mediator: unable to reset pcap flush timer in pcap output thread: %s", strerror(errno)); + ret = -1; + } + break; + default: + logger(LOG_INFO, "OpenLI Mediator: invalid epoll event type %d seen in pcapdisk thread", mev->fdtype); + ret = -1; + } + + return ret; +} + +/** The "main" method for a pcap output thread. + * + * @param params The LEA send thread state that has been created + * for this thread. + * + * @return NULL when the thread exits (via pthread_join()) + */ +static void *run_pcap_thread(void *params) { + lea_thread_state_t *state = (lea_thread_state_t *)params; + med_epoll_ev_t *flushtimer = NULL; + struct epoll_event evs[64]; + int i, nfds, timerexpired = 0; + int is_halted = 0; pcap_thread_state_t pstate; - mediator_pcap_msg_t pcapmsg; + uint32_t firstflush; + struct timeval tv; + + // defined in lea_send_thread.c + read_parent_config(state); + /* Initialise pcap-specific state for this thread */ pstate.active = NULL; - pstate.dir = NULL; - pstate.compresslevel = 10; - pstate.outtemplate = NULL; pstate.dirwarned = 0; pstate.inqueue = (libtrace_message_queue_t *)params; pstate.decoder = NULL; pstate.packet = NULL; + pstate.rawip_handover = create_new_handover(state->epoll_fd, NULL, NULL, + HANDOVER_RAWIP, 0, 0); - while (1) { - if (libtrace_message_queue_try_get(pstate.inqueue, - (void *)&pcapmsg) == LIBTRACE_MQ_FAILED) { - usleep(500); - continue; - } + register_handover_RMQ_all(pstate.rawip_handover, NULL, "pcapdisk", + state->internalrmqpass); + logger(LOG_INFO, "OpenLI Mediator: starting pcap output thread"); + + if (create_agency_thread_timers(state) < 0) { + goto threadexit; + } - if (pcapmsg.msgtype == PCAP_MESSAGE_HALT) { - /* Time to halt this thread */ + /* Don't need the RMQ check timer, since we're going to poll the + * RMQ queues multiple times per second. 
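+ * The main loop below re-arms a 50 millisecond timer on each pass, so
+ * the queues end up being checked roughly 20 times per second.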
+ */ + halt_mediator_timer(state->rmqhb); + + /* Set up the flush / rotation timer for our output files */ + gettimeofday(&tv, NULL); + firstflush = (((tv.tv_sec / 60) * 60) + 60) - tv.tv_sec; + + flushtimer = create_mediator_timer(state->epoll_fd, NULL, + MED_EPOLL_PCAP_TIMER, firstflush); + + if (flushtimer == NULL) { + logger(LOG_INFO, + "OpenLI Mediator: failed to create pcap rotation timer"); + } + + while (!is_halted) { + /* Check for messages from the main thread */ + is_halted = handle_pcap_thread_messages(state, &pstate); + + if (is_halted) { break; } - if (pcapmsg.msgtype == PCAP_MESSAGE_FLUSH) { - /* Time to do our regular flush of the output files */ - pcap_flush_traces(&pstate); - continue; + /* register RMQ consumers if required */ + if (!state->agency.hi2->rmq_registered) { + /* TODO uncomment + register_handover_RMQ_all(state->agency.hi2, + &(state->active_liids), state->agencyid); + */ } - if (pcapmsg.msgtype == PCAP_MESSAGE_ROTATE) { - /* Time to rotate the output files */ - pcap_rotate_traces(&pstate); - continue; + if (!state->agency.hi3->rmq_registered) { + register_handover_RMQ_all(state->agency.hi3, + &(state->active_liids), state->agencyid, + state->internalrmqpass); } - if (pcapmsg.msgtype == PCAP_MESSAGE_DISABLE_LIID) { - pcap_disable_liid(&pstate, (char *)pcapmsg.msgbody, pcapmsg.msglen); - continue; + /* epoll */ + if (start_mediator_ms_timer(state->timerev, 50) < 0) { + logger(LOG_INFO,"OpenLI Mediator: failed to add timer to epoll in agency thread for %s", state->agencyid); + break; } + timerexpired = 0; + while (!timerexpired && !is_halted) { + nfds = epoll_wait(state->epoll_fd, evs, 64, -1); - if (pcapmsg.msgtype == PCAP_MESSAGE_CHANGE_DIR) { - /* The main thread wants us to write pcap files to this directory */ - if (pstate.dir) { - /* If we already had a configured directory, we'll need to - * close all of our existing files and switch over to the - * new directory. - */ - free(pstate.dir); - if (strcmp(pstate.dir, (char *)pcapmsg.msgbody) != 0) { - halt_pcap_outputs(&pstate); + if (nfds < 0) { + if (errno == EINTR) { + continue; } + logger(LOG_INFO, "OpenLI Mediator: error while waiting for epoll events in pcap thread: %s", strerror(errno)); + is_halted = 1; + continue; } - pstate.dir = (char *)pcapmsg.msgbody; - if (pstate.dir) { - logger(LOG_INFO, - "OpenLI Mediator: any pcap trace files will be written to %s", - pstate.dir); - } else { - logger(LOG_INFO, - "OpenLI Mediator: pcap trace file directory has been set to NULL"); - } - continue; - } - - if (pcapmsg.msgtype == PCAP_MESSAGE_CHANGE_TEMPLATE) { - /* The main thread wants us to write pcap files using a new - * naming scheme */ - if (pstate.outtemplate) { - /* If we already had a configured template, we'll need to - * close all of our existing files and switch over to the - * new template. 
- */ - free(pstate.outtemplate); - if (strcmp(pstate.outtemplate, (char *)pcapmsg.msgbody) != 0) { - halt_pcap_outputs(&pstate); + + for (i = 0; i < nfds; i++) { + timerexpired = pcap_thread_epoll_event(state, &pstate, + &(evs[i])); + if (timerexpired == -1) { + is_halted = 1; + break; + } + if (timerexpired) { + break; } } - pstate.outtemplate = (char *)pcapmsg.msgbody; - if (pstate.outtemplate) { - logger(LOG_INFO, - "OpenLI Mediator: pcap trace files are now named according to the template '%s'", - pstate.outtemplate); - } else { - logger(LOG_INFO, - "OpenLI Mediator: pcap trace files are named using the default template"); - } - continue; } - if (pcapmsg.msgtype == PCAP_MESSAGE_CHANGE_COMPRESS) { - uint8_t *val = (uint8_t *)pcapmsg.msgbody; - - if (*val != pstate.compresslevel) { - logger(LOG_INFO, "OpenLI Mediator: changing pcap trace compression level to %u (from next file onwards)", *val); - } + /* Consume available packets and write them to their corresponding + * pcap files */ - pstate.compresslevel = *val; - continue; - } + /* TODO error handling? */ + consume_pcap_packets(state->agency.hi3, state, &pstate); + consume_pcap_packets(pstate.rawip_handover, state, &pstate); - if (pcapmsg.msgtype == PCAP_MESSAGE_RAWIP) { - /* We've received a "raw" IP packet to be written to disk */ - write_rawpcap_packet(&pstate, &pcapmsg); - continue; - } - - /* If we get here, we've received an ETSI record that needs to be - * reverted back to an IP packet and written to disk. - */ - write_pcap_packet(&pstate, &pcapmsg); + halt_mediator_timer(state->timerev); } - /* Clean up any remaining thread state before exiting */ - if (pstate.dir) { - free(pstate.dir); - halt_pcap_outputs(&pstate); - } +threadexit: + halt_pcap_outputs(&pstate); if (pstate.decoder) { wandder_free_etsili_decoder(pstate.decoder); } if (pstate.packet) { trace_destroy_packet(pstate.packet); } - logger(LOG_INFO, "OpenLI Mediator: exiting pcap thread."); + if (pstate.rawip_handover) { + free_handover(pstate.rawip_handover); + } + + if (flushtimer) { + destroy_mediator_timer(flushtimer); + } + + logger(LOG_INFO, "OpenLI Mediator: ending pcap output thread"); + destroy_agency_thread_state(state); pthread_exit(NULL); } +/** Creates a "dummy" agency for the pcap output thread so that the thread + * can make use of existing methods written for the LEA send threads which + * require valid handover instances. + * + * @param agency The agency to initialise with the fake handovers. + * @param epollfd The epoll file descriptor for the pcap thread. + */ +static inline void init_pcapdisk_agency(mediator_agency_t *agency, + int epollfd) { + agency->awaitingconfirm = 0; + agency->agencyid = strdup("pcapdisk"); + agency->disabled = 0; + agency->disabled_msg = 0; + agency->hi2 = create_new_handover(epollfd, NULL, NULL, HANDOVER_HI2, 0, 0); + agency->hi3 = create_new_handover(epollfd, NULL, NULL, HANDOVER_HI3, 0, 0); +} + +/** Creates and starts the pcap output thread for an OpenLI mediator. + * + * The pcap thread is treated as another LEA send thread by the mediator, + * so it will be added to the set of LEA send threads maintained by the + * main mediator thread. + * + * @param medleas The list of LEA send threads for the mediator + * + * @return 1 always. 
+ */ +int mediator_start_pcap_thread(mediator_lea_t *medleas) { + lea_thread_state_t *pcap = NULL; + mediator_lea_config_t *config = &(medleas->config); + + pcap = (lea_thread_state_t *)calloc(1, sizeof(lea_thread_state_t)); + pcap->parentconfig = config; + pcap->epoll_fd = epoll_create1(0); + + /* probably unnecessary, but doesn't hurt */ + pcap->handover_id = medleas->next_handover_id; + medleas->next_handover_id += 2; + + libtrace_message_queue_init(&(pcap->in_main), sizeof(lea_thread_msg_t)); + pcap->agencyid = strdup("pcapdisk"); + HASH_ADD_KEYPTR(hh, medleas->threads, pcap->agencyid, + strlen(pcap->agencyid), pcap); + + /* Use a "dummy" agency so we can re-use our code for managing RMQ + * consumers. + */ + init_pcapdisk_agency(&(pcap->agency), pcap->epoll_fd); + pthread_create(&(pcap->tid), NULL, run_pcap_thread, pcap); + return 1; +} + // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/mediator/pcapthread.h b/src/mediator/pcapthread.h index 93c8cf3a..dd3b0a58 100644 --- a/src/mediator/pcapthread.h +++ b/src/mediator/pcapthread.h @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018-2020 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018-2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -32,21 +32,24 @@ #include #include -/** State for a particular pcap output file */ +#include "lea_send_thread.h" +#include "export_buffer.h" + +/** State for a particular intercept that is being written to pcap files */ typedef struct active_pcap_output { - /** The LIID for the intercept that is being written to this file */ + /** The LIID for the intercept that is being written as pcap */ char *liid; /** The libtrace output file handle for the output file */ libtrace_out_t *out; - /** The number of packets written to this file so far */ + /** The number of packets written to the open file so far */ int pktwritten; UT_hash_handle hh; } active_pcap_output_t; -/** State for the pcap thread */ +/** Pcap-specific state for the pcap thread */ typedef struct pcap_thread_state { /** The queue which this thread will receive messages from the mediator */ @@ -58,15 +61,6 @@ typedef struct pcap_thread_state { /** A map of open pcap outputs, one per LIID */ active_pcap_output_t *active; - /** The directory where pcap file are to be written into */ - char *dir; - - /** The template that is used to name the pcap files */ - char *outtemplate; - - /** The compression level to use when writing pcap files */ - uint8_t compresslevel; - /** A flag that indicates whether we have logged an error due to there * being no valid directory configured to write pcaps into */ @@ -77,62 +71,24 @@ typedef struct pcap_thread_state { */ wandder_etsispec_t *decoder; -} pcap_thread_state_t; - -/** Simple wrapper structure for a message sent to the pcap thread */ -typedef struct mediator_pcap_message { - - /** The message type (see enum below for possible values) */ - uint8_t msgtype; - - /** Pointer to the message body (e.g. 
the packet to be written) */ - uint8_t *msgbody; - - /** Length of the msgbody, in bytes */ - uint16_t msglen; -} mediator_pcap_msg_t; - -/** Types of messages that can be sent to a pcap thread */ -enum { - /** Changes the directory where pcap files are written into */ - PCAP_MESSAGE_CHANGE_DIR, - - /** Tells the pcap thread to exit */ - PCAP_MESSAGE_HALT, - - /** Message contains an encoded ETSI record to be written as pcap */ - PCAP_MESSAGE_PACKET, - - /** Tells the pcap thread to flush any buffered output to disk */ - PCAP_MESSAGE_FLUSH, - - /** Triggers a rotation of all active pcap files */ - PCAP_MESSAGE_ROTATE, - - /** Message contains a raw IP packet to be written as pcap */ - PCAP_MESSAGE_RAWIP, - - /** Changes the template used to name pcap files */ - PCAP_MESSAGE_CHANGE_TEMPLATE, - - /** Changes the compression level used when writing pcap files */ - PCAP_MESSAGE_CHANGE_COMPRESS, - - /** Removes an LIID from the set of active pcap outputs */ - PCAP_MESSAGE_DISABLE_LIID, -}; + /** A dedicated handover instance used for receiving raw IP packets over + * RabbitMQ + */ + handover_t *rawip_handover; +} pcap_thread_state_t; -/** Starts the pcap file writing thread, which will listen on a queue for - * messages containing packets that will be written to pcap output files - * (as opposed to being emitted via an ETSI handover). +/** Creates and starts the pcap output thread for an OpenLI mediator. + * + * The pcap thread is treated as another LEA send thread by the mediator, + * so it will be added to the set of LEA send threads maintained by the + * main mediator thread. * - * @param params A pointer to the libtrace message queue that the - * packets for pcap export will be sent to by the main - * thread. + * @param medleas The list of LEA send threads for the mediator + * + * @return 1 always. 
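+ *
+ * A minimal usage sketch (assuming an already-initialised mediator_lea_t
+ * instance named "medleas"):
+ *
+ *     mediator_start_pcap_thread(&medleas);
+ *
+ * The resulting thread is halted in the same way as any other LEA send
+ * thread, i.e. by sending it a MED_LEA_MESSAGE_HALT message.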
*/ -void *start_pcap_thread(void *params); - +int mediator_start_pcap_thread(mediator_lea_t *medleas); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/netcomms.c b/src/netcomms.c index 8e64858c..44c32983 100644 --- a/src/netcomms.c +++ b/src/netcomms.c @@ -356,16 +356,17 @@ int push_lea_withdrawal_onto_net_buffer(net_buffer_t *nb, liagency_t *lea) { #define VENDMIRROR_IPINTERCEPT_MODIFY_BODY_LEN(ipint) \ (ipint->common.liid_len + ipint->common.authcc_len + \ ipint->username_len + sizeof(ipint->accesstype) + \ - sizeof(ipint->options) + \ + sizeof(ipint->options) + sizeof(ipint->common.tomediate) + \ sizeof(ipint->common.tostart_time) + sizeof(ipint->common.toend_time) \ - + sizeof(ipint->vendmirrorid) + (8 * 4)) + + sizeof(ipint->vendmirrorid) + (9 * 4)) #define IPINTERCEPT_MODIFY_BODY_LEN(ipint) \ (ipint->common.liid_len + ipint->common.authcc_len + \ ipint->common.delivcc_len + \ ipint->username_len + sizeof(ipint->accesstype) + \ + sizeof(ipint->common.tomediate) + \ sizeof(ipint->common.tostart_time) + sizeof(ipint->common.toend_time) \ - + sizeof(ipint->options) + (8 * 4)) + + sizeof(ipint->options) + (9 * 4)) static int _push_ipintercept_modify(net_buffer_t *nb, ipintercept_t *ipint) { @@ -448,6 +449,12 @@ static int _push_ipintercept_modify(net_buffer_t *nb, ipintercept_t *ipint) { goto pushmodfail; } + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(ipint->common.tomediate), + sizeof(ipint->common.tomediate)) == -1) { + goto pushmodfail; + } + return (int)totallen; @@ -459,11 +466,86 @@ static int _push_ipintercept_modify(net_buffer_t *nb, ipintercept_t *ipint) { } +#define EMAILINTERCEPT_MODIFY_BODY_LEN(em) \ + (em->common.liid_len + em->common.authcc_len + \ + em->common.delivcc_len + sizeof(em->common.toend_time) + \ + sizeof(em->common.tomediate) + \ + sizeof(em->common.tostart_time) + (6 * 4)) + +static int _push_emailintercept_modify(net_buffer_t *nb, emailintercept_t *em) { + ii_header_t hdr; + uint16_t totallen; + int ret; + + /* Pre-compute our body length so we can write it in the header */ + totallen = EMAILINTERCEPT_MODIFY_BODY_LEN(em); + if (totallen > 65535) { + logger(LOG_INFO, + "OpenLI: Email intercept modifcation is too long to fit in a single message (%d).", + totallen); + return -1; + } + + + /* Push on header */ + populate_header(&hdr, OPENLI_PROTO_MODIFY_EMAILINTERCEPT, totallen, 0); + if ((ret = push_generic_onto_net_buffer(nb, (uint8_t *)(&hdr), + sizeof(ii_header_t))) == -1) { + goto pushmodfail; + } + + /* Push on each intercept field */ + + if (push_tlv(nb, OPENLI_PROTO_FIELD_LIID, (uint8_t *)em->common.liid, + strlen(em->common.liid)) == -1) { + goto pushmodfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_AUTHCC, (uint8_t *)em->common.authcc, + strlen(em->common.authcc)) == -1) { + goto pushmodfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_DELIVCC, (uint8_t *)em->common.delivcc, + strlen(em->common.delivcc)) == -1) { + goto pushmodfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_INTERCEPT_START_TIME, + (uint8_t *)&(em->common.tostart_time), + sizeof(em->common.tostart_time)) == -1) { + goto pushmodfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_INTERCEPT_END_TIME, + (uint8_t *)&(em->common.toend_time), + sizeof(em->common.toend_time)) == -1) { + goto pushmodfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(em->common.tomediate), + sizeof(em->common.tomediate)) == -1) { + goto pushmodfail; + } + + + return (int)totallen; + +pushmodfail: + logger(LOG_INFO, + "OpenLI: unable to push 
Email intercept modify for %s to collector fd %d", + em->common.liid, nb->fd); + return -1; +} + + #define VOIPINTERCEPT_MODIFY_BODY_LEN(vint) \ (vint->common.liid_len + vint->common.authcc_len + \ vint->common.delivcc_len + \ sizeof(vint->options) + sizeof(vint->common.toend_time) + \ - sizeof(vint->common.tostart_time) + (6 * 4)) + sizeof(vint->common.tomediate) + \ + sizeof(vint->common.tostart_time) + (7 * 4)) static int _push_voipintercept_modify(net_buffer_t *nb, voipintercept_t *vint) { @@ -522,6 +604,12 @@ static int _push_voipintercept_modify(net_buffer_t *nb, voipintercept_t *vint) goto pushmodfail; } + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(vint->common.tomediate), + sizeof(vint->common.tomediate)) == -1) { + goto pushmodfail; + } + return (int)totallen; @@ -539,18 +627,110 @@ int push_intercept_modify_onto_net_buffer(net_buffer_t *nb, void *data, return _push_voipintercept_modify(nb, (voipintercept_t *)data); } else if (modtype == OPENLI_PROTO_MODIFY_IPINTERCEPT) { return _push_ipintercept_modify(nb, (ipintercept_t *)data); + } else if (modtype == OPENLI_PROTO_MODIFY_EMAILINTERCEPT) { + return _push_emailintercept_modify(nb, (emailintercept_t *)data); } logger(LOG_INFO, "OpenLI: bad modtype in push_intercept_modify_onto_net_buffer: %d\n", modtype); return -1; } +#define EMAILINTERCEPT_BODY_LEN(em) \ + (em->common.liid_len + em->common.authcc_len + \ + em->common.delivcc_len + strlen(em->common.targetagency) + \ + sizeof(em->common.destid) + sizeof(em->common.tostart_time) + \ + sizeof(em->common.tomediate) + \ + sizeof(em->common.toend_time) + (8 * 4)) + +int push_emailintercept_onto_net_buffer(net_buffer_t *nb, void *data) { + + ii_header_t hdr; + uint16_t totallen; + int ret; + emailintercept_t *em = (emailintercept_t *)data; + + /* Pre-compute our body length so we can write it in the header */ + totallen = EMAILINTERCEPT_BODY_LEN(em); + if (totallen > 65535) { + logger(LOG_INFO, + "OpenLI: Email intercept announcement is too long to fit in a single message (%d).", + totallen); + return -1; + } + + + /* Push on header */ + populate_header(&hdr, OPENLI_PROTO_START_EMAILINTERCEPT, totallen, 0); + if ((ret = push_generic_onto_net_buffer(nb, (uint8_t *)(&hdr), + sizeof(ii_header_t))) == -1) { + goto pushemailintfail; + } + + /* Push on each intercept field */ + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_LIID, + (uint8_t *)em->common.liid, + em->common.liid_len)) == -1) { + goto pushemailintfail; + } + + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_AUTHCC, + (uint8_t *)em->common.authcc, + em->common.authcc_len)) == -1) { + goto pushemailintfail; + } + + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_DELIVCC, + (uint8_t *)em->common.delivcc, + em->common.delivcc_len)) == -1) { + goto pushemailintfail; + } + + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_LEAID, + (uint8_t *)em->common.targetagency, + strlen(em->common.targetagency))) == -1) { + goto pushemailintfail; + } + + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_MEDIATORID, + (uint8_t *)&(em->common.destid), + sizeof(em->common.destid))) == -1) { + goto pushemailintfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_INTERCEPT_START_TIME, + (uint8_t *)&(em->common.tostart_time), + sizeof(em->common.tostart_time)) == -1) { + goto pushemailintfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_INTERCEPT_END_TIME, + (uint8_t *)&(em->common.toend_time), + sizeof(em->common.toend_time)) == -1) { + goto pushemailintfail; + } + + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(em->common.tomediate), + 
sizeof(em->common.tomediate)) == -1) { + goto pushemailintfail; + } + + return (int)totallen; + +pushemailintfail: + logger(LOG_INFO, + "OpenLI: unable to push new Email intercept %s to collector fd %d", + em->common.liid, nb->fd); + return -1; +} + #define VOIPINTERCEPT_BODY_LEN(vint) \ (vint->common.liid_len + vint->common.authcc_len + \ vint->common.delivcc_len + strlen(vint->common.targetagency) + \ sizeof(vint->common.destid) + sizeof(vint->options) \ + + sizeof(vint->common.tomediate) \ + sizeof(vint->common.tostart_time) + sizeof(vint->common.toend_time) \ - + (8 * 4)) + + (9 * 4)) #define INTERCEPT_WITHDRAW_BODY_LEN(liid, authcc) \ (strlen(liid) + strlen(authcc) + (2 * 4)) @@ -564,11 +744,16 @@ int push_intercept_withdrawal_onto_net_buffer(net_buffer_t *nb, char *liid, *authcc; voipintercept_t *vint = NULL; ipintercept_t *ipint = NULL; + emailintercept_t *mailint = NULL; if (wdtype == OPENLI_PROTO_HALT_VOIPINTERCEPT) { vint = (voipintercept_t *)data; liid = vint->common.liid; authcc = vint->common.authcc; + } else if (wdtype == OPENLI_PROTO_HALT_EMAILINTERCEPT) { + mailint = (emailintercept_t *)data; + liid = mailint->common.liid; + authcc = mailint->common.authcc; } else if (wdtype == OPENLI_PROTO_HALT_IPINTERCEPT) { ipint = (ipintercept_t *)data; liid = ipint->common.liid; @@ -689,6 +874,11 @@ int push_voipintercept_onto_net_buffer(net_buffer_t *nb, void *data) { goto pushvoipintfail; } + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(vint->common.tomediate), + sizeof(vint->common.tomediate)) == -1) { + goto pushvoipintfail; + } return (int)totallen; @@ -793,6 +983,80 @@ int push_sip_target_withdrawal_onto_net_buffer(net_buffer_t *nb, OPENLI_PROTO_WITHDRAW_SIP_TARGET); } +#define EMAILTARGET_BODY_LEN(tgt, em) \ + (em->common.liid_len + strlen(tgt->address) \ + + (2 * 4)) + +static inline int push_email_target_onto_net_buffer_generic(net_buffer_t *nb, + email_target_t *tgt, emailintercept_t *em, + openli_proto_msgtype_t msgtype) { + + uint16_t totallen; + ii_header_t hdr; + int ret; + + if (msgtype != OPENLI_PROTO_ANNOUNCE_EMAIL_TARGET && + msgtype != OPENLI_PROTO_WITHDRAW_EMAIL_TARGET) { + logger(LOG_INFO, + "OpenLI: push_email_target_onto_net_buffer_generic() called with invalid message type: %d", + msgtype); + return -1; + } + + totallen = EMAILTARGET_BODY_LEN(tgt, em); + + if (totallen > 65535) { + logger(LOG_INFO, + "OpenLI: Email target announcement is too long to fit in a single message (%d).", + totallen); + return -1; + } + + /* Push on header */ + populate_header(&hdr, msgtype, totallen, 0); + if ((ret = push_generic_onto_net_buffer(nb, (uint8_t *)(&hdr), + sizeof(ii_header_t))) == -1) { + goto pushtargetfail; + } + + /* Push on each field */ + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_LIID, + (uint8_t *)em->common.liid, em->common.liid_len)) == -1) { + goto pushtargetfail; + } + + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_EMAIL_TARGET, + (uint8_t *)tgt->address, strlen(tgt->address))) == -1) { + goto pushtargetfail; + } + + /* Technically, we should also include authCC in here too for + * multi-national operators but that can probably wait for now. 
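+ *
+ * The resulting message is therefore just an intercept message header
+ * followed by two TLVs: the LIID and the target email address.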
+ */ + + return (int)totallen; + +pushtargetfail: + logger(LOG_INFO, + "OpenLI: unable to push new Email target %s to collector fd %d", + tgt->address, nb->fd); + return -1; +} + +int push_email_target_onto_net_buffer(net_buffer_t *nb, email_target_t *tgt, + emailintercept_t *mailint) { + + return push_email_target_onto_net_buffer_generic(nb, tgt, mailint, + OPENLI_PROTO_ANNOUNCE_EMAIL_TARGET); +} + +int push_email_target_withdrawal_onto_net_buffer(net_buffer_t *nb, + email_target_t *tgt, emailintercept_t *mailint) { + + return push_email_target_onto_net_buffer_generic(nb, tgt, mailint, + OPENLI_PROTO_WITHDRAW_EMAIL_TARGET); +} + #define STATICIP_RANGE_BODY_LEN(ipint, ipr) \ (strlen(ipr->rangestr) + sizeof(ipr->cin) + \ ipint->common.liid_len + (3 * 4)) @@ -871,18 +1135,18 @@ int push_static_ipranges_onto_net_buffer(net_buffer_t *nb, ipint->common.delivcc_len + \ ipint->username_len + sizeof(ipint->common.destid) + \ strlen(ipint->common.targetagency) + \ - sizeof(ipint->options) + \ + sizeof(ipint->options) + sizeof(ipint->common.tomediate) + \ sizeof(ipint->common.tostart_time) + sizeof(ipint->common.toend_time) \ - + sizeof(ipint->accesstype) + (10 * 4)) + + sizeof(ipint->accesstype) + (11 * 4)) #define VENDMIRROR_IPINTERCEPT_BODY_LEN(ipint) \ (ipint->common.liid_len + ipint->common.authcc_len + \ ipint->common.delivcc_len + ipint->username_len + \ strlen(ipint->common.targetagency) + \ sizeof(ipint->vendmirrorid) + sizeof(ipint->common.destid) + \ - sizeof(ipint->options) + \ + sizeof(ipint->options) + sizeof(ipint->common.tomediate) + \ sizeof(ipint->common.tostart_time) + sizeof(ipint->common.toend_time) \ - + sizeof(ipint->accesstype) + (11 * 4)) + + sizeof(ipint->accesstype) + (12 * 4)) int push_ipintercept_onto_net_buffer(net_buffer_t *nb, void *data) { @@ -982,6 +1246,11 @@ int push_ipintercept_onto_net_buffer(net_buffer_t *nb, void *data) { goto pushipintfail; } + if (push_tlv(nb, OPENLI_PROTO_FIELD_TOMEDIATE, + (uint8_t *)&(ipint->common.tomediate), + sizeof(ipint->common.tomediate)) == -1) { + goto pushipintfail; + } HASH_ITER(hh, ipint->statics, ipr, tmpr) { if (push_static_ipranges_onto_net_buffer(nb, ipint, ipr) < 0) { return -1; @@ -1060,7 +1329,8 @@ int push_mediator_withdraw_onto_net_buffer(net_buffer_t *nb, #define HI1_NOTIFY_BODY_LEN(ndata) \ (sizeof(ndata->notify_type) + sizeof(ndata->seqno) + sizeof(ndata->ts_sec) \ + sizeof(ndata->ts_usec) + strlen(ndata->liid) + strlen(ndata->authcc) + \ - strlen(ndata->delivcc) + strlen(ndata->agencyid) + (8 * 4)) + strlen(ndata->delivcc) + strlen(ndata->agencyid) + \ + target_info_len + (field_count * 4)) int push_hi1_notification_onto_net_buffer(net_buffer_t *nb, hi1_notify_data_t *ndata) { @@ -1068,6 +1338,16 @@ int push_hi1_notification_onto_net_buffer(net_buffer_t *nb, ii_header_t hdr; uint16_t totallen; int ret; + int field_count; + int target_info_len; + + if (ndata->target_info == NULL) { + field_count = 8; + target_info_len = 0; + } else { + field_count = 9; + target_info_len = strlen(ndata->target_info); + } if (HI1_NOTIFY_BODY_LEN(ndata) > 65535) { logger(LOG_INFO, @@ -1121,6 +1401,13 @@ int push_hi1_notification_onto_net_buffer(net_buffer_t *nb, return -1; } + if (ndata->target_info) { + if ((ret = push_tlv(nb, OPENLI_PROTO_FIELD_USERNAME, + (uint8_t *)(ndata->target_info), target_info_len)) == -1) { + return -1; + } + } + return (int)totallen; } @@ -1383,6 +1670,76 @@ static int decode_tlv(uint8_t *start, uint8_t *end, (target)[vallen] = '\0'; \ } while (0); +int decode_emailintercept_start(uint8_t *msgbody, uint16_t 
len, + emailintercept_t *mailint) { + + uint8_t *msgend = msgbody + len; + + mailint->common.liid = NULL; + mailint->common.authcc = NULL; + mailint->common.delivcc = NULL; + mailint->targets = NULL; + mailint->common.destid = 0; + mailint->common.targetagency = NULL; + mailint->awaitingconfirm = 0; + + mailint->common.liid_len = 0; + mailint->common.authcc_len = 0; + mailint->common.delivcc_len = 0; + mailint->common.tostart_time = 0; + mailint->common.toend_time = 0; + mailint->common.tomediate = 0; + + while (msgbody < msgend) { + openli_proto_fieldtype_t f; + uint8_t *valptr; + uint16_t vallen; + + if (decode_tlv(msgbody, msgend, &f, &vallen, &valptr) == -1) { + return -1; + } + + if (f == OPENLI_PROTO_FIELD_MEDIATORID) { + mailint->common.destid = *((uint32_t *)valptr); + } else if (f == OPENLI_PROTO_FIELD_LIID) { + DECODE_STRING_FIELD(mailint->common.liid, valptr, vallen); + mailint->common.liid_len = vallen; + } else if (f == OPENLI_PROTO_FIELD_AUTHCC) { + DECODE_STRING_FIELD(mailint->common.authcc, valptr, vallen); + mailint->common.authcc_len = vallen; + } else if (f == OPENLI_PROTO_FIELD_LEAID) { + DECODE_STRING_FIELD(mailint->common.targetagency, valptr, vallen); + } else if (f == OPENLI_PROTO_FIELD_DELIVCC) { + DECODE_STRING_FIELD(mailint->common.delivcc, valptr, vallen); + mailint->common.delivcc_len = vallen; + } else if (f == OPENLI_PROTO_FIELD_INTERCEPT_START_TIME) { + mailint->common.tostart_time = *((uint64_t *)valptr); + } else if (f == OPENLI_PROTO_FIELD_INTERCEPT_END_TIME) { + mailint->common.toend_time = *((uint64_t *)valptr); + } else if (f == OPENLI_PROTO_FIELD_TOMEDIATE) { + mailint->common.tomediate = *((intercept_outputs_t *)valptr); + } else { + dump_buffer_contents(msgbody, len); + logger(LOG_INFO, + "OpenLI: invalid field in received Email intercept: %d.", f); + return -1; + } + msgbody += (vallen + 4); + } + + return 0; +} + +int decode_emailintercept_halt(uint8_t *msgbody, uint16_t len, + emailintercept_t *mailint) { + return decode_emailintercept_start(msgbody, len, mailint); +} + +int decode_emailintercept_modify(uint8_t *msgbody, uint16_t len, + emailintercept_t *mailint) { + return decode_emailintercept_start(msgbody, len, mailint); +} + int decode_voipintercept_start(uint8_t *msgbody, uint16_t len, voipintercept_t *vint) { @@ -1408,6 +1765,7 @@ int decode_voipintercept_start(uint8_t *msgbody, uint16_t len, vint->common.delivcc_len = 0; vint->common.tostart_time = 0; vint->common.toend_time = 0; + vint->common.tomediate = 0; while (msgbody < msgend) { openli_proto_fieldtype_t f; @@ -1439,6 +1797,8 @@ int decode_voipintercept_start(uint8_t *msgbody, uint16_t len, vint->common.tostart_time = *((uint64_t *)valptr); } else if (f == OPENLI_PROTO_FIELD_INTERCEPT_END_TIME) { vint->common.toend_time = *((uint64_t *)valptr); + } else if (f == OPENLI_PROTO_FIELD_TOMEDIATE) { + vint->common.tomediate = *((intercept_outputs_t *)valptr); } else { dump_buffer_contents(msgbody, len); logger(LOG_INFO, @@ -1493,6 +1853,7 @@ int decode_ipintercept_start(uint8_t *msgbody, uint16_t len, ipint->common.delivcc_len = 0; ipint->common.tostart_time = 0; ipint->common.toend_time = 0; + ipint->common.tomediate = 0; ipint->username_len = 0; while (msgbody < msgend) { @@ -1528,6 +1889,8 @@ int decode_ipintercept_start(uint8_t *msgbody, uint16_t len, ipint->common.tostart_time = *((uint64_t *)valptr); } else if (f == OPENLI_PROTO_FIELD_INTERCEPT_END_TIME) { ipint->common.toend_time = *((uint64_t *)valptr); + } else if (f == OPENLI_PROTO_FIELD_TOMEDIATE) { + ipint->common.tomediate = 
*((intercept_outputs_t *)valptr); } else if (f == OPENLI_PROTO_FIELD_USERNAME) { DECODE_STRING_FIELD(ipint->username, valptr, vallen); if (vallen == 0) { @@ -1591,6 +1954,57 @@ int decode_mediator_withdraw(uint8_t *msgbody, uint16_t len, return decode_mediator_announcement(msgbody, len, med); } +int decode_email_target_announcement(uint8_t *msgbody, uint16_t len, + email_target_t *tgt, char *liidspace, int spacelen) { + + uint8_t *msgend = msgbody + len; + tgt->address = NULL; + tgt->awaitingconfirm = 0; + + while (msgbody < msgend) { + openli_proto_fieldtype_t f; + uint8_t *valptr; + uint16_t vallen; + + if (decode_tlv(msgbody, msgend, &f, &vallen, &valptr) == -1) { + return -1; + } + + if (f == OPENLI_PROTO_FIELD_EMAIL_TARGET) { + DECODE_STRING_FIELD(tgt->address, valptr, vallen); + } else if (f == OPENLI_PROTO_FIELD_LIID) { + if (vallen >= spacelen) { + logger(LOG_INFO, + "OpenLI: not enough space to save LIID from Email target message -- space provided %d, required %u\n", spacelen, vallen); + return -1; + } + strncpy(liidspace, (char *)valptr, vallen); + liidspace[vallen] = '\0'; + } else { + dump_buffer_contents(msgbody, len); + logger(LOG_INFO, + "OpenLI: invalid field in received Email target announcement: %d.", + f); + return -1; + } + msgbody += (vallen + 4); + } + + if (tgt->address == NULL) { + logger(LOG_INFO, + "OpenLI: received a Email target message with no address?"); + return -1; + } + return 0; +} + +int decode_email_target_withdraw(uint8_t *msgbody, uint16_t len, + email_target_t *tgt, char *liidspace, int spacelen) { + + return decode_email_target_announcement(msgbody, len, tgt, liidspace, + spacelen); +} + int decode_sip_target_announcement(uint8_t *msgbody, uint16_t len, openli_sip_identity_t *sipid, char *liidspace, int spacelen) { @@ -1627,7 +2041,7 @@ int decode_sip_target_announcement(uint8_t *msgbody, uint16_t len, } else { dump_buffer_contents(msgbody, len); logger(LOG_INFO, - "OpenLI: invalid field in received core server announcement: %d.", + "OpenLI: invalid field in received SIP target announcement: %d.", f); return -1; } @@ -1814,6 +2228,8 @@ int decode_hi1_notification(uint8_t *msgbody, uint16_t len, DECODE_STRING_FIELD(ndata->authcc, valptr, vallen); } else if (f == OPENLI_PROTO_FIELD_DELIVCC) { DECODE_STRING_FIELD(ndata->delivcc, valptr, vallen); + } else if (f == OPENLI_PROTO_FIELD_USERNAME) { + DECODE_STRING_FIELD(ndata->target_info, valptr, vallen); } else if (f == OPENLI_PROTO_FIELD_HI1_NOTIFY_TYPE) { ndata->notify_type = *((hi1_notify_t *)valptr); } else if (f == OPENLI_PROTO_FIELD_SEQNO) { diff --git a/src/netcomms.h b/src/netcomms.h index a99db281..87891c45 100644 --- a/src/netcomms.h +++ b/src/netcomms.h @@ -109,6 +109,11 @@ typedef enum { OPENLI_PROTO_HEARTBEAT, OPENLI_PROTO_SSL_REQUIRED, OPENLI_PROTO_HI1_NOTIFICATION, + OPENLI_PROTO_START_EMAILINTERCEPT, + OPENLI_PROTO_HALT_EMAILINTERCEPT, + OPENLI_PROTO_MODIFY_EMAILINTERCEPT, + OPENLI_PROTO_ANNOUNCE_EMAIL_TARGET, + OPENLI_PROTO_WITHDRAW_EMAIL_TARGET, } openli_proto_msgtype_t; typedef struct net_buffer { @@ -154,6 +159,8 @@ typedef enum { OPENLI_PROTO_FIELD_TS_USEC, OPENLI_PROTO_FIELD_INTERCEPT_START_TIME, OPENLI_PROTO_FIELD_INTERCEPT_END_TIME, + OPENLI_PROTO_FIELD_EMAIL_TARGET, + OPENLI_PROTO_FIELD_TOMEDIATE, } openli_proto_fieldtype_t; net_buffer_t *create_net_buffer(net_buffer_type_t buftype, int fd, SSL *ssl); @@ -174,6 +181,8 @@ int push_mediator_withdraw_onto_net_buffer(net_buffer_t *nb, int push_ipintercept_onto_net_buffer(net_buffer_t *nb, void *ipint); int 
push_voipintercept_onto_net_buffer(net_buffer_t *nb, void *vint); +int push_emailintercept_onto_net_buffer(net_buffer_t *nb, + void *mailint); int push_intercept_withdrawal_onto_net_buffer(net_buffer_t *nb, void *cept, openli_proto_msgtype_t wdtype); int push_intercept_modify_onto_net_buffer(net_buffer_t *nb, @@ -208,6 +217,10 @@ int push_static_ipranges_onto_net_buffer(net_buffer_t *nb, ipintercept_t *ipint, static_ipranges_t *ipr); int push_hi1_notification_onto_net_buffer(net_buffer_t *nb, hi1_notify_data_t *ndata); +int push_email_target_onto_net_buffer(net_buffer_t *nb, + email_target_t *tgt, emailintercept_t *mailint); +int push_email_target_withdrawal_onto_net_buffer(net_buffer_t *nb, + email_target_t *tgt, emailintercept_t *mailint); openli_proto_msgtype_t receive_RMQ_buffer(net_buffer_t *nb, amqp_connection_state_t amqp_state, uint8_t **msgbody, @@ -234,6 +247,12 @@ int decode_voipintercept_halt(uint8_t *msgbody, uint16_t len, voipintercept_t *vint); int decode_voipintercept_modify(uint8_t *msgbody, uint16_t len, voipintercept_t *vint); +int decode_emailintercept_start(uint8_t *msgbody, uint16_t len, + emailintercept_t *mailint); +int decode_emailintercept_halt(uint8_t *msgbody, uint16_t len, + emailintercept_t *mailint); +int decode_emailintercept_modify(uint8_t *msgbody, uint16_t len, + emailintercept_t *mailint); int decode_lea_announcement(uint8_t *msgbody, uint16_t len, liagency_t *lea); int decode_lea_withdrawal(uint8_t *msgbody, uint16_t len, liagency_t *lea); int decode_liid_mapping(uint8_t *msgbody, uint16_t len, char **agency, @@ -247,6 +266,10 @@ int decode_sip_target_announcement(uint8_t *msgbody, uint16_t len, openli_sip_identity_t *sipid, char *liidspace, int spacelen); int decode_sip_target_withdraw(uint8_t *msgbody, uint16_t len, openli_sip_identity_t *sipid, char *liidspace, int spacelen); +int decode_email_target_announcement(uint8_t *msgbody, uint16_t len, + email_target_t *tgt, char *liidspace, int spacelen); +int decode_email_target_withdraw(uint8_t *msgbody, uint16_t len, + email_target_t *tgt, char *liidspace, int spacelen); int decode_staticip_announcement(uint8_t *msgbody, uint16_t len, static_ipranges_t *ipr); int decode_staticip_removal(uint8_t *msgbody, uint16_t len, diff --git a/src/openli_tls.h b/src/openli_tls.h index 6af19d93..f39ed22e 100644 --- a/src/openli_tls.h +++ b/src/openli_tls.h @@ -49,6 +49,7 @@ enum { typedef struct openli_RMQ_config { char *name; char *pass; + char *internalpass; char *hostname; int port; int heartbeatFreq; diff --git a/src/provisioner/clientupdates.c b/src/provisioner/clientupdates.c index b9d209e6..d2684125 100644 --- a/src/provisioner/clientupdates.c +++ b/src/provisioner/clientupdates.c @@ -53,6 +53,45 @@ static inline int enable_epoll_write(provision_state_t *state, return 0; } +int compare_email_targets(provision_state_t *currstate, + emailintercept_t *existing, emailintercept_t *reload) { + + email_target_t *oldtgt, *newtgt, *tmp, *found; + int changes = 0; + + HASH_ITER(hh, existing->targets, oldtgt, tmp) { + oldtgt->awaitingconfirm = 1; + + HASH_FIND(hh, reload->targets, oldtgt->address, strlen(oldtgt->address), + found); + if (found) { + found->awaitingconfirm = 0; + oldtgt->awaitingconfirm = 0; + } else { + /* This target is no longer present in the target list */ + if (announce_email_target_change(currstate, oldtgt, + existing, 0) < 0) { + return -1; + } + changes ++; + } + } + + HASH_ITER(hh, reload->targets, newtgt, tmp) { + if (newtgt->awaitingconfirm == 0) { + continue; + } + /* This target has been added 
since we last reloaded config so + * announce it. */ + if (announce_email_target_change(currstate, newtgt, existing, 1) < 0) { + return -1; + } + changes ++; + } + + return changes; +} + int compare_sip_targets(provision_state_t *currstate, voipintercept_t *existing, voipintercept_t *reload) { @@ -344,7 +383,7 @@ int disconnect_mediators_from_collectors(provision_state_t *state) { } int announce_hi1_notification_to_mediators(provision_state_t *state, - intercept_common_t *intcomm, hi1_notify_t not_type) { + intercept_common_t *intcomm, char *target_id, hi1_notify_t not_type) { /* For now, I'm just going to send the notification to all mediators * and rely on them to ignore those that are not for agencies that @@ -369,6 +408,7 @@ int announce_hi1_notification_to_mediators(provision_state_t *state, ndata.seqno = intcomm->hi1_seqno; ndata.ts_sec = tv.tv_sec; ndata.ts_usec = tv.tv_usec; + ndata.target_info = target_id; SEND_ALL_MEDIATORS_BEGIN if (push_hi1_notification_onto_net_buffer(sock->outgoing, &ndata) == -1) @@ -479,6 +519,36 @@ int announce_coreserver_change(provision_state_t *state, return 0; } +int announce_email_target_change(provision_state_t *state, + email_target_t *target, emailintercept_t *mailint, uint8_t isnew) { + + SEND_ALL_COLLECTORS_BEGIN + if (isnew) { + if (push_email_target_onto_net_buffer(sock->outgoing, target, + mailint) == -1) { + logger(LOG_INFO, + "OpenLI: Unable to push Email target to collector %s", + col->identifier); + disconnect_provisioner_client(state->epoll_fd, col->client, + col->identifier); + continue; + } + } else { + if (push_email_target_withdrawal_onto_net_buffer(sock->outgoing, + target, mailint) == -1) { + logger(LOG_INFO, + "OpenLI: Unable to push removal of Email target to collector %s", + col->identifier); + disconnect_provisioner_client(state->epoll_fd, col->client, + col->identifier); + continue; + } + } + SEND_ALL_COLLECTORS_END + + return 0; +} + int announce_sip_target_change(provision_state_t *state, openli_sip_identity_t *sipid, voipintercept_t *vint, uint8_t isnew) { @@ -526,6 +596,33 @@ int announce_all_sip_targets(provision_state_t *state, voipintercept_t *vint) { return 0; } +int announce_all_email_targets(provision_state_t *state, + emailintercept_t *mailint) { + email_target_t *tgt, *tmp; + + HASH_ITER(hh, mailint->targets, tgt, tmp) { + if (tgt->awaitingconfirm && announce_email_target_change(state, + tgt, mailint, 1) < 0) { + return -1; + } + tgt->awaitingconfirm = 0; + } + return 0; +} + +int remove_all_email_targets(provision_state_t *state, + emailintercept_t *mailint) { + email_target_t *tgt, *tmp; + + HASH_ITER(hh, mailint->targets, tgt, tmp) { + if (tgt->awaitingconfirm == 0 && announce_email_target_change(state, + tgt, mailint, 0) < 0) { + return -1; + } + } + return 0; +} + int remove_all_sip_targets(provision_state_t *state, voipintercept_t *vint) { libtrace_list_node_t *n; openli_sip_identity_t *sipid; diff --git a/src/provisioner/configwriter.c b/src/provisioner/configwriter.c index 8d890c35..e8facd97 100644 --- a/src/provisioner/configwriter.c +++ b/src/provisioner/configwriter.c @@ -429,6 +429,56 @@ static int emit_intercept_common(intercept_common_t *intcom, (yaml_char_t *)buffer, strlen(buffer), 1, 0, YAML_PLAIN_SCALAR_STYLE); if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"outputhandovers", strlen("outputhandovers"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + if 
(intcom->tomediate == OPENLI_INTERCEPT_OUTPUTS_IRIONLY) { + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"irionly", strlen("irionly"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + } else if (intcom->tomediate == OPENLI_INTERCEPT_OUTPUTS_CCONLY) { + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"cconly", strlen("cconly"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + } else { + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"all", strlen("all"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + } + if (!yaml_emitter_emit(emitter, &event)) return -1; + + + return 0; +} + +static int emit_email_targets(emailintercept_t *mailint, + yaml_emitter_t *emitter) { + + email_target_t *tgt, *tmp; + yaml_event_t event; + + HASH_ITER(hh, mailint->targets, tgt, tmp) { + + yaml_mapping_start_event_initialize(&event, NULL, + (yaml_char_t *)YAML_MAP_TAG, 1, YAML_ANY_MAPPING_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"address", strlen("address"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)tgt->address, strlen(tgt->address), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_mapping_end_event_initialize(&event); + if (!yaml_emitter_emit(emitter, &event)) return -1; + } return 0; } @@ -619,6 +669,58 @@ static int emit_ipintercepts(ipintercept_t *ipints, yaml_emitter_t *emitter) { return 0; } +static int emit_emailintercepts(emailintercept_t *mailints, + yaml_emitter_t *emitter) { + yaml_event_t event; + emailintercept_t *mail, *tmp; + + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"emailintercepts", strlen("emailintercepts"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_sequence_start_event_initialize(&event, NULL, + (yaml_char_t *)YAML_SEQ_TAG, 1, YAML_ANY_SEQUENCE_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + HASH_ITER(hh_liid, mailints, mail, tmp) { + + yaml_mapping_start_event_initialize(&event, NULL, + (yaml_char_t *)YAML_MAP_TAG, 1, YAML_ANY_MAPPING_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + if (emit_intercept_common(&(mail->common), emitter) < 0) { + return -1; + } + + yaml_scalar_event_initialize(&event, NULL, (yaml_char_t *)YAML_STR_TAG, + (yaml_char_t *)"targets", strlen("targets"), 1, 0, + YAML_PLAIN_SCALAR_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_sequence_start_event_initialize(&event, NULL, + (yaml_char_t *)YAML_SEQ_TAG, 1, YAML_ANY_SEQUENCE_STYLE); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + if (emit_email_targets(mail, emitter) < 0) { + return -1; + } + + yaml_sequence_end_event_initialize(&event); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + yaml_mapping_end_event_initialize(&event); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + } + + yaml_sequence_end_event_initialize(&event); + if (!yaml_emitter_emit(emitter, &event)) return -1; + + return 0; +} + int emit_intercept_config(char *configfile, prov_intercept_conf_t *conf) { @@ -671,6 +773,21 @@ int emit_intercept_config(char *configfile, prov_intercept_conf_t *conf) { goto error; } + if (emit_core_server_list(conf->smtpservers, "smtpservers", + &emitter) 
< 0) { + goto error; + } + + if (emit_core_server_list(conf->imapservers, "imapservers", + &emitter) < 0) { + goto error; + } + + if (emit_core_server_list(conf->pop3servers, "pop3servers", + &emitter) < 0) { + goto error; + } + if (emit_default_radius_usernames(conf->defradusers, &emitter) < 0) { goto error; } @@ -687,6 +804,10 @@ int emit_intercept_config(char *configfile, prov_intercept_conf_t *conf) { goto error; } + if (emit_emailintercepts(conf->emailintercepts, &emitter) < 0) { + goto error; + } + yaml_mapping_end_event_initialize(&event); if (!yaml_emitter_emit(&emitter, &event)) goto error; diff --git a/src/provisioner/hup_reload.c b/src/provisioner/hup_reload.c index 2e0be95f..42f08b29 100644 --- a/src/provisioner/hup_reload.c +++ b/src/provisioner/hup_reload.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2018 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2018 - 2022 The University of Waikato, Hamilton, New Zealand. * All rights reserved. * * This file is part of OpenLI. @@ -88,6 +88,10 @@ static inline int ip_intercept_equal(ipintercept_t *a, ipintercept_t *b) { return 0; } + if (a->common.tomediate != b->common.tomediate) { + return 0; + } + if (strcmp(a->common.targetagency, b->common.targetagency) != 0) { return 0; } @@ -195,12 +199,174 @@ static int reload_coreservers(provision_state_t *state, coreserver_t *currserv, return 0; } +static int reload_emailintercepts(provision_state_t *currstate, + emailintercept_t *curremail, emailintercept_t *newemail, + prov_intercept_conf_t *intconf, int droppedcols, int droppedmeds) { + + emailintercept_t *mailint, *tmp, *newequiv; + liid_hash_t *h = NULL; + char *target_info; + + /* TODO error handling in the "inform other components about changes" + * functions? + */ + HASH_ITER(hh_liid, curremail, mailint, tmp) { + HASH_FIND(hh_liid, newemail, mailint->common.liid, + mailint->common.liid_len, newequiv); + + if (!newequiv) { + /* Intercept has been withdrawn entirely */ + if (!droppedcols) { + halt_existing_intercept(currstate, (void *)mailint, + OPENLI_PROTO_HALT_EMAILINTERCEPT); + } + remove_liid_mapping(currstate, mailint->common.liid, + mailint->common.liid_len, droppedmeds); + if (!droppedmeds) { + target_info = list_email_targets(mailint, 256); + announce_hi1_notification_to_mediators(currstate, + &(mailint->common), target_info, + HI1_LI_DEACTIVATED); + if (target_info) { + free(target_info); + } + } + continue; + } else { + int intsame = email_intercept_equal(mailint, newequiv); + int agencychanged = strcmp(mailint->common.targetagency, + newequiv->common.targetagency); + int changedtargets = compare_email_targets(currstate, mailint, + newequiv); + + newequiv->common.hi1_seqno = mailint->common.hi1_seqno; + newequiv->awaitingconfirm = 0; + + if (intsame && !agencychanged && changedtargets == 0) { + continue; + } + + logger(LOG_INFO, "OpenLI provisioner: Details for Email intercept %s have changed -- updating collectors", + mailint->common.liid); + + if (!droppedmeds) { + if (agencychanged) { + target_info = list_email_targets(mailint, 256); + announce_hi1_notification_to_mediators(currstate, + &(mailint->common), target_info, + HI1_LI_DEACTIVATED); + if (target_info) { + free(target_info); + } + newequiv->common.hi1_seqno = 0; + target_info = list_email_targets(newequiv, 256); + announce_hi1_notification_to_mediators(currstate, + &(newequiv->common), target_info, + HI1_LI_ACTIVATED); + if (target_info) { + free(target_info); + } + } else { + target_info = list_email_targets(newequiv, 256); + 
announce_hi1_notification_to_mediators(currstate, + &(newequiv->common), target_info, + HI1_LI_MODIFIED); + if (target_info) { + free(target_info); + } + } + } + + if (!intsame && !droppedcols) { + modify_existing_intercept_options(currstate, (void *)newequiv, + OPENLI_PROTO_MODIFY_EMAILINTERCEPT); + } + + if (agencychanged) { + remove_liid_mapping(currstate, mailint->common.liid, + mailint->common.liid_len, droppedmeds); + + h = add_liid_mapping(intconf, newequiv->common.liid, + newequiv->common.targetagency); + if (!droppedmeds && announce_liidmapping_to_mediators( + currstate, h) == -1) { + logger(LOG_INFO, + "OpenLI provisioner: unable to announce new agency for Email intercept to mediators."); + return -1; + } + } + } + } + + HASH_ITER(hh_liid, newemail, mailint, tmp) { + int skip = 0; + prov_agency_t *lea = NULL; + + if (!mailint->awaitingconfirm) { + continue; + } + + if (strcmp(mailint->common.targetagency, "pcapdisk") != 0) { + HASH_FIND_STR(intconf->leas, mailint->common.targetagency, lea); + if (lea == NULL) { + skip = 1; + } + } + + if (skip) { + continue; + } + + /* Add the LIID mapping */ + h = add_liid_mapping(intconf, mailint->common.liid, + mailint->common.targetagency); + + target_info = list_email_targets(mailint, 256); + if (!droppedmeds && announce_hi1_notification_to_mediators(currstate, + &(mailint->common), target_info, + HI1_LI_ACTIVATED) == -1) { + if (target_info) { + free(target_info); + } + logger(LOG_INFO, + "OpenLI provisioner: unable to send HI1 notification for new Email intercept to mediators."); + return -1; + } + if (target_info) { + free(target_info); + } + + if (!droppedmeds && announce_liidmapping_to_mediators(currstate, + h) == -1) { + logger(LOG_INFO, + "OpenLI provisioner: unable to announce new Email intercept to mediators."); + return -1; + } + + if (!droppedcols && announce_single_intercept(currstate, + (void *)mailint, push_emailintercept_onto_net_buffer) == -1) { + logger(LOG_INFO, + "OpenLI provisioner: unable to announce new Email intercept to collectors."); + return -1; + } + + if (!droppedcols && announce_all_email_targets(currstate, mailint) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: error pushing targets for Email intercept %s onto buffer.", mailint->common.liid); + return -1; + } + } + + return 0; +} + static int reload_voipintercepts(provision_state_t *currstate, voipintercept_t *currvoip, voipintercept_t *newvoip, prov_intercept_conf_t *intconf, int droppedcols, int droppedmeds) { voipintercept_t *voipint, *tmp, *newequiv; liid_hash_t *h = NULL; + char *target_info; /* TODO error handling in the "inform other components about changes" * functions? 
@@ -222,8 +388,13 @@ static int reload_voipintercepts(provision_state_t *currstate, remove_liid_mapping(currstate, voipint->common.liid, voipint->common.liid_len, droppedmeds); if (!droppedmeds) { + target_info = list_sip_targets(voipint, 256); announce_hi1_notification_to_mediators(currstate, - &(voipint->common), HI1_LI_DEACTIVATED); + &(voipint->common), target_info, + HI1_LI_DEACTIVATED); + if (target_info) { + free(target_info); + } } continue; } else { @@ -245,14 +416,29 @@ static int reload_voipintercepts(provision_state_t *currstate, if (!droppedmeds) { if (agencychanged) { + target_info = list_sip_targets(voipint, 256); announce_hi1_notification_to_mediators(currstate, - &(voipint->common), HI1_LI_DEACTIVATED); + &(voipint->common), target_info, + HI1_LI_DEACTIVATED); + if (target_info) { + free(target_info); + } newequiv->common.hi1_seqno = 0; + target_info = list_sip_targets(newequiv, 256); announce_hi1_notification_to_mediators(currstate, - &(newequiv->common), HI1_LI_ACTIVATED); + &(newequiv->common), target_info, + HI1_LI_ACTIVATED); + if (target_info) { + free(target_info); + } } else { + target_info = list_sip_targets(newequiv, 256); announce_hi1_notification_to_mediators(currstate, - &(newequiv->common), HI1_LI_MODIFIED); + &(newequiv->common), target_info, + HI1_LI_MODIFIED); + if (target_info) { + free(target_info); + } } } @@ -270,7 +456,7 @@ static int reload_voipintercepts(provision_state_t *currstate, if (!droppedmeds && announce_liidmapping_to_mediators( currstate, h) == -1) { logger(LOG_INFO, - "OpenLI provisioner: unable to announce new agency for IP intercept to mediators."); + "OpenLI provisioner: unable to announce new agency for VOIP intercept to mediators."); return -1; } } @@ -304,12 +490,20 @@ static int reload_voipintercepts(provision_state_t *currstate, h = add_liid_mapping(intconf, voipint->common.liid, voipint->common.targetagency); + target_info = list_sip_targets(voipint, 256); if (!droppedmeds && announce_hi1_notification_to_mediators(currstate, - &(voipint->common), HI1_LI_ACTIVATED) == -1) { + &(voipint->common), target_info, + HI1_LI_ACTIVATED) == -1) { + if (target_info) { + free(target_info); + } logger(LOG_INFO, "OpenLI provisioner: unable to send HI1 notification for new VOIP intercept to mediators."); return -1; } + if (target_info) { + free(target_info); + } if (!droppedmeds && announce_liidmapping_to_mediators(currstate, h) == -1) { @@ -358,7 +552,7 @@ static int reload_ipintercepts(provision_state_t *currstate, ipint->common.liid_len, droppedmeds); if (!droppedmeds) { announce_hi1_notification_to_mediators(currstate, - &(ipint->common), HI1_LI_DEACTIVATED); + &(ipint->common), ipint->username, HI1_LI_DEACTIVATED); } logger(LOG_INFO, "OpenLI provisioner: LIID %s has been withdrawn", ipint->common.liid); @@ -382,13 +576,16 @@ static int reload_ipintercepts(provision_state_t *currstate, if (!droppedmeds) { if (agencychanged) { announce_hi1_notification_to_mediators(currstate, - &(ipint->common), HI1_LI_DEACTIVATED); + &(ipint->common), ipint->username, + HI1_LI_DEACTIVATED); newequiv->common.hi1_seqno = 0; announce_hi1_notification_to_mediators(currstate, - &(newequiv->common), HI1_LI_ACTIVATED); + &(newequiv->common), newequiv->username, + HI1_LI_ACTIVATED); } else { announce_hi1_notification_to_mediators(currstate, - &(newequiv->common), HI1_LI_MODIFIED); + &(newequiv->common), newequiv->username, + HI1_LI_MODIFIED); } } @@ -437,7 +634,7 @@ static int reload_ipintercepts(provision_state_t *currstate, ipint->common.targetagency); if 
(!droppedmeds && announce_hi1_notification_to_mediators(currstate, - &(ipint->common), HI1_LI_ACTIVATED) == -1) { + &(ipint->common), ipint->username, HI1_LI_ACTIVATED) == -1) { logger(LOG_INFO, "OpenLI provisioner: unable to send HI1 notification for new IP intercept to mediators."); return -1; @@ -501,6 +698,24 @@ static int reload_intercept_config(provision_state_t *currstate, return -1; } + if (reload_coreservers(currstate, + currstate->interceptconf.smtpservers, + newconf.smtpservers) < 0) { + return -1; + } + + if (reload_coreservers(currstate, + currstate->interceptconf.imapservers, + newconf.imapservers) < 0) { + return -1; + } + + if (reload_coreservers(currstate, + currstate->interceptconf.pop3servers, + newconf.pop3servers) < 0) { + return -1; + } + if (reload_coreservers(currstate, currstate->interceptconf.gtpservers, newconf.gtpservers) < 0) { return -1; @@ -520,6 +735,13 @@ static int reload_intercept_config(provision_state_t *currstate, return -1; } + if (reload_emailintercepts(currstate, + currstate->interceptconf.emailintercepts, + newconf.emailintercepts, &newconf, + clientchanged, mediatorchanged) < 0) { + return -1; + } + if (reload_ipintercepts(currstate, currstate->interceptconf.ipintercepts, newconf.ipintercepts, &newconf, @@ -761,6 +983,7 @@ int reload_provisioner_config(provision_state_t *currstate) { int tlschanged = 0; int voipoptschanged = 0; int restauthchanged = 0; + char *target_info; if (init_prov_state(&newstate, currstate->conffile) == -1) { logger(LOG_INFO, @@ -820,8 +1043,13 @@ int reload_provisioner_config(provision_state_t *currstate) { modify_existing_intercept_options(currstate, (void *)vint, OPENLI_PROTO_MODIFY_VOIPINTERCEPT); + target_info = list_sip_targets(vint, 256); announce_hi1_notification_to_mediators(currstate, - &(vint->common), HI1_LI_MODIFIED); + &(vint->common), target_info, + HI1_LI_MODIFIED); + if (target_info) { + free(target_info); + } } pthread_mutex_unlock(&(currstate->interceptconf.safelock)); } diff --git a/src/provisioner/provisioner.c b/src/provisioner/provisioner.c index 3272f863..f869dd7b 100644 --- a/src/provisioner/provisioner.c +++ b/src/provisioner/provisioner.c @@ -159,7 +159,11 @@ void init_intercept_config(prov_intercept_conf_t *state) { state->radiusservers = NULL; state->gtpservers = NULL; state->sipservers = NULL; + state->smtpservers = NULL; + state->imapservers = NULL; + state->pop3servers = NULL; state->voipintercepts = NULL; + state->emailintercepts = NULL; state->ipintercepts = NULL; state->liid_map = NULL; state->leas = NULL; @@ -202,6 +206,7 @@ int map_intercepts_to_leas(prov_intercept_conf_t *conf) { int failed = 0; ipintercept_t *ipint, *iptmp; voipintercept_t *vint; + emailintercept_t *mailint; /* Do IP Intercepts */ HASH_ITER(hh_liid, conf->ipintercepts, ipint, iptmp) { @@ -214,6 +219,12 @@ int map_intercepts_to_leas(prov_intercept_conf_t *conf) { add_liid_mapping(conf, vint->common.liid, vint->common.targetagency); } + for (mailint = conf->emailintercepts; mailint != NULL; + mailint = mailint->hh_liid.next) { + add_liid_mapping(conf, mailint->common.liid, + mailint->common.targetagency); + } + /* Sort the final mapping nicely */ HASH_SORT(conf->liid_map, liid_hash_sort); @@ -640,8 +651,12 @@ void clear_intercept_state(prov_intercept_conf_t *conf) { free_all_ipintercepts(&(conf->ipintercepts)); free_all_voipintercepts(&(conf->voipintercepts)); + free_all_emailintercepts(&(conf->emailintercepts)); free_coreserver_list(conf->radiusservers); free_coreserver_list(conf->gtpservers); + 
free_coreserver_list(conf->smtpservers); + free_coreserver_list(conf->imapservers); + free_coreserver_list(conf->pop3servers); free_coreserver_list(conf->sipservers); pthread_mutex_destroy(&(conf->safelock)); @@ -766,6 +781,19 @@ static int push_all_mediators(prov_mediator_t *mediators, net_buffer_t *nb) { return 0; } +static int push_all_email_targets(net_buffer_t *nb, email_target_t *targets, + emailintercept_t *mailint) { + + email_target_t *tgt, *tmp; + + HASH_ITER(hh, targets, tgt, tmp) { + if (push_email_target_onto_net_buffer(nb, tgt, mailint) < 0) { + return -1; + } + } + return 0; +} + static int push_all_sip_targets(net_buffer_t *nb, libtrace_list_t *targets, voipintercept_t *vint) { @@ -830,6 +858,43 @@ static int push_all_voipintercepts(provision_state_t *state, return 0; } +static int push_all_emailintercepts(provision_state_t *state, + emailintercept_t *mailintercepts, net_buffer_t *nb, + prov_agency_t *agencies) { + + emailintercept_t *m; + prov_agency_t *lea; + int skip = 0; + + for (m = mailintercepts; m != NULL; m = m->hh_liid.next) { + skip = 0; + if (strcmp(m->common.targetagency, "pcapdisk") != 0) { + HASH_FIND_STR(agencies, m->common.targetagency, lea); + if (lea == NULL) { + skip = 1; + } + } + + if (skip) { + continue; + } + + if (push_emailintercept_onto_net_buffer(nb, m) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: error pushing Email intercept %s onto buffer for writing to collector.", + m->common.liid); + return -1; + } + + if (push_all_email_targets(nb, m->targets, m) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: error pushing targets for Email intercept %s onto buffer.", m->common.liid); + return -1; + } + } + return 0; +} + static int push_all_ipintercepts(ipintercept_t *ipintercepts, net_buffer_t *nb, prov_agency_t *agencies) { @@ -874,7 +939,11 @@ static int respond_collector_auth(provision_state_t *state, HASH_CNT(hh, state->interceptconf.radiusservers) + HASH_CNT(hh, state->interceptconf.gtpservers) + HASH_CNT(hh, state->interceptconf.sipservers) + + HASH_CNT(hh, state->interceptconf.imapservers) + + HASH_CNT(hh, state->interceptconf.pop3servers) + + HASH_CNT(hh, state->interceptconf.smtpservers) + HASH_CNT(hh_liid, state->interceptconf.ipintercepts) + + HASH_CNT(hh_liid, state->interceptconf.emailintercepts) + HASH_CNT(hh_liid, state->interceptconf.voipintercepts) == 0) { pthread_mutex_unlock(&(state->interceptconf.safelock)); return 0; @@ -920,7 +989,31 @@ static int respond_collector_auth(provision_state_t *state, if (push_coreservers(state->interceptconf.sipservers, OPENLI_CORE_SERVER_SIP, outgoing) == -1) { logger(LOG_INFO, - "OpenLI: unable to queue RADIUS server details to be sent to new collector on fd %d", pev->fd); + "OpenLI: unable to queue SIP server details to be sent to new collector on fd %d", pev->fd); + pthread_mutex_unlock(&(state->interceptconf.safelock)); + return -1; + } + + if (push_coreservers(state->interceptconf.smtpservers, + OPENLI_CORE_SERVER_SMTP, outgoing) == -1) { + logger(LOG_INFO, + "OpenLI: unable to queue SMTP server details to be sent to new collector on fd %d", pev->fd); + pthread_mutex_unlock(&(state->interceptconf.safelock)); + return -1; + } + + if (push_coreservers(state->interceptconf.imapservers, + OPENLI_CORE_SERVER_IMAP, outgoing) == -1) { + logger(LOG_INFO, + "OpenLI: unable to queue IMAP server details to be sent to new collector on fd %d", pev->fd); + pthread_mutex_unlock(&(state->interceptconf.safelock)); + return -1; + } + + if (push_coreservers(state->interceptconf.pop3servers, + 
OPENLI_CORE_SERVER_POP3, outgoing) == -1) { + logger(LOG_INFO, + "OpenLI: unable to queue POP3 server details to be sent to new collector on fd %d", pev->fd); pthread_mutex_unlock(&(state->interceptconf.safelock)); return -1; } @@ -938,7 +1031,17 @@ static int respond_collector_auth(provision_state_t *state, state->interceptconf.voipintercepts, outgoing, state->interceptconf.leas) == -1) { logger(LOG_INFO, - "OpenLI: unable to queue VOIP IP intercepts to be sent to new collector on fd %d", + "OpenLI: unable to queue VOIP intercepts to be sent to new collector on fd %d", + pev->fd); + pthread_mutex_unlock(&(state->interceptconf.safelock)); + return -1; + } + + if (push_all_emailintercepts(state, + state->interceptconf.emailintercepts, outgoing, + state->interceptconf.leas) == -1) { + logger(LOG_INFO, + "OpenLI: unable to queue Email intercepts to be sent to new collector on fd %d", pev->fd); pthread_mutex_unlock(&(state->interceptconf.safelock)); return -1; diff --git a/src/provisioner/provisioner.h b/src/provisioner/provisioner.h index c2450f1c..41f3074d 100644 --- a/src/provisioner/provisioner.h +++ b/src/provisioner/provisioner.h @@ -193,10 +193,18 @@ typedef struct prov_intercept_conf { coreserver_t *gtpservers; /** The set of known SIP servers that will be provided to collectors */ coreserver_t *sipservers; + /** The set of known SMTP servers that will be provided to collectors */ + coreserver_t *smtpservers; + /** The set of known IMAP servers that will be provided to collectors */ + coreserver_t *imapservers; + /** The set of known POP3 servers that will be provided to collectors */ + coreserver_t *pop3servers; /** The set of VOIP intercepts that we are currently running */ voipintercept_t *voipintercepts; /** The set of IP intercepts that we are currently running */ ipintercept_t *ipintercepts; + /** The set of Email intercepts that we are currently running */ + emailintercept_t *emailintercepts; /** The set of LEAs that are potential intercept recipients */ prov_agency_t *leas; /** A map of LIIDs to their destination LEAs */ @@ -330,6 +338,8 @@ int emit_intercept_config(char *configfile, prov_intercept_conf_t *conf); /* Implemented in clientupdates.c */ int compare_sip_targets(provision_state_t *currstate, voipintercept_t *existing, voipintercept_t *reload); +int compare_email_targets(provision_state_t *currstate, + emailintercept_t *existing, emailintercept_t *reload); int announce_default_radius_username(provision_state_t *state, default_radius_user_t *raduser); int withdraw_default_radius_username(provision_state_t *state, @@ -355,6 +365,12 @@ int announce_liidmapping_to_mediators(provision_state_t *state, liid_hash_t *liidmap); int announce_coreserver_change(provision_state_t *state, coreserver_t *cs, uint8_t isnew); +int announce_email_target_change(provision_state_t *state, + email_target_t *tgt, emailintercept_t *mailint, uint8_t isnew); +int announce_all_email_targets(provision_state_t *state, + emailintercept_t *mailint); +int remove_all_email_targets(provision_state_t *state, + emailintercept_t *mailint); int announce_sip_target_change(provision_state_t *state, openli_sip_identity_t *sipid, voipintercept_t *vint, uint8_t isnew); int announce_all_sip_targets(provision_state_t *state, voipintercept_t *vint); @@ -364,7 +380,7 @@ int announce_single_intercept(provision_state_t *state, liid_hash_t *add_liid_mapping(prov_intercept_conf_t *conf, char *liid, char *agency); int announce_hi1_notification_to_mediators(provision_state_t *state, - intercept_common_t *intcomm, 
hi1_notify_t not_type); + intercept_common_t *intcomm, char *target_id, hi1_notify_t not_type); /* Implemented in hup_reload.c */ int reload_provisioner_config(provision_state_t *state); diff --git a/src/provisioner/updateserver.c b/src/provisioner/updateserver.c index 1539cc0f..7ca1759b 100644 --- a/src/provisioner/updateserver.c +++ b/src/provisioner/updateserver.c @@ -50,6 +50,25 @@ #define OPAQUE_TOKEN "a7844291bd990a17bfe389e1ccb0981ed6d187a" +const char *update_success_page = + "OpenLI provisioner configuration was successfully updated.\n"; + +const char *update_failure_page_start = + "

OpenLI provisioner configuration failed."; +const char *update_failure_page_end = "\n"; + +const char *get_not_implemented = + "OpenLI provisioner does not support fetching intercept config (yet).\n"; + +const char *auth_failed = + "Authentication failed\n"; + +const char *unsupported_operation = + "OpenLI provisioner does not support that type of request.\n"; + +const char *get404 = + "OpenLI provisioner was unable to find the requested resource in its running intercept configuration.\n"; + int init_restauth_db(provision_state_t *state) { #ifdef HAVE_SQLCIPHER int rc; @@ -251,12 +270,27 @@ static int update_configuration_delete(update_con_info_t *cinfo, ret = remove_coreserver(cinfo, state, target, OPENLI_CORE_SERVER_GTP); break; + case TARGET_SMTPSERVER: + ret = remove_coreserver(cinfo, state, target, + OPENLI_CORE_SERVER_SMTP); + break; + case TARGET_IMAPSERVER: + ret = remove_coreserver(cinfo, state, target, + OPENLI_CORE_SERVER_IMAP); + break; + case TARGET_POP3SERVER: + ret = remove_coreserver(cinfo, state, target, + OPENLI_CORE_SERVER_POP3); + break; case TARGET_IPINTERCEPT: ret = remove_ip_intercept(cinfo, state, target); break; case TARGET_VOIPINTERCEPT: ret = remove_voip_intercept(cinfo, state, target); break; + case TARGET_EMAILINTERCEPT: + ret = remove_email_intercept(cinfo, state, target); + break; case TARGET_DEFAULTRADIUS: ret = remove_defaultradius(cinfo, state, target); break; @@ -307,12 +341,24 @@ static json_object *create_get_response(update_con_info_t *cinfo, case TARGET_GTPSERVER: jobj = get_coreservers(cinfo, state, OPENLI_CORE_SERVER_GTP); break; + case TARGET_SMTPSERVER: + jobj = get_coreservers(cinfo, state, OPENLI_CORE_SERVER_SMTP); + break; + case TARGET_IMAPSERVER: + jobj = get_coreservers(cinfo, state, OPENLI_CORE_SERVER_IMAP); + break; + case TARGET_POP3SERVER: + jobj = get_coreservers(cinfo, state, OPENLI_CORE_SERVER_POP3); + break; case TARGET_IPINTERCEPT: jobj = get_ip_intercept(cinfo, state, tgtptr); break; case TARGET_VOIPINTERCEPT: jobj = get_voip_intercept(cinfo, state, tgtptr); break; + case TARGET_EMAILINTERCEPT: + jobj = get_email_intercept(cinfo, state, tgtptr); + break; } @@ -360,6 +406,15 @@ static int update_configuration_post(update_con_info_t *cinfo, case TARGET_GTPSERVER: ret = add_new_coreserver(cinfo, state, OPENLI_CORE_SERVER_GTP); break; + case TARGET_SMTPSERVER: + ret = add_new_coreserver(cinfo, state, OPENLI_CORE_SERVER_SMTP); + break; + case TARGET_IMAPSERVER: + ret = add_new_coreserver(cinfo, state, OPENLI_CORE_SERVER_IMAP); + break; + case TARGET_POP3SERVER: + ret = add_new_coreserver(cinfo, state, OPENLI_CORE_SERVER_POP3); + break; case TARGET_IPINTERCEPT: if (strcmp(method, "POST") == 0) { ret = add_new_ipintercept(cinfo, state); @@ -374,6 +429,13 @@ static int update_configuration_post(update_con_info_t *cinfo, ret = modify_voipintercept(cinfo, state); } break; + case TARGET_EMAILINTERCEPT: + if (strcmp(method, "POST") == 0) { + ret = add_new_emailintercept(cinfo, state); + } else { + ret = modify_emailintercept(cinfo, state); + } + break; } @@ -552,14 +614,14 @@ static int authenticate_request(provision_state_t *provstate, return MHD_YES; } -int handle_update_request(void *cls, struct MHD_Connection *conn, +MHD_RESULT handle_update_request(void *cls, struct MHD_Connection *conn, const char *url, const char *method, const char *version, const char *upload_data, size_t *upload_data_size, void **con_cls) { update_con_info_t *cinfo; provision_state_t *provstate = (provision_state_t *)cls; - int ret; + MHD_RESULT ret; const char 
*realm = "provisioner@openli.nz"; if (*con_cls == NULL) { @@ -586,10 +648,18 @@ int handle_update_request(void *cls, struct MHD_Connection *conn, cinfo->target = TARGET_RADIUSSERVER; } else if (strncmp(url, "/gtpserver", 10) == 0) { cinfo->target = TARGET_GTPSERVER; + } else if (strncmp(url, "/smtpserver", 11) == 0) { + cinfo->target = TARGET_SMTPSERVER; + } else if (strncmp(url, "/imapserver", 11) == 0) { + cinfo->target = TARGET_IMAPSERVER; + } else if (strncmp(url, "/pop3server", 11) == 0) { + cinfo->target = TARGET_POP3SERVER; } else if (strncmp(url, "/ipintercept", 12) == 0) { cinfo->target = TARGET_IPINTERCEPT; } else if (strncmp(url, "/voipintercept", 14) == 0) { cinfo->target = TARGET_VOIPINTERCEPT; + } else if (strncmp(url, "/emailintercept", 15) == 0) { + cinfo->target = TARGET_EMAILINTERCEPT; } else if (strncmp(url, "/defaultradius", 14) == 0) { cinfo->target = TARGET_DEFAULTRADIUS; } else { diff --git a/src/provisioner/updateserver.h b/src/provisioner/updateserver.h index 8d830b4f..1d737faa 100644 --- a/src/provisioner/updateserver.h +++ b/src/provisioner/updateserver.h @@ -31,6 +31,12 @@ #include #include "provisioner.h" +#if MHD_VERSION < 0x0097002 +#define MHD_RESULT int +#else +#define MHD_RESULT enum MHD_Result +#endif + typedef struct con_info { int connectiontype; int answercode; @@ -51,28 +57,22 @@ enum { TARGET_VOIPINTERCEPT, TARGET_GTPSERVER, TARGET_DEFAULTRADIUS, + TARGET_EMAILINTERCEPT, + TARGET_SMTPSERVER, + TARGET_IMAPSERVER, + TARGET_POP3SERVER, }; -static const char *update_success_page = - "OpenLI provisioner configuration was successfully updated.\n"; - -static const char *update_failure_page_start = - "

OpenLI provisioner configuration failed."; -static const char *update_failure_page_end = "\n"; - -static const char *get_not_implemented = - "OpenLI provisioner does not support fetching intercept config (yet).\n"; +extern const char *update_success_page; +extern const char *update_failure_page_start; +extern const char *update_failure_page_end; -static const char *auth_failed = - "Authentication failed\n"; +extern const char *get_not_implemented; +extern const char *auth_failed; +extern const char *unsupported_operation; +extern const char *get404; -static const char *unsupported_operation = - "OpenLI provisioner does not support that type of request.\n"; - -static const char *get404 = - "OpenLI provisioner was unable to find the requested resource in its running intercept configuration.\n"; - -int handle_update_request(void *cls, struct MHD_Connection *conn, +MHD_RESULT handle_update_request(void *cls, struct MHD_Connection *conn, const char *url, const char *method, const char *version, const char *upload_data, size_t *upload_data_size, void **con_cls); @@ -94,17 +94,21 @@ int remove_ip_intercept(update_con_info_t *cinfo, provision_state_t *state, const char *idstr); int remove_voip_intercept(update_con_info_t *cinfo, provision_state_t *state, const char *idstr); +int remove_email_intercept(update_con_info_t *cinfo, provision_state_t *state, + const char *idstr); int add_new_agency(update_con_info_t *cinfo, provision_state_t *state); int add_new_defaultradius(update_con_info_t *cinfo, provision_state_t *state); int add_new_voipintercept(update_con_info_t *cinfo, provision_state_t *state); int add_new_ipintercept(update_con_info_t *cinfo, provision_state_t *state); +int add_new_emailintercept(update_con_info_t *cinfo, provision_state_t *state); int add_new_coreserver(update_con_info_t *cinfo, provision_state_t *state, uint8_t srvtype); int modify_agency(update_con_info_t *cinfo, provision_state_t *state); int modify_ipintercept(update_con_info_t *cinfo, provision_state_t *state); int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state); +int modify_emailintercept(update_con_info_t *cinfo, provision_state_t *state); struct json_object *get_agency(update_con_info_t *cinfo, provision_state_t *state, char *target); @@ -116,5 +120,7 @@ struct json_object *get_voip_intercept(update_con_info_t *cinfo, provision_state_t *state, char *target); struct json_object *get_ip_intercept(update_con_info_t *cinfo, provision_state_t *state, char *target); +struct json_object *get_email_intercept(update_con_info_t *cinfo, + provision_state_t *state, char *target); #endif // vim: set sw=4 tabstop=4 softtabstop=4 expandtab : diff --git a/src/provisioner/updateserver_jsoncreation.c b/src/provisioner/updateserver_jsoncreation.c index 197db5e6..776f47a9 100644 --- a/src/provisioner/updateserver_jsoncreation.c +++ b/src/provisioner/updateserver_jsoncreation.c @@ -70,7 +70,7 @@ static json_object *convert_ipintercept_to_json(ipintercept_t *ipint) { json_object *jobj; json_object *liid, *authcc, *delivcc, *agencyid, *mediator; json_object *vendmirrorid, *user, *accesstype, *radiusident; - json_object *staticips, *starttime, *endtime; + json_object *staticips, *starttime, *endtime, *tomediate; jobj = json_object_new_object(); @@ -79,6 +79,7 @@ static json_object *convert_ipintercept_to_json(ipintercept_t *ipint) { delivcc = json_object_new_string(ipint->common.delivcc); agencyid = json_object_new_string(ipint->common.targetagency); mediator = json_object_new_int(ipint->common.destid); + tomediate 
= json_object_new_int(ipint->common.tomediate); user = json_object_new_string(ipint->username); accesstype = json_object_new_string( get_access_type_string(ipint->accesstype)); @@ -90,6 +91,7 @@ static json_object *convert_ipintercept_to_json(ipintercept_t *ipint) { json_object_object_add(jobj, "delivcc", delivcc); json_object_object_add(jobj, "agencyid", agencyid); json_object_object_add(jobj, "mediator", mediator); + json_object_object_add(jobj, "outputhandovers", tomediate); json_object_object_add(jobj, "user", user); json_object_object_add(jobj, "accesstype", accesstype); json_object_object_add(jobj, "radiusident", radiusident); @@ -133,10 +135,57 @@ static json_object *convert_ipintercept_to_json(ipintercept_t *ipint) { return jobj; } +static json_object *convert_emailintercept_to_json(emailintercept_t *mailint) { + json_object *jobj; + json_object *liid, *authcc, *delivcc, *agencyid, *mediator; + json_object *targets, *starttime, *endtime, *tomediate; + email_target_t *tgt, *tmp; + + jobj = json_object_new_object(); + + liid = json_object_new_string(mailint->common.liid); + authcc = json_object_new_string(mailint->common.authcc); + delivcc = json_object_new_string(mailint->common.delivcc); + agencyid = json_object_new_string(mailint->common.targetagency); + mediator = json_object_new_int(mailint->common.destid); + tomediate = json_object_new_int(mailint->common.tomediate); + targets = json_object_new_array(); + + json_object_object_add(jobj, "liid", liid); + json_object_object_add(jobj, "authcc", authcc); + json_object_object_add(jobj, "delivcc", delivcc); + json_object_object_add(jobj, "agencyid", agencyid); + json_object_object_add(jobj, "mediator", mediator); + json_object_object_add(jobj, "outputhandovers", tomediate); + + if (mailint->common.tostart_time != 0) { + starttime = json_object_new_int(mailint->common.tostart_time); + json_object_object_add(jobj, "starttime", starttime); + } + + if (mailint->common.toend_time != 0) { + endtime = json_object_new_int(mailint->common.toend_time); + json_object_object_add(jobj, "endtime", endtime); + } + + HASH_ITER(hh, mailint->targets, tgt, tmp) { + json_object *jsontgt, *address; + + jsontgt = json_object_new_object(); + address = json_object_new_string(tgt->address); + json_object_object_add(jsontgt, "address", address); + + json_object_array_add(targets, jsontgt); + } + + json_object_object_add(jobj, "targets", targets); + return jobj; +} + static json_object *convert_voipintercept_to_json(voipintercept_t *vint) { json_object *jobj; json_object *liid, *authcc, *delivcc, *agencyid, *mediator; - json_object *siptargets, *starttime, *endtime; + json_object *siptargets, *starttime, *endtime, *tomediate; libtrace_list_node_t *n; jobj = json_object_new_object(); @@ -146,6 +195,7 @@ static json_object *convert_voipintercept_to_json(voipintercept_t *vint) { delivcc = json_object_new_string(vint->common.delivcc); agencyid = json_object_new_string(vint->common.targetagency); mediator = json_object_new_int(vint->common.destid); + tomediate = json_object_new_int(vint->common.tomediate); siptargets = json_object_new_array(); json_object_object_add(jobj, "liid", liid); @@ -153,6 +203,7 @@ static json_object *convert_voipintercept_to_json(voipintercept_t *vint) { json_object_object_add(jobj, "delivcc", delivcc); json_object_object_add(jobj, "agencyid", agencyid); json_object_object_add(jobj, "mediator", mediator); + json_object_object_add(jobj, "outputhandovers", tomediate); if (vint->common.tostart_time != 0) { starttime = 
json_object_new_int(vint->common.tostart_time); @@ -230,6 +281,15 @@ json_object *get_coreservers(update_con_info_t *cinfo, provision_state_t *state, case OPENLI_CORE_SERVER_SIP: toiter = state->interceptconf.sipservers; break; + case OPENLI_CORE_SERVER_SMTP: + toiter = state->interceptconf.smtpservers; + break; + case OPENLI_CORE_SERVER_IMAP: + toiter = state->interceptconf.imapservers; + break; + case OPENLI_CORE_SERVER_POP3: + toiter = state->interceptconf.pop3servers; + break; case OPENLI_CORE_SERVER_RADIUS: toiter = state->interceptconf.radiusservers; break; @@ -304,6 +364,32 @@ json_object *get_voip_intercept(update_con_info_t *cinfo, return jarray; } +json_object *get_email_intercept(update_con_info_t *cinfo, + provision_state_t *state, char *target) { + + emailintercept_t *mailint, *tmp; + json_object *jarray, *jobj; + + if (target) { + HASH_FIND(hh_liid, state->interceptconf.emailintercepts, target, + strlen(target), mailint); + if (!mailint) { + return NULL; + } + + jobj = convert_emailintercept_to_json(mailint); + return jobj; + } + + jarray = json_object_new_array(); + HASH_ITER(hh_liid, state->interceptconf.emailintercepts, mailint, tmp) { + jobj = convert_emailintercept_to_json(mailint); + json_object_array_add(jarray, jobj); + } + + return jarray; +} + json_object *get_ip_intercept(update_con_info_t *cinfo, provision_state_t *state, char *target) { diff --git a/src/provisioner/updateserver_jsonparsing.c b/src/provisioner/updateserver_jsonparsing.c index 76e692b3..8e9eebf8 100644 --- a/src/provisioner/updateserver_jsonparsing.c +++ b/src/provisioner/updateserver_jsonparsing.c @@ -57,6 +57,8 @@ struct json_intercept { struct json_object *endtime; struct json_object *staticips; struct json_object *siptargets; + struct json_object *emailtargets; + struct json_object *tomediate; }; #define EXTRACT_JSON_INT_PARAM(name, uptype, jsonobj, dest, errflag, force) \ @@ -181,9 +183,11 @@ static inline void extract_intercept_json_objects( json_object_object_get_ex(parsed, "radiusident", &(ipjson->radiusident)); json_object_object_get_ex(parsed, "starttime", &(ipjson->starttime)); json_object_object_get_ex(parsed, "endtime", &(ipjson->endtime)); + json_object_object_get_ex(parsed, "outputhandovers", &(ipjson->tomediate)); json_object_object_get_ex(parsed, "vendmirrorid", &(ipjson->vendmirrorid)); json_object_object_get_ex(parsed, "staticips", &(ipjson->staticips)); json_object_object_get_ex(parsed, "siptargets", &(ipjson->siptargets)); + json_object_object_get_ex(parsed, "targets", &(ipjson->emailtargets)); } static inline int compare_intercept_times(intercept_common_t *latest, @@ -219,6 +223,7 @@ int remove_voip_intercept(update_con_info_t *cinfo, provision_state_t *state, const char *idstr) { voipintercept_t *found; + char *target_info; HASH_FIND(hh_liid, state->interceptconf.voipintercepts, idstr, strlen(idstr), found); @@ -229,9 +234,13 @@ int remove_voip_intercept(update_con_info_t *cinfo, provision_state_t *state, OPENLI_PROTO_HALT_VOIPINTERCEPT); remove_liid_mapping(state, found->common.liid, found->common.liid_len, 0); + target_info = list_sip_targets(found, 256); announce_hi1_notification_to_mediators(state, &(found->common), - HI1_LI_DEACTIVATED); + target_info, HI1_LI_DEACTIVATED); free_single_voipintercept(found); + if (target_info) { + free(target_info); + } logger(LOG_INFO, "OpenLI: removed VOIP intercept '%s' via update socket.", idstr); @@ -240,6 +249,36 @@ int remove_voip_intercept(update_con_info_t *cinfo, provision_state_t *state, return 0; } +int 
remove_email_intercept(update_con_info_t *cinfo, provision_state_t *state, + const char *idstr) { + + emailintercept_t *found; + char *target_info; + + HASH_FIND(hh_liid, state->interceptconf.emailintercepts, idstr, + strlen(idstr), found); + + if (found) { + HASH_DELETE(hh_liid, state->interceptconf.emailintercepts, found); + halt_existing_intercept(state, (void *)found, + OPENLI_PROTO_HALT_EMAILINTERCEPT); + remove_liid_mapping(state, found->common.liid, found->common.liid_len, + 0); + target_info = list_email_targets(found, 256); + announce_hi1_notification_to_mediators(state, &(found->common), + target_info, HI1_LI_DEACTIVATED); + free_single_emailintercept(found); + if (target_info) { + free(target_info); + } + logger(LOG_INFO, + "OpenLI: removed Email intercept '%s' via update socket.", + idstr); + return 1; + } + return 0; +} + int remove_ip_intercept(update_con_info_t *cinfo, provision_state_t *state, const char *idstr) { @@ -255,7 +294,7 @@ int remove_ip_intercept(update_con_info_t *cinfo, provision_state_t *state, remove_liid_mapping(state, found->common.liid, found->common.liid_len, 0); announce_hi1_notification_to_mediators(state, &(found->common), - HI1_LI_DEACTIVATED); + found->username, HI1_LI_DEACTIVATED); free_single_ipintercept(found); logger(LOG_INFO, "OpenLI: removed IP intercept '%s' via update socket.", @@ -308,19 +347,34 @@ int remove_defaultradius(update_con_info_t *cinfo, provision_state_t *state, int remove_coreserver(update_con_info_t *cinfo, provision_state_t *state, const char *idstr, uint8_t srvtype) { + char search[1024]; coreserver_t *found = NULL; coreserver_t **src; + snprintf(search, 1024, "%s-%s", idstr, coreserver_type_to_string(srvtype)); + if (srvtype == OPENLI_CORE_SERVER_SIP) { - HASH_FIND(hh, state->interceptconf.sipservers, idstr, strlen(idstr), + HASH_FIND(hh, state->interceptconf.sipservers, search, strlen(search), found); src = &(state->interceptconf.sipservers); } else if (srvtype == OPENLI_CORE_SERVER_RADIUS) { - HASH_FIND(hh, state->interceptconf.radiusservers, idstr, strlen(idstr), - found); + HASH_FIND(hh, state->interceptconf.radiusservers, search, + strlen(search), found); src = &(state->interceptconf.radiusservers); + } else if (srvtype == OPENLI_CORE_SERVER_SMTP) { + HASH_FIND(hh, state->interceptconf.smtpservers, search, strlen(search), + found); + src = &(state->interceptconf.smtpservers); + } else if (srvtype == OPENLI_CORE_SERVER_IMAP) { + HASH_FIND(hh, state->interceptconf.imapservers, search, strlen(search), + found); + src = &(state->interceptconf.imapservers); + } else if (srvtype == OPENLI_CORE_SERVER_POP3) { + HASH_FIND(hh, state->interceptconf.pop3servers, search, strlen(search), + found); + src = &(state->interceptconf.pop3servers); } else if (srvtype == OPENLI_CORE_SERVER_GTP) { - HASH_FIND(hh, state->interceptconf.gtpservers, idstr, strlen(idstr), + HASH_FIND(hh, state->interceptconf.gtpservers, search, strlen(search), found); src = &(state->interceptconf.gtpservers); } @@ -329,9 +383,12 @@ int remove_coreserver(update_con_info_t *cinfo, provision_state_t *state, HASH_DEL(*src, found); announce_coreserver_change(state, found, false); free_single_coreserver(found); - logger(LOG_INFO, "OpenLI: removed %s server via update socket.", - coreserver_type_to_string(srvtype)); + logger(LOG_INFO, "OpenLI: removed %s server %s via update socket.", + coreserver_type_to_string(srvtype), idstr); return 1; + } else { + logger(LOG_INFO, "OpenLI: unable to remove %s server %s via update socket.", + coreserver_type_to_string(srvtype), 
idstr); } return 0; @@ -471,6 +528,15 @@ int add_new_coreserver(update_con_info_t *cinfo, provision_state_t *state, } else if (srvtype == OPENLI_CORE_SERVER_RADIUS) { HASH_FIND(hh, state->interceptconf.radiusservers, new_cs->serverkey, strlen(new_cs->serverkey), found); + } else if (srvtype == OPENLI_CORE_SERVER_SMTP) { + HASH_FIND(hh, state->interceptconf.smtpservers, new_cs->serverkey, + strlen(new_cs->serverkey), found); + } else if (srvtype == OPENLI_CORE_SERVER_IMAP) { + HASH_FIND(hh, state->interceptconf.imapservers, new_cs->serverkey, + strlen(new_cs->serverkey), found); + } else if (srvtype == OPENLI_CORE_SERVER_POP3) { + HASH_FIND(hh, state->interceptconf.pop3servers, new_cs->serverkey, + strlen(new_cs->serverkey), found); } else if (srvtype == OPENLI_CORE_SERVER_GTP) { HASH_FIND(hh, state->interceptconf.gtpservers, new_cs->serverkey, strlen(new_cs->serverkey), found); @@ -485,6 +551,15 @@ int add_new_coreserver(update_con_info_t *cinfo, provision_state_t *state, } else if (srvtype == OPENLI_CORE_SERVER_RADIUS) { HASH_ADD_KEYPTR(hh, state->interceptconf.radiusservers, new_cs->serverkey, strlen(new_cs->serverkey), new_cs); + } else if (srvtype == OPENLI_CORE_SERVER_SMTP) { + HASH_ADD_KEYPTR(hh, state->interceptconf.smtpservers, + new_cs->serverkey, strlen(new_cs->serverkey), new_cs); + } else if (srvtype == OPENLI_CORE_SERVER_IMAP) { + HASH_ADD_KEYPTR(hh, state->interceptconf.imapservers, + new_cs->serverkey, strlen(new_cs->serverkey), new_cs); + } else if (srvtype == OPENLI_CORE_SERVER_POP3) { + HASH_ADD_KEYPTR(hh, state->interceptconf.pop3servers, + new_cs->serverkey, strlen(new_cs->serverkey), new_cs); } else if (srvtype == OPENLI_CORE_SERVER_GTP) { HASH_ADD_KEYPTR(hh, state->interceptconf.gtpservers, new_cs->serverkey, strlen(new_cs->serverkey), new_cs); @@ -516,6 +591,67 @@ int add_new_coreserver(update_con_info_t *cinfo, provision_state_t *state, } +int parse_emailintercept_targets(provision_state_t *state, + emailintercept_t *mailint, struct json_object *jsontargets, + update_con_info_t *cinfo) { + + email_target_t *newtgt, *found; + struct json_object *jobj; + struct json_object *address; + int parseerr = 0, i, tgtcnt; + + newtgt = NULL; + tgtcnt = 0; + + if (json_object_get_type(jsontargets) != json_type_array) { + logger(LOG_INFO, "OpenLI update socket: 'targets' for an Email intercept must be expressed as a JSON array"); + snprintf(cinfo->answerstring, 4096, "%s

The 'targets' members for a Email intercept must be expressed as a JSON array. %s", + update_failure_page_start, update_failure_page_end); + goto targeterr; + } + + for (i = 0; i < json_object_array_length(jsontargets); i++) { + jobj = json_object_array_get_idx(jsontargets, i); + + json_object_object_get_ex(jobj, "address", &(address)); + + newtgt = (email_target_t *)calloc(1, sizeof(email_target_t)); + newtgt->awaitingconfirm = 1; + + EXTRACT_JSON_STRING_PARAM("address", "Email intercept target", + address, newtgt->address, &parseerr, true); + + if (parseerr) { + goto targeterr; + } + + HASH_FIND(hh, mailint->targets, newtgt->address, + strlen(newtgt->address), found); + + if (found) { + free(newtgt->address); + free(newtgt); + continue; + } + + tgtcnt ++; + HASH_ADD_KEYPTR(hh, mailint->targets, newtgt->address, + strlen(newtgt->address), newtgt); + } + + return tgtcnt; + +targeterr: + if (newtgt) { + if (newtgt->address) { + free(newtgt->address); + } + free(newtgt); + } + return -1; + +} + int parse_voipintercept_siptargets(provision_state_t *state, voipintercept_t *vint, struct json_object *jsontargets, update_con_info_t *cinfo) { @@ -699,6 +835,131 @@ static inline void new_intercept_liidmapping(provision_state_t *state, } } +int add_new_emailintercept(update_con_info_t *cinfo, provision_state_t *state) { + struct json_intercept emailjson; + struct json_tokener *tknr; + struct json_object *parsed = NULL; + emailintercept_t *found = NULL; + emailintercept_t *mailint = NULL; + int parseerr = 0, r; + char *target_info; + + INIT_JSON_INTERCEPT_PARSING + extract_intercept_json_objects(&emailjson, parsed); + + mailint = calloc(1, sizeof(emailintercept_t)); + /* XXX does internalid still matter? if not, let's remove it */ + mailint->awaitingconfirm = 1; + mailint->targets = NULL; + mailint->common.tostart_time = 0; + mailint->common.toend_time = 0; + + EXTRACT_JSON_STRING_PARAM("liid", "Email intercept", emailjson.liid, + mailint->common.liid, &parseerr, true); + EXTRACT_JSON_STRING_PARAM("authcc", "Email intercept", emailjson.authcc, + mailint->common.authcc, &parseerr, true); + EXTRACT_JSON_STRING_PARAM("delivcc", "Email intercept", emailjson.delivcc, + mailint->common.delivcc, &parseerr, true); + EXTRACT_JSON_STRING_PARAM("agencyid", "Email intercept", emailjson.agencyid, + mailint->common.targetagency, &parseerr, true); + EXTRACT_JSON_INT_PARAM("outputhandovers", "Email intercept", + emailjson.tomediate, + mailint->common.tomediate, &parseerr, false); + EXTRACT_JSON_INT_PARAM("mediator", "Email intercept", emailjson.mediator, + mailint->common.destid, &parseerr, true); + EXTRACT_JSON_INT_PARAM("starttime", "Email intercept", emailjson.starttime, + mailint->common.tostart_time, &parseerr, false); + EXTRACT_JSON_INT_PARAM("endtime", "Email intercept", emailjson.endtime, + mailint->common.toend_time, &parseerr, false); + + if (parseerr) { + goto cepterr; + } + + r = 0; + if (emailjson.emailtargets != NULL) { + if ((r = parse_emailintercept_targets(state, mailint, + emailjson.emailtargets, cinfo)) < 0) { + goto cepterr; + } + } + + if (r == 0) { + snprintf(cinfo->answerstring, 4096, + "%s

Email intercept %s has been specified without valid target addresses. %s", + update_failure_page_start, mailint->common.liid, + update_failure_page_end); + goto cepterr; + } + + mailint->common.liid_len = strlen(mailint->common.liid); + mailint->common.authcc_len = strlen(mailint->common.authcc); + mailint->common.delivcc_len = strlen(mailint->common.delivcc); + + HASH_FIND(hh_liid, state->interceptconf.emailintercepts, + mailint->common.liid, mailint->common.liid_len, found); + + if (found) { + snprintf(cinfo->answerstring, 4096, + "%s

LIID %s already exists as an Email intercept, please use PUT method if you wish to modify it. %s", + update_failure_page_start, + mailint->common.liid, + update_failure_page_end); + goto cepterr; + } + + HASH_ADD_KEYPTR(hh_liid, state->interceptconf.emailintercepts, + mailint->common.liid, mailint->common.liid_len, mailint); + + new_intercept_liidmapping(state, mailint->common.targetagency, + mailint->common.liid); + + if (announce_single_intercept(state, (void *)mailint, + push_emailintercept_onto_net_buffer) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: unable to announce new Email intercept %s to collectors.", + mailint->common.liid); + } + + if (announce_all_email_targets(state, mailint) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: unable to announce targets for new Email intercept %s to collectors.", + mailint->common.liid); + } + + target_info = list_email_targets(mailint, 256); + if (announce_hi1_notification_to_mediators(state, &(mailint->common), + target_info, HI1_LI_ACTIVATED) < 0) { + logger(LOG_INFO, + "OpenLI provisioner: unable to send HI1 notification for new Email intercept %s to mediators.", + mailint->common.liid); + } + if (target_info) { + free(target_info); + } + + mailint->awaitingconfirm = 0; + logger(LOG_INFO, + "OpenLI provisioner: added new Email intercept %s via update socket.", + mailint->common.liid); + + if (parsed) { + json_object_put(parsed); + } + json_tokener_free(tknr); + return 0; + +cepterr: + if (mailint) { + free_single_emailintercept(mailint); + } + if (parsed) { + json_object_put(parsed); + } + json_tokener_free(tknr); + return -1; +} + int add_new_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { struct json_intercept voipjson; struct json_tokener *tknr; @@ -706,6 +967,7 @@ int add_new_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { voipintercept_t *found = NULL; voipintercept_t *vint = NULL; int parseerr = 0, r; + char *target_info; INIT_JSON_INTERCEPT_PARSING extract_intercept_json_objects(&voipjson, parsed); @@ -732,6 +994,9 @@ int add_new_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { vint->common.delivcc, &parseerr, true); EXTRACT_JSON_STRING_PARAM("agencyid", "VOIP intercept", voipjson.agencyid, vint->common.targetagency, &parseerr, true); + EXTRACT_JSON_INT_PARAM("outputhandovers", "VOIP intercept", + voipjson.tomediate, + vint->common.tomediate, &parseerr, false); EXTRACT_JSON_INT_PARAM("mediator", "VOIP intercept", voipjson.mediator, vint->common.destid, &parseerr, true); EXTRACT_JSON_INT_PARAM("starttime", "VOIP intercept", voipjson.starttime, @@ -794,12 +1059,16 @@ int add_new_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { vint->common.liid); } + target_info = list_sip_targets(vint, 256); if (announce_hi1_notification_to_mediators(state, &(vint->common), - HI1_LI_ACTIVATED) < 0) { + target_info, HI1_LI_ACTIVATED) < 0) { logger(LOG_INFO, "OpenLI provisioner: unable to send HI1 notification for new VOIP intercept %s to mediators.", vint->common.liid); } + if (target_info) { + free(target_info); + } vint->awaitingconfirm = 0; logger(LOG_INFO, @@ -853,6 +1122,9 @@ int add_new_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { ipint->common.delivcc, &parseerr, true); EXTRACT_JSON_STRING_PARAM("agencyid", "IP intercept", ipjson.agencyid, ipint->common.targetagency, &parseerr, true); + EXTRACT_JSON_INT_PARAM("outputhandovers", "IP intercept", + ipjson.tomediate, + ipint->common.tomediate, &parseerr, false); EXTRACT_JSON_INT_PARAM("mediator", 
"IP intercept", ipjson.mediator, ipint->common.destid, &parseerr, true); EXTRACT_JSON_INT_PARAM("vendmirrorid", "IP intercept", ipjson.vendmirrorid, @@ -925,7 +1197,7 @@ int add_new_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { } if (announce_hi1_notification_to_mediators(state, &(ipint->common), - HI1_LI_ACTIVATED) < 0) { + ipint->username, HI1_LI_ACTIVATED) < 0) { logger(LOG_INFO, "OpenLI provisioner: unable to send HI1 notification for new VOIP intercept %s to mediators.", ipint->common.liid); @@ -959,6 +1231,153 @@ int add_new_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { return -1; } +int modify_emailintercept(update_con_info_t *cinfo, provision_state_t *state) { + + struct json_intercept emailjson; + struct json_tokener *tknr; + struct json_object *parsed = NULL; + emailintercept_t *found = NULL; + emailintercept_t *mailint = NULL; + int changedtargets = 0; + email_target_t *tmp; + char *target_info; + + char *liidstr = NULL; + int parseerr = 0, changed = 0, agencychanged = 0, timechanged = 0; + + INIT_JSON_INTERCEPT_PARSING + extract_intercept_json_objects(&emailjson, parsed); + + EXTRACT_JSON_STRING_PARAM("liid", "Email intercept", emailjson.liid, + liidstr, &parseerr, true); + + if (parseerr) { + goto cepterr; + } + + HASH_FIND(hh_liid, state->interceptconf.emailintercepts, liidstr, + strlen(liidstr), found); + + if (!found) { + json_object_put(parsed); + json_tokener_free(tknr); + if (liidstr) { + free(liidstr); + } + return add_new_emailintercept(cinfo, state); + } + + mailint = calloc(1, sizeof(emailintercept_t)); + mailint->awaitingconfirm = 1; + mailint->common.liid = liidstr; + mailint->targets = NULL; + mailint->common.tostart_time = (uint64_t)-1; + mailint->common.toend_time = (uint64_t)-1; + + EXTRACT_JSON_STRING_PARAM("authcc", "Email intercept", emailjson.authcc, + mailint->common.authcc, &parseerr, false); + EXTRACT_JSON_STRING_PARAM("delivcc", "Email intercept", emailjson.delivcc, + mailint->common.delivcc, &parseerr, false); + EXTRACT_JSON_STRING_PARAM("agencyid", "Email intercept", emailjson.agencyid, + mailint->common.targetagency, &parseerr, false); + EXTRACT_JSON_INT_PARAM("outputhandovers", "Email intercept", + emailjson.tomediate, + mailint->common.tomediate, &parseerr, false); + EXTRACT_JSON_INT_PARAM("mediator", "Email intercept", emailjson.mediator, + mailint->common.destid, &parseerr, false); + EXTRACT_JSON_INT_PARAM("starttime", "Email intercept", emailjson.starttime, + mailint->common.tostart_time, &parseerr, false); + EXTRACT_JSON_INT_PARAM("endtime", "Email intercept", emailjson.endtime, + mailint->common.toend_time, &parseerr, false); + + if (parseerr) { + goto cepterr; + } + + if (emailjson.emailtargets != NULL) { + + if (parse_emailintercept_targets(state, mailint, emailjson.emailtargets, + cinfo) < 0) { + goto cepterr; + } + + if ((changedtargets = compare_email_targets(state, found, + mailint)) < 0) { + goto cepterr; + } + } + + if (changedtargets) { + tmp = found->targets; + found->targets = mailint->targets; + mailint->targets = tmp; + } + + /* TODO: warn if user tries to change fields that we don't support + * changing (e.g. mediator) ? 
+ * + */ + + MODIFY_STRING_MEMBER(mailint->common.authcc, found->common.authcc, &changed); + found->common.authcc_len = strlen(found->common.authcc); + + MODIFY_STRING_MEMBER(mailint->common.delivcc, found->common.delivcc, &changed); + found->common.delivcc_len = strlen(found->common.delivcc); + + MODIFY_STRING_MEMBER(mailint->common.targetagency, found->common.targetagency, + &agencychanged); + + timechanged = compare_intercept_times(&(mailint->common), &(found->common)); + + if (mailint->common.tomediate != found->common.tomediate) { + changed = 1; + found->common.tomediate = mailint->common.tomediate; + } + + if (agencychanged) { + new_intercept_liidmapping(state, found->common.targetagency, + found->common.liid); + } + + if (changed || timechanged) { + modify_existing_intercept_options(state, (void *)found, + OPENLI_PROTO_MODIFY_EMAILINTERCEPT); + } + + if (changedtargets) { + target_info = list_email_targets(found, 256); + announce_hi1_notification_to_mediators(state, &(found->common), + target_info, HI1_LI_MODIFIED); + if (target_info) { + free(target_info); + } + } + + mailint->common.hi1_seqno = found->common.hi1_seqno; + logger(LOG_INFO, + "OpenLI provisioner: updated Email intercept %s via update socket.", + found->common.liid); + + if (mailint) { + free_single_emailintercept(mailint); + } + if (parsed) { + json_object_put(parsed); + } + json_tokener_free(tknr); + return 0; + +cepterr: + if (mailint) { + free_single_emailintercept(mailint); + } + if (parsed) { + json_object_put(parsed); + } + json_tokener_free(tknr); + return -1; +} + int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { struct json_intercept voipjson; @@ -969,7 +1388,7 @@ int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { int changedtargets = 0; libtrace_list_t *tmp; - char *liidstr = NULL; + char *liidstr = NULL, *target_info; int parseerr = 0, changed = 0, agencychanged = 0, timechanged = 0; INIT_JSON_INTERCEPT_PARSING @@ -1007,11 +1426,14 @@ int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { vint->common.delivcc, &parseerr, false); EXTRACT_JSON_STRING_PARAM("agencyid", "VOIP intercept", voipjson.agencyid, vint->common.targetagency, &parseerr, false); + EXTRACT_JSON_INT_PARAM("outputhandovers", "VOIP intercept", + voipjson.tomediate, + vint->common.tomediate, &parseerr, false); EXTRACT_JSON_INT_PARAM("mediator", "VOIP intercept", voipjson.mediator, vint->common.destid, &parseerr, false); EXTRACT_JSON_INT_PARAM("starttime", "VOIP intercept", voipjson.starttime, vint->common.tostart_time, &parseerr, false); - EXTRACT_JSON_INT_PARAM("mediator", "VOIP intercept", voipjson.endtime, + EXTRACT_JSON_INT_PARAM("endtime", "VOIP intercept", voipjson.endtime, vint->common.toend_time, &parseerr, false); if (parseerr) { @@ -1050,6 +1472,11 @@ int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { MODIFY_STRING_MEMBER(vint->common.targetagency, found->common.targetagency, &agencychanged); + if (vint->common.tomediate != found->common.tomediate) { + changed = 1; + found->common.tomediate = vint->common.tomediate; + } + timechanged = compare_intercept_times(&(vint->common), &(found->common)); if (agencychanged) { @@ -1063,8 +1490,12 @@ int modify_voipintercept(update_con_info_t *cinfo, provision_state_t *state) { } if (changedtargets) { + target_info = list_sip_targets(found, 256); announce_hi1_notification_to_mediators(state, &(found->common), - HI1_LI_MODIFIED); + target_info, HI1_LI_MODIFIED); + if (target_info) { + 
free(target_info); + } } vint->common.hi1_seqno = found->common.hi1_seqno; @@ -1141,6 +1572,9 @@ int modify_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { ipint->common.delivcc, &parseerr, false); EXTRACT_JSON_STRING_PARAM("agencyid", "IP intercept", ipjson.agencyid, ipint->common.targetagency, &parseerr, false); + EXTRACT_JSON_INT_PARAM("outputhandovers", "IP intercept", + ipjson.tomediate, + ipint->common.tomediate, &parseerr, false); EXTRACT_JSON_INT_PARAM("mediator", "IP intercept", ipjson.mediator, ipint->common.destid, &parseerr, false); EXTRACT_JSON_INT_PARAM("vendmirrorid", "IP intercept", ipjson.vendmirrorid, @@ -1225,6 +1659,11 @@ int modify_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { &changed); found->common.delivcc_len = strlen(found->common.delivcc); + if (ipint->common.tomediate != found->common.tomediate) { + found->common.tomediate = ipint->common.tomediate; + changed = 1; + } + MODIFY_STRING_MEMBER(ipint->username, found->username, &changed); found->username_len = strlen(found->username); @@ -1262,7 +1701,7 @@ int modify_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { if (agencychanged) { announce_hi1_notification_to_mediators(state, &(found->common), - HI1_LI_ACTIVATED); + found->username, HI1_LI_ACTIVATED); } if (compare_intercept_times(&(ipint->common), &(found->common)) == 1) { @@ -1274,7 +1713,7 @@ int modify_ipintercept(update_con_info_t *cinfo, provision_state_t *state) { OPENLI_PROTO_MODIFY_IPINTERCEPT); if (!agencychanged) { announce_hi1_notification_to_mediators(state, &(found->common), - HI1_LI_MODIFIED); + found->username, HI1_LI_MODIFIED); } } @@ -1315,6 +1754,7 @@ int add_new_agency(update_con_info_t *cinfo, provision_state_t *state) { struct json_agency agjson; const char *idstr; + const char *verb; struct json_object *parsed = NULL; struct json_tokener *tknr; liagency_t *nag = NULL; @@ -1355,17 +1795,18 @@ int add_new_agency(update_con_info_t *cinfo, provision_state_t *state) { strlen(nag->agencyid), found); if (found) { HASH_DEL(state->interceptconf.leas, found); - withdraw_agency_from_mediators(state, found); free_liagency(found->ag); free(found); + verb = "modified"; + } else { + verb = "added new"; } - HASH_ADD_KEYPTR(hh, state->interceptconf.leas, nag->agencyid, strlen(nag->agencyid), lea); announce_lea_to_mediators(state, lea); - logger(LOG_INFO, "OpenLI: added new agency '%s' via update socket.", - nag->agencyid); + logger(LOG_INFO, "OpenLI: %s agency '%s' via update socket.", + verb, nag->agencyid); if (parsed) { json_object_put(parsed); @@ -1466,7 +1907,6 @@ int modify_agency(update_con_info_t *cinfo, provision_state_t *state) { } if (changed) { - withdraw_agency_from_mediators(state, found); announce_lea_to_mediators(state, found); logger(LOG_INFO, "OpenLI: modified existing agency '%s' via update socket.", diff --git a/src/util.c b/src/util.c index fca2fc6e..7c37b397 100644 --- a/src/util.c +++ b/src/util.c @@ -355,6 +355,29 @@ int epoll_add_timer(int epoll_fd, uint32_t secs, void *ptr) { return timerfd; } +int epoll_add_ms_timer(int epoll_fd, uint32_t msecs, void *ptr) { + int timerfd; + struct epoll_event ev; + struct itimerspec its; + + ev.data.ptr = ptr; + ev.events = EPOLLIN; + + its.it_interval.tv_sec = 0; + its.it_interval.tv_nsec = 0; + its.it_value.tv_sec = msecs / 1000; + its.it_value.tv_nsec = (msecs % 1000) * 1000000; + + timerfd = timerfd_create(CLOCK_MONOTONIC, 0); + timerfd_settime(timerfd, 0, &its, NULL); + + if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, timerfd, &ev) == -1) { + 
return -1; + } + + return timerfd; +} + int extract_ip_addresses(libtrace_packet_t *pkt, uint8_t *srcip, uint8_t *destip, int *ipfamily) { diff --git a/src/util.h b/src/util.h index d95fcd94..50975d35 100644 --- a/src/util.h +++ b/src/util.h @@ -39,6 +39,7 @@ int connect_socket(char *ipstr, char *portstr, uint8_t isretry, uint8_t setkeepalive); int epoll_add_timer(int epoll_fd, uint32_t secs, void *ptr); +int epoll_add_ms_timer(int epoll_fd, uint32_t msecs, void *ptr); int create_listener(char *addr, char *port, char *name); char *sockaddr_to_string(struct sockaddr *sa, char *str, int len); uint8_t *sockaddr_to_key(struct sockaddr *sa, int *socklen); diff --git a/systemd/openli-collector.service b/systemd/openli-collector.service index 76d977f6..579efd7a 100644 --- a/systemd/openli-collector.service +++ b/systemd/openli-collector.service @@ -1,6 +1,6 @@ [Unit] Description=OpenLI collector daemon -Documentation=http://github.com/wanduow/openli/wiki +Documentation=http://github.com/OpenLI-NZ/openli/wiki [Service] Type=simple diff --git a/systemd/openli-mediator.service b/systemd/openli-mediator.service index bd4ec905..c65bbee5 100644 --- a/systemd/openli-mediator.service +++ b/systemd/openli-mediator.service @@ -1,6 +1,6 @@ [Unit] Description=OpenLI mediator daemon -Documentation=http://github.com/wanduow/openli/wiki +Documentation=http://github.com/OpenLI-NZ/openli/wiki [Service] User=openli diff --git a/systemd/openli-provisioner.service b/systemd/openli-provisioner.service index 4d7b54e3..2b8780f5 100644 --- a/systemd/openli-provisioner.service +++ b/systemd/openli-provisioner.service @@ -1,6 +1,6 @@ [Unit] Description=OpenLI provisioner daemon -Documentation=http://github.com/wanduow/openli/wiki +Documentation=http://github.com/OpenLI-NZ/openli/wiki [Service] User=openli diff --git a/utils/update-provconf-1.0.4.py b/utils/update-provconf-1.0.4.py index daafc35e..20d80d88 100644 --- a/utils/update-provconf-1.0.4.py +++ b/utils/update-provconf-1.0.4.py @@ -15,7 +15,7 @@ # # NOTE: any comments in your original config file will NOT be preserved. If # these are important to you, please update your config according to the -# manual process ( https://github.com/wanduow/openli/wiki/Upgrading-to-1.0.4 ). +# manual process ( https://github.com/OpenLI-NZ/openli/wiki/Upgrading-to-1.0.4 ). import yaml, sys, string