diff --git a/.gitignore b/.gitignore index 892e7840..3cb1c39d 100644 --- a/.gitignore +++ b/.gitignore @@ -89,3 +89,90 @@ debian/ripe-atlas-probe*.debhelper* debian/ripe-atlas-anchor*.substvars debian/ripe-atlas-common*.substvars debian/ripe-atlas-probe*.substvars + +# dpkg-buildpackage (repository) +.repo/debian/.debhelper/ +.repo/debian/autoreconf.* +.repo/debian/autoreconf.before +.repo/debian/debhelper-build-stamp +.repo/debian/files +.repo/debian/ripe-atlas-repo/ +.repo/debian/ripe-atlas-repo*.debhelper* +.repo/debian/ripe-atlas-repo*.substvars +.repo/ripe-atlas.list +.repo/ripe-atlas.pol +.repo/ripe-atlas.pub.gpg +.repo/ripe-atlas.pub.gpg.asc + +# +# Kbuild ignores +# +probe-busybox/**/.* +probe-busybox/**/*.o +probe-busybox/**/*.o.* +probe-busybox/**/*.a +probe-busybox/**/*.s +probe-busybox/**/Kbuild +probe-busybox/**/Config.in +.config + +# +# Normal output +# +probe-busybox/busybox +probe-busybox/busybox_old +probe-busybox/busybox_unstripped* + +# +# Backups / patches +# +*~ +*.orig +*.rej +/*.patch + +# +# debugging stuff +# +core +.gdb_history +.gdbinit + +# +# testing output +# +probe-busybox/busybox.links +probe-busybox/runtest-tempdir-links +probe-busybox/testsuite/echo-ne + +# +# cscope output +# +cscope.files +cscope.in.out +cscope.out +cscope.po.out + +probe-busybox/libevent-2.0.20-stable/autom4te.cache +probe-busybox/libevent-2.0.20-stable/config.h +probe-busybox/libevent-2.0.20-stable/config.log +probe-busybox/libevent-2.0.20-stable/config.status +probe-busybox/libevent-2.0.20-stable/.deps +probe-busybox/libevent-2.0.20-stable/include/event2/event-config.h +probe-busybox/libevent-2.0.20-stable/include/Makefile +probe-busybox/libevent-2.0.20-stable/*.la +probe-busybox/libevent-2.0.20-stable/.libs +probe-busybox/libevent-2.0.20-stable/libtool +probe-busybox/libevent-2.0.20-stable/*.lo +probe-busybox/libevent-2.0.20-stable/Makefile +probe-busybox/libevent-2.0.20-stable/*.pc +probe-busybox/libevent-2.0.20-stable/sample +probe-busybox/libevent-2.0.20-stable/sample/.libs/* +probe-busybox/libevent-2.0.20-stable/stamp-h1 +probe-busybox/libevent-2.0.20-stable/test +probe-busybox/include/atlas_path.h + +# +# Never ignore these +# +!.gitignore diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index afaa96f9..60153dd6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,12 +3,11 @@ stages: - build - lint - prepare - - deploy:el8 - - deploy:el9 + - deploy variables: - # DOCKER_IMAGE_DEBIAN11: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/debian11:${CI_COMMIT_REF_SLUG} - # DOCKER_IMAGE_DEBIAN12: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/debian12:${CI_COMMIT_REF_SLUG} + DOCKER_IMAGE_DEBIAN11: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/debian11:${CI_COMMIT_REF_SLUG} + DOCKER_IMAGE_DEBIAN12: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/debian12:${CI_COMMIT_REF_SLUG} DOCKER_IMAGE_OL8: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/ol8:${CI_COMMIT_REF_SLUG} DOCKER_IMAGE_OL9: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/ol9:${CI_COMMIT_REF_SLUG} DOCKER_IMAGE_PREP: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/prep:${CI_COMMIT_REF_SLUG} @@ -22,26 +21,25 @@ variables: - /^devel.*/ - /^testing$/ - /^master$/ - - tags changes: - ${DOCKER_DIR}/Dockerfile image: docker:latest script: - - docker build -t ${DOCKER_IMAGE} ${DOCKER_DIR} - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY} + - docker build -t ${DOCKER_IMAGE} ${DOCKER_DIR} - docker push ${DOCKER_IMAGE} -# 
build_image_debian11: -# variables: -# DOCKER_DIR: .gitlab-docker/debian11 -# DOCKER_IMAGE: ${DOCKER_IMAGE_DEBIAN11} -# extends: .build_image +build_image_debian11: + variables: + DOCKER_DIR: .gitlab-docker/debian11 + DOCKER_IMAGE: ${DOCKER_IMAGE_DEBIAN11} + extends: .build_image -# build_image_debian12: -# variables: -# DOCKER_DIR: .gitlab-docker/debian12 -# DOCKER_IMAGE: ${DOCKER_IMAGE_DEBIAN12} -# extends: .build_image +build_image_debian12: + variables: + DOCKER_DIR: .gitlab-docker/debian12 + DOCKER_IMAGE: ${DOCKER_IMAGE_DEBIAN12} + extends: .build_image build_image_ol8: variables: @@ -61,28 +59,38 @@ build_image_prep: DOCKER_IMAGE: ${DOCKER_IMAGE_PREP} extends: .build_image -# .build-ci-debian: -# stage: build -# only: -# - /^devel.*/ -# - tags -# script: -# - cd ripe-atlas-software-probe -# - dpkg-buildpackage -b -us -uc -# - cp ../ripe-atlas-*.deb . -# artifacts: -# name: "${CI_PROJECT_NAME}-${CI_BUILD_REF_NAME}" -# paths: -# - ripe-atlas-*.deb -# expire_in: never - -# build-ci-debian11: -# extends: .build-ci-debian -# image: ${DOCKER_IMAGE_DEBIAN11} - -# build-ci-debian12: -# extends: .build-ci-debian -# image: ${DOCKER_IMAGE_DEBIAN12} +.build-ci-debian: + stage: build + only: + - /^devel.*/ + - /^testing$/ + - /^master$/ + script: + - export HOME="${PWD}" + - mkdir -p ${HOME}/${OS_VERSION}/main/binary-amd64 # create a repo with same structure as prod + - dpkg-buildpackage -b -us -uc + - pushd .repo + - dpkg-buildpackage -b -us -uc + - popd + - mv {..,.}/ripe-atlas-*.deb ${HOME}/${OS_VERSION}/main/binary-amd64 + - mv VERSION ${HOME}/${OS_VERSION} + artifacts: + name: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_NAME}" + paths: + - ./${OS_VERSION} + expire_in: never + +build-ci-debian11: + variables: + OS_VERSION: bullseye + extends: .build-ci-debian + image: ${DOCKER_IMAGE_DEBIAN11} + +build-ci-debian12: + variables: + OS_VERSION: bookworm + extends: .build-ci-debian + image: ${DOCKER_IMAGE_DEBIAN12} .build-ci-rhel: stage: build @@ -90,7 +98,6 @@ build_image_prep: - /^devel.*/ - /^testing$/ - /^master$/ - - tags variables: REPO_RPM_PATH: ${RPMS_DIR}/noarch/*.rpm X86_RPM_PATH: ${RPMS_DIR}/x86_64/*.rpm @@ -103,7 +110,9 @@ build_image_prep: - mkdir -p ${HOME}/${OS_VERSION}/{noarch,x86_64} # create a repo with same structure as prod # build repo rpm + - pushd .repo - rpmbuild -bb --define "git_source ${REPO_GIT_SOURCE}" --define "git_tag ${REPO_GIT_TAG}" rhel/ripe-atlas-repo.spec + - popd - cp -a ${HOME}/${REPO_RPM_PATH} ${HOME}/${OS_VERSION}/noarch # build x86_64 rpm @@ -140,7 +149,6 @@ build-ci-el9: - /^devel.*/ - /^testing$/ - /^master$/ - - tags script: - mkdir lint_report - for RPM in $(find ${OS_VERSION} -type f); do rpmlint $RPM > lint_report/$(echo $RPM | sed -n "s/^.*\/\s*\(\S*\)-.*$/\1.log/p") ; done || true @@ -168,7 +176,7 @@ lint-ci-el9: image: ${DOCKER_IMAGE_OL9} extends: .lint-ci-rhel -.prepare-ci-rhel: +.prepare-ci: stage: prepare script: - 'eval ${PREPARE}' @@ -180,7 +188,18 @@ lint-ci-el9: - ./${OS_VERSION}-deployable expire_in: never +.prepare-ci-rhel: + variables: + OS_DISTRIBUTION: rhel + extends: .prepare-ci + +.prepare-ci-debian: + variables: + OS_DISTRIBUTION: debian + extends: .prepare-ci + .prepare-ci-el8: + stage: prepare needs: - job: build-ci-el8 artifacts: true @@ -189,6 +208,7 @@ lint-ci-el9: extends: .prepare-ci-rhel .prepare-ci-el9: + stage: prepare needs: - job: build-ci-el9 artifacts: true @@ -196,7 +216,26 @@ lint-ci-el9: OS_VERSION: el9 extends: .prepare-ci-rhel +.prepare-ci-debian11: + stage: prepare + needs: + - job: build-ci-debian11 + artifacts: true + 
variables: + OS_VERSION: bullseye + extends: .prepare-ci-debian + +.prepare-ci-debian12: + stage: prepare + needs: + - job: build-ci-debian12 + artifacts: true + variables: + OS_VERSION: bookworm + extends: .prepare-ci-debian + prepare-ci-el8-devel: + stage: prepare only: - /^devel.*/ extends: .prepare-ci-el8 @@ -204,13 +243,31 @@ prepare-ci-el8-devel: name: devel prepare-ci-el9-devel: + stage: prepare only: - /^devel.*/ extends: .prepare-ci-el9 environment: name: devel +prepare-ci-debian11-devel: + stage: prepare + only: + - /^devel.*/ + extends: .prepare-ci-debian11 + environment: + name: devel + +prepare-ci-debian12-devel: + stage: prepare + only: + - /^devel.*/ + extends: .prepare-ci-debian12 + environment: + name: devel + prepare-ci-el8-testing: + stage: prepare only: - /^testing$/ extends: .prepare-ci-el8 @@ -218,107 +275,243 @@ prepare-ci-el8-testing: name: testing prepare-ci-el9-testing: + stage: prepare only: - /^testing$/ extends: .prepare-ci-el9 environment: name: testing +prepare-ci-debian11-testing: + stage: prepare + only: + - /^testing$/ + extends: .prepare-ci-debian11 + environment: + name: testing + +prepare-ci-debian12-testing: + stage: prepare + only: + - /^testing$/ + extends: .prepare-ci-debian12 + environment: + name: testing + prepare-ci-el8-master: + stage: prepare only: - /^master$/ - - tags extends: .prepare-ci-el8 environment: name: master prepare-ci-el9-master: + stage: prepare only: - /^master$/ - - tags extends: .prepare-ci-el9 environment: name: master +prepare-ci-debian11-master: + stage: prepare + only: + - /^master$/ + extends: .prepare-ci-debian11 + environment: + name: master + +prepare-ci-debian12-master: + stage: prepare + only: + - /^master$/ + extends: .prepare-ci-debian12 + environment: + name: master + +approve-ci: + stage: deploy + script: ':' + when: manual + .deploy-ci: variables: REPO_GIT_SOURCE: https://${DEPLOY_USER}:${DEPLOY_TOKEN}@${URL} + image: ${DOCKER_IMAGE_PREP} script: - 'eval ${DEPLOY}' - when: manual + +.deploy-ci-rhel: + variables: + OS_DISTRIBUTION: rhel + TARGET: ${OS_DISTRIBUTION}/${OS_VERSION} + extends: .deploy-ci + +.deploy-ci-debian: + variables: + OS_DISTRIBUTION: debian + TARGET: ${OS_DISTRIBUTION}/dists/${OS_VERSION} + extends: .deploy-ci .deploy-ci-el8: - stage: deploy:el8 + stage: deploy variables: OS_VERSION: el8 image: ${DOCKER_IMAGE_PREP} - extends: .deploy-ci + extends: .deploy-ci-rhel + resource_group: deploy .deploy-ci-el9: - stage: deploy:el9 + stage: deploy variables: OS_VERSION: el9 image: ${DOCKER_IMAGE_PREP} - extends: .deploy-ci + extends: .deploy-ci-rhel + resource_group: deploy + +.deploy-ci-debian11: + stage: deploy + variables: + OS_VERSION: bullseye + extends: .deploy-ci-debian + resource_group: deploy + +.deploy-ci-debian12: + stage: deploy + variables: + OS_VERSION: bookworm + extends: .deploy-ci-debian + resource_group: deploy deploy-ci-el8-devel: + needs: + - job: prepare-ci-el8-devel + artifacts: true + - job: approve-ci only: - /^devel.*/ - needs: - - job: prepare-ci-el8-devel - artifacts: true extends: .deploy-ci-el8 environment: name: devel deploy-ci-el9-devel: + needs: + - job: prepare-ci-el9-devel + artifacts: true + - job: approve-ci only: - /^devel.*/ - needs: - - job: prepare-ci-el9-devel - artifacts: true extends: .deploy-ci-el9 environment: - name: devel + name: devel + +deploy-ci-debian11-devel: + needs: + - job: prepare-ci-debian11-devel + artifacts: true + - job: approve-ci + only: + - /^devel.*/ + extends: .deploy-ci-debian11 + environment: + name: devel + +deploy-ci-debian12-devel: + 
needs: + - job: prepare-ci-debian12-devel + artifacts: true + - job: approve-ci + only: + - /^devel.*/ + extends: .deploy-ci-debian12 + environment: + name: devel deploy-ci-el8-testing: + needs: + - job: prepare-ci-el8-testing + artifacts: true + - job: approve-ci only: - /^testing$/ - needs: - - job: prepare-ci-el8-testing - artifacts: true extends: .deploy-ci-el8 environment: name: testing deploy-ci-el9-testing: + needs: + - job: prepare-ci-el9-testing + artifacts: true + - job: approve-ci only: - /^testing$/ - needs: - - job: prepare-ci-el9-testing - artifacts: true extends: .deploy-ci-el9 environment: name: testing +deploy-ci-debian11-testing: + needs: + - job: prepare-ci-debian11-testing + artifacts: true + - job: approve-ci + only: + - /^testing$/ + extends: .deploy-ci-debian11 + environment: + name: testing + +deploy-ci-debian12-testing: + needs: + - job: prepare-ci-debian12-testing + artifacts: true + - job: approve-ci + only: + - /^testing$/ + extends: .deploy-ci-debian12 + environment: + name: testing + deploy-ci-el8-master: + needs: + - job: prepare-ci-el8-master + artifacts: true + - job: approve-ci only: - /^master$/ - - tags - needs: - - job: prepare-ci-el8-master - artifacts: true extends: .deploy-ci-el8 environment: name: master deploy-ci-el9-master: + needs: + - job: prepare-ci-el9-master + artifacts: true + - job: approve-ci only: - /^master$/ - - tags - needs: - - job: prepare-ci-el9-master - artifacts: true extends: .deploy-ci-el9 environment: - name: master + name: master + +deploy-ci-debian11-master: + needs: + - job: prepare-ci-debian11-master + artifacts: true + - job: approve-ci + only: + - /^master$/ + extends: .deploy-ci-debian11 + environment: + name: master + +deploy-ci-debian12-master: + needs: + - job: prepare-ci-debian12-master + artifacts: true + - job: approve-ci + only: + - /^master$/ + extends: .deploy-ci-debian12 + environment: + name: master diff --git a/.gitlab-docker/debian11/Dockerfile b/.gitlab-docker/debian11/Dockerfile index 7c27f61e..1b7d8df9 100644 --- a/.gitlab-docker/debian11/Dockerfile +++ b/.gitlab-docker/debian11/Dockerfile @@ -6,5 +6,8 @@ RUN apt-get update RUN apt-get dist-upgrade -y RUN apt-get install -y libssl-dev RUN apt-get install -y build-essential -RUN apt-get install -y autoconf-dev +RUN apt-get install -y autoconf RUN apt-get install -y debhelper +RUN apt-get install -y gpg +RUN apt-get install -y lsb-release +RUN apt-get install -y systemd diff --git a/.gitlab-docker/debian12/Dockerfile b/.gitlab-docker/debian12/Dockerfile index 7c27f61e..87ff472a 100644 --- a/.gitlab-docker/debian12/Dockerfile +++ b/.gitlab-docker/debian12/Dockerfile @@ -1,4 +1,4 @@ -FROM debian:11.0 +FROM debian:12.0 MAINTAINER mstam@ripe.net @@ -6,5 +6,8 @@ RUN apt-get update RUN apt-get dist-upgrade -y RUN apt-get install -y libssl-dev RUN apt-get install -y build-essential -RUN apt-get install -y autoconf-dev +RUN apt-get install -y autoconf RUN apt-get install -y debhelper +RUN apt-get install -y gpg +RUN apt-get install -y lsb-release +RUN apt-get install -y systemd diff --git a/.gitlab-docker/prep/Dockerfile b/.gitlab-docker/prep/Dockerfile index 2c2c61bb..5575e64b 100644 --- a/.gitlab-docker/prep/Dockerfile +++ b/.gitlab-docker/prep/Dockerfile @@ -4,4 +4,4 @@ MAINTAINER gmeyer@ripe.net RUN apt-get update RUN apt-get dist-upgrade -y -RUN apt-get install -y file libgfshare-bin openssl rpm gpg git createrepo-c +RUN apt-get install -y file libgfshare-bin binutils apt-utils debsigs openssl rpm gpg git createrepo-c diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644 index 5191cd60..00000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "probe-measurements"] - path = probe-busybox - url = ../ripe-atlas-probe-measurements.git diff --git a/.indent.pro b/.indent.pro new file mode 100644 index 00000000..492ecf1c --- /dev/null +++ b/.indent.pro @@ -0,0 +1,33 @@ +--blank-lines-after-declarations +--blank-lines-after-procedures +--break-before-boolean-operator +--no-blank-lines-after-commas +--braces-on-if-line +--braces-on-struct-decl-line +--comment-indentation25 +--declaration-comment-column25 +--no-comment-delimiters-on-blank-lines +--cuddle-else +--continuation-indentation4 +--case-indentation0 +--else-endif-column33 +--space-after-cast +--line-comments-indentation0 +--declaration-indentation1 +--dont-format-first-column-comments +--dont-format-comments +--honour-newlines +--indent-level4 +/* changed from 0 to 4 */ +--parameter-indentation4 +--line-length78 /* changed from 75 */ +--continue-at-parentheses +--no-space-after-function-call-names +--dont-break-procedure-type +--dont-star-comments +--leave-optional-blank-lines +--dont-space-special-semicolon +--tab-size4 +/* additions by Mark */ +--case-brace-indentation0 +--leave-preprocessor-space diff --git a/rhel/RPM-GPG-KEY-ripe-atlas.devel b/.repo/RPM-GPG-KEY-ripe-atlas-20220721.devel similarity index 100% rename from rhel/RPM-GPG-KEY-ripe-atlas.devel rename to .repo/RPM-GPG-KEY-ripe-atlas-20220721.devel diff --git a/rhel/RPM-GPG-KEY-ripe-atlas.master b/.repo/RPM-GPG-KEY-ripe-atlas-20220721.master similarity index 100% rename from rhel/RPM-GPG-KEY-ripe-atlas.master rename to .repo/RPM-GPG-KEY-ripe-atlas-20220721.master diff --git a/rhel/RPM-GPG-KEY-ripe-atlas.testing b/.repo/RPM-GPG-KEY-ripe-atlas-20220721.testing similarity index 100% rename from rhel/RPM-GPG-KEY-ripe-atlas.testing rename to .repo/RPM-GPG-KEY-ripe-atlas-20220721.testing diff --git a/.repo/RPM-GPG-KEY-ripe-atlas-20240924.devel b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.devel new file mode 100644 index 00000000..c5c5904a --- /dev/null +++ b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.devel @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGbqnSQBEADP5Mj/yTXRYZbEGGRJcD3s468syfulzVhTRjbhfCm1qbdr2UhM +XBOfw6Sx3v/9AQ6lPlqNKgE+iVkz7STC2BzzVOq5GaH810LXASb2qAj0LtT+fNLT +Jb9BVqsffsfUOxfW2Zq4gcgyUBv8WYJmP4Lu2UkwrjPXFyeXdECw8Q7vDMS3E2h5 +n9hhvwirKMAbomMSg9hgQy9nHe1YJsBp1OiV8SDoQUxov1zmKuLghcbluPcFzhGc ++IjVcHDmgszmNpmo24txjQNoIssC/zvlVK3BzGlyfid5se5Xi7A9iZ5WhQJ/3xf0 +aENrQ2PRP2j99vyNYDqpa9AyfCnUimtUr2/kabDSqdoYSZlIrRNve57A/NdJL0Si +h0b9fvBVDpLPnrysbLbfzyFs65YnEkCfEyX3Pw3fY803wfoIXa8pD9+JKlxLGGqx +AuHidzQBr8u0du6KSNAmomUJMQsUwjxR0B6kLi/DzvEYiO+NjBUeziUMWmxK/UA4 +cduAE/GWJFhfPLKmS3sRIi4PpL5BldMnEJXyWvVQbCv+talkZ7YsIPVMkyV1x1cv +KwycEjzvSDsz6rIJ3YfWvJlLns2TchtO8rb+RBS2J0ZCanghkB6jfSMY/DdI7jNn +/FOlGKE3pPdJ7lAcbzTlXtBHgMbmp4lBcBwKN5m/Wdnn1jVOUCOdRFjI5wARAQAB +tCFhdGxhcytwcm9iZS1kZXYtMjAyNDA5MjRAcmlwZS5uZXSJAk4EEwEKADgWIQTG +YbWveDPXIqGNVnhOIIhAiR5w4AUCZuqdJAIbAwULCQgHAgYVCgkICwIEFgIDAQIe +AQIXgAAKCRBOIIhAiR5w4JpWD/9WogCwZWnpJ/cyrBWW1kodSWYE/AZtCdMyq7MU +5Fj6kMDxch0BTWLA16MFV/y4qK47r9ulgcp7zXmRWb791nKnRkPMFaYN49YegK88 +knUbN/BzdVRE9eCFWi8m2qMJGyS1OQTxHXujFZTg/iN+O4rjv0BL+EmwrjLxK5e/ +Le2XXN2MRRPoMDZCFScJHVHJLS2Al1B3A3IzBykOxLyXSHx+AkTrTrIsCTxDxF8K +LGQd7Et3d0rzw3+PPtnLTJAGrRF2WZrIzohpiLzv5pIWoNIjuLzAD9Mfzj/Ae6S9 +BhvK0EsKWWDy4NP5SS3dAJcISxgYVHP0B0HEcQmxZBAINz90Uyj2jNxOTvRD5tQG +tm6xX3VZzTkxe4xaXC6pWrajaDTrNcXHiRvc9MOqwTBAurWWsJ0ojC19DtvmLaXu +xUPILNjMJaA+QkPfFhey1mHhWCctrsLCjsscd9/XlLPQF7e4Fi1sP12R7ZntCOWO 
+gouT8lueKQ0leWKaWncF54z51OO0Bs+JkHOuLu/LszCHLa8ulkXjaTARb8d+J6J9 +3ViOQ3YJyCwS8EPbbT7JKwF6BuO1+aRY9ymQnzufpEkFw0eO2M8h23aTOiiwQMAK +paXmbLN8KDMzpzEHas1kDglzYHNkZzT+dXtKzM3X5z2tD5V9Q2ktT84P29cYj6mX +6DT2AA== +=lgag +-----END PGP PUBLIC KEY BLOCK----- diff --git a/.repo/RPM-GPG-KEY-ripe-atlas-20240924.master b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.master new file mode 100644 index 00000000..4c93cb36 --- /dev/null +++ b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.master @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGbyrAwBEADIT+rYVlzTSWYsC/oNsIQI+B+iX9o9zfphcjHGsRLdnzBx0ZjM +8schxbUP5xu3DzA6pBftasQPEXqlDA6HFxaX3L7wCPqRDRKcy7K5ChDdR7nBWS6I +sOp6eNeox2FJ8BbEAl3agT+q3NXzFUZfYfaqnNLqlNuJ2/aJjOrbo9X9Jl4u8eu5 +KCDqGSaY9DlyYpBOue9HGOaLgbjBTDOWcicXek+z1yU59aULlsx8BlAuxfo9l/NR +0m05bTnoUcSwzhYQykhlGQweEQHYlGyEANwGwskuuWD0KmvlZdfICWAMiHKryW7K +lI3/0YA0LaA3Z0WxGyNTe2Fr1J/ePJ3biMt43U8mf3P0KYbVS+dMJGK7AFVgLVIJ +luZq6nTdW8+eHNf6xGi79XhBLdsCoNp2cV01D30euxDZdZFMSR1GnLIcZ/dWSDWN +wnIwRVWrbAFlq9ddWsdapwZZAEE1r0TRgaOAMAHHjiZObwLnIEb/qL157rmdGPS6 +WW3WFyWcfQIUuqRcIennBpfMyU0rh2yHbkfFnL9oWDNcLb6jYPMnDk7SgKw8Cf7F +YkXdYQtE+iGdX5JiYFrIJYMJ1q0DdMMHTSCGDAsXaWfREJ36BRrU8fD28rA3KBx4 +EuEbGFD4Jrfkar7Id+6jiN23LQOHRtnV+jLVykaw0xg53up96wRwVS3eRwARAQAB +tCJhdGxhcytwcm9iZS1wcm9kLTIwMjQwOTI0QHJpcGUubmV0iQJOBBMBCgA4FiEE +OqFTW956gyIRpH1G2XNFtPb89LgFAmbyrAwCGwMFCwkIBwIGFQoJCAsCBBYCAwEC +HgECF4AACgkQ2XNFtPb89LjX4hAAgYYp2VDiGsnlxUYs3M2A5BhVwxq1FFvEKV3I +4yXH1tzoeHgMOUb2lJxoRxUGYDXFl0PhFucQrcn9p6qEZszy7r8k/4XWpLP9RMJc +w30YdSQIt89OadnFU+Zm93Eg1waH3FR7gD5k1b994S9Am7ny3u8UdpEFWrcSRb5J +1EZEyUo+M+aRTln1KuVVJAuCcdVgZ4JsygTVWeA++9qAdUgDouDTTiaqS4JBhaXj +jI0ckMEZg82fbgfeM1PN2qaj/TiGa+i3yJDvGUsPRUE+WcSOv+kJF4YupGHLzDBF +QSyK+G0lvMSyZzfYaOG84n/kEixUBEnK2yWdb/2eUPS9pqKC4er1FCctHuIX6/L8 +ryPagPDX1rtQ/I4GqC/TYWSFxBRSumVo7H37zfWNJpsCe3YENpa+eAKXBkFrzQLn +BWCiNUZ0Mn7hTGablur0sYJdEPdrIQAucLJZG+0iP34fViWDPwCNIUPGtHcouesw +G5cC5aLYziumbdeF9GtjyPXuXxXt28e7mUBh8yVL1VcQYyfS4ueKeD2lgVF9T++p +DSKW9gIwRJ54vbr3jdRnwblwdG7KUF9qRwGXKvRkxUABAs8KOj34Ldpn2HMcY4uJ +VCi1RGFnBdkWcL+jjGBb+5feM9jfQfLgBPmOI8fuqS3NYrERA489PawLx5/Fm8FA +ArL98yM= +=GOfn +-----END PGP PUBLIC KEY BLOCK----- diff --git a/.repo/RPM-GPG-KEY-ripe-atlas-20240924.testing b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.testing new file mode 100644 index 00000000..55f744f0 --- /dev/null +++ b/.repo/RPM-GPG-KEY-ripe-atlas-20240924.testing @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGbqnUQBEADO0HW1x/WK+lY8f9M75hDpbcbqLHu9tclImxgjI2K86UTzzb3h +S17WUUGqqIrGbKmcaHJZJd4gepALSIAByA6QYsXTPQJ5lzvPUT2HkEislAoHYAMK +/BezcDWiyQ2yCkz8LZSPS7GwP7qh3gI6R17EarOj7ofKQdwGS3v5yoGQneHD0TxE +wy/yj45eqPX47Jj5laaQNwdyid3q8J9jTN4Nz3bamp6+azK7r0iCyi+4Uw75PkV0 +NJNOg3NOVVuL58MNaz60PHARv66OdVvlQJzqqDZSzlLWG98F0v+clc3yOdFUz6Mg +HP+EpShxenvjgl3YjU72L0av80070ZlKwL+wxrPR4467FpUb2xOhxbGcNoXT0jaC +mUg6wbfTDxuNC5IpzCZxZd1bN7REJQsXt4+Rpz54SxF1l9xJj6sDXwAP50ajQu48 +EwOJaAdio1AGmIoKc+HqMHK6/qBw4/nh6GxjH/exBfrXWsLO5Cm5jI6SMLsNTQ15 +52uz7LIWkxcvfjtknewAoZCTAVo+OKjEZITmDgrmWEOxo+RUba7zXniU0/SrsGj/ +ZapcjgvULl0oZyNun9ppQ00Yj8COVctO3W/N6KDVbF294LI3s2mEmIFQQzCfT35N +FF44d6oy7e8xkjc5lrauj8cOF21X0KelAJf7G+KpxzYw1R8oPstvomabhwARAQAB +tCJhdGxhcytwcm9iZS10ZXN0LTIwMjQwOTI0QHJpcGUubmV0iQJOBBMBCgA4FiEE +7kCNLryifK1Nz0xOmtJTx67/Kf0FAmbqnUQCGwMFCwkIBwIGFQoJCAsCBBYCAwEC +HgECF4AACgkQmtJTx67/Kf0etBAAv9UMpAYilDBwkNq3h+uUO6MokdGUxKVGL8yB +4VVhYHM7Y9VSjtzdEeQoAPgPWRWaUccCEUT+j6478KYyl+YDCHLv4NThBf1V5i58 ++qnPL2+o73yWfIFkxVOaImWM5StrA8wOlVEJanFoPFnw9bvSHw65HQhos7CO8Gyp 
+T/wIDqKuHg9wyxT9Qwjzgwb4B5QfpYQCAkqe1I8GJdlFMcHy1xFCwsHqL8t9akWB ++9WVeE4k2AST5kT94EmFXrQk3WLxrc6bUC02K8Ngkd1y2ctJXl7fNfijS6H4/xkK +OVrXaGNjOq/NP2WLcL2MJrYMHHoW2R83uKjmYFVMq8Cdu0oLcKA5OoFpbaRxcK7U +4NKSJBplpqq/RS/rv3kRFaJ+L509+AJ/cQASHEmrVfsUSnVdrey/oyKO6M/3gLkX +Tdl/wu7seVOpxfQ97U2/gw3fqwVBUPQW3kVR4l6/IiAHVm5f6fiBaHExCKHGVMRV +jKh3bzbJN3iJT19WDbIxJvLJeqQeqknQ3yVmxfCJCNg6yHzKaQRg/rwNYA+mSqVy +CEWdnh6/8rjs8zlg0mAn4ho/Q99HirHD3s7Is88I5sk4hQu6mTsmaLwLoBaxqOuk +9nCzQG9zhq7lEQ6rkHQvY7UW9yqjKnS2g51pZltANgr4EtoiAP+23KKU1kL5dbYi +xtbPyXU= +=VCGv +-----END PGP PUBLIC KEY BLOCK----- diff --git a/.repo/debian/changelog b/.repo/debian/changelog new file mode 100644 index 00000000..a9979a94 --- /dev/null +++ b/.repo/debian/changelog @@ -0,0 +1,5 @@ +ripe-atlas-repo (1.5-2) bookworm bullseye; urgency=medium + + * Initial version after restructuring + + -- RIPE Atlas Team Tue, 24 Sep 2024 00:00:00 -0000 diff --git a/.repo/debian/control b/.repo/debian/control new file mode 100644 index 00000000..8b572a9f --- /dev/null +++ b/.repo/debian/control @@ -0,0 +1,26 @@ +Source: ripe-atlas-repo +Section: net +Priority: optional +Build-Depends: + debhelper-compat (= 13), + autotools-dev, + libssl-dev, + gpg, +Rules-Requires-Root: binary-targets +Maintainer: Michel Stam + +Package: ripe-atlas-repo +Architecture: all +Section: net +Priority: optional +Homepage: https://atlas.ripe.net/ +Description: RIPE Atlas repository specific files and configuration. + RIPE Atlas is the RIPE NCC's main Internet data collection system. + It is a global network of devices, called probes and anchors, that + actively measure Internet connectivity. Anyone can access this + data via Internet traffic maps, streaming data visualisations, and + an API. RIPE Atlas users can also perform customised measurements + to gain valuable data about their own networks. + . + This package contains files and configuration needed to use 'apt' + with the RIPE Atlas packages. diff --git a/.repo/debian/copyright b/.repo/debian/copyright new file mode 100644 index 00000000..b4da7da7 --- /dev/null +++ b/.repo/debian/copyright @@ -0,0 +1,25 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Source: https://github.com/RIPE-NCC/ripe-atlas-software-probe.git +Upstream-Name: ripe-atlas-software-probe +Upstream-Contact: RIPE Atlas + +Files: + * +Copyright: + 2023 RIPE NCC +License: GPL-3+ + This package is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + . + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see +Comment: + On Debian systems, the complete text of the GNU General + Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". 
diff --git a/.repo/debian/ripe-atlas-repo.install b/.repo/debian/ripe-atlas-repo.install new file mode 100644 index 00000000..555adbd2 --- /dev/null +++ b/.repo/debian/ripe-atlas-repo.install @@ -0,0 +1,2 @@ +ripe-atlas.list etc/apt/sources.list.d/ +ripe-atlas.pub.gpg.asc etc/apt/trusted.gpg.d/ diff --git a/.repo/debian/rules b/.repo/debian/rules new file mode 100755 index 00000000..fdfe1f67 --- /dev/null +++ b/.repo/debian/rules @@ -0,0 +1,42 @@ +#!/usr/bin/make -f + +include rules.mk + +KEY:=RPM-GPG-KEY-ripe-atlas-20240924.$(RELEASE) +FINGERPRINT:=$(call fingerprint,$(KEY)) +KEYID:=$(call keyid,$(KEY)) +DISTRIBUTION:=$(call distribution) +SOURCES:= \ + ripe-atlas.list \ + ripe-atlas.pol \ + ripe-atlas.pub.gpg.asc \ + ripe-atlas.pub.gpg + +# See debhelper(7) (uncomment to enable). +# Output every command that modifies files on the build system. +export DH_VERBOSE = 1 + + +export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +ripe-atlas.pub.gpg.asc: $(KEY) + cp -f $< $@ + +%: + dh $@ + +override_dh_auto_clean: + @rm -f $(SOURCES) + +override_dh_auto_configure: + +override_dh_auto_build: $(SOURCES) + +override_dh_auto_install: PKG:=debian/ripe-atlas-repo +override_dh_auto_install: + install -d -m 0755 $(PKG)/etc/debsig/policies/$(KEYID) + install -d -m 0755 $(PKG)/usr/share/debsig/keyrings/$(KEYID) + install -m 0644 ripe-atlas.pol $(PKG)/etc/debsig/policies/$(KEYID)/ripe-atlas.pol + install -m 0644 ripe-atlas.pub.gpg $(PKG)/usr/share/debsig/keyrings/$(KEYID)/ripe-atlas.pub.gpg + +override_dh_auto_test: diff --git a/.repo/debian/source/format b/.repo/debian/source/format new file mode 100644 index 00000000..163aaf8d --- /dev/null +++ b/.repo/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/.repo/debian/upstream/metadata b/.repo/debian/upstream/metadata new file mode 100644 index 00000000..1a674561 --- /dev/null +++ b/.repo/debian/upstream/metadata @@ -0,0 +1,6 @@ +Bug-Database: https://github.com/RIPE-NCC/ripe-atlas-software-probe/issues +Bug-Submit: https://github.com/RIPE-NCC/ripe-atlas-software-probe/issues/new +Changelog: https://github.com/RIPE-NCC/ripe-atlas-software-probe/blob/master/.repo/CHANGES +Documentation: https://github.com/RIPE-NCC/ripe-atlas-software-probe/wiki +Repository-Browse: https://github.com/RIPE-NCC/ripe-atlas-software-probe +Repository: https://github.com/RIPE-NCC/ripe-atlas-software-probe.git diff --git a/.repo/debian/watch b/.repo/debian/watch new file mode 100644 index 00000000..6755e6a9 --- /dev/null +++ b/.repo/debian/watch @@ -0,0 +1,6 @@ +# Compulsory line, this is a version 4 file. +version=4 + +# Direct Git. 
+opts="mode=git" http://git.example.com/ripe-atlas-software-probe.git \ + refs/tags/v([\d\.]+) diff --git a/.repo/rhel/changelog b/.repo/rhel/changelog new file mode 100644 index 00000000..f3485e57 --- /dev/null +++ b/.repo/rhel/changelog @@ -0,0 +1,3 @@ +%changelog +* Tue Sep 24 2024 RIPE Atlas Team - 1.5-2 +- Initial version after restructuring diff --git a/.repo/rhel/ripe-atlas-repo.spec b/.repo/rhel/ripe-atlas-repo.spec new file mode 100644 index 00000000..16bc19e5 --- /dev/null +++ b/.repo/rhel/ripe-atlas-repo.spec @@ -0,0 +1,89 @@ +%define git_repo ripe-atlas-software-probe +%define base_name ripe-atlas-repo + +%define repo_dir %{_sysconfdir}/yum.repos.d +%define repo_file ripe-atlas.repo +%define key_dir %{_sysconfdir}/pki/rpm-gpg +%define oldkey_file RPM-GPG-KEY-ripe-atlas-20220721 +%define newkey_file RPM-GPG-KEY-ripe-atlas-20240924 + +%define source_path %{_builddir}/%{base_name}/.repo +%define repo_path %{source_path}/%{repo_file} + +Name: ripe-atlas-repo +Summary: RIPE Atlas Software Probe Repo +Version: 1.5 +Release: 2%{?dist} +License: RIPE NCC +Group: Applications/Internet +BuildArch: noarch + +%description +Setup the RIPE Atlas Software Probe Repo + +%prep +# performing the steps of '%setup' manually since we are pulling from a remote git repo +echo "Cleaning build dir" +cd %{_builddir} +rm -rf %{_builddir}/%{base_name} +echo "Getting Sources..." + +%{!?git_tag:%define git_tag master} +%{!?git_source:%define git_source https://github.com/RIPE_NCC} + +git clone -b %{git_tag} %{git_source}/%{git_repo}.git %{_builddir}/%{base_name} + +cd %{_builddir}/%{base_name} +%{?git_commit:git checkout %{git_commit}} + +%build +RELEASE='%{git_tag}' +RELEASE="${RELEASE%%%.*}" +case "${RELEASE}" in + ([0-9]*) + RELEASE='master' + ;; + + master) + ;; + + *) + sed -i -e "s/baseurl.*\$/&.${RELEASE}\//" %{repo_path} + ;; +esac + +sed -i -e "s/baseurl.*\$/&rhel\//" %{repo_path} + +STRIPPED_DIST="$(echo %{?dist} | sed -r 's/^\.//')" +if [ -z "${STRIPPED_DIST}" ] ; then + echo "OS Error: No Distribution Detected! 
rpm macro ?dist is empty" + exit 1 +fi + +echo "OS Distro detected as: ${STRIPPED_DIST}" +sed -i -e "s/baseurl.*\$/&${STRIPPED_DIST}\//" %{repo_path} + +%install +RELEASE='%{git_tag}' +RELEASE="${RELEASE%%%.*}" +case "${RELEASE}" in + ([0-9]*) + RELEASE='master' + ;; + + *) + ;; +esac +mkdir -p %{buildroot}/{%{repo_dir},%{key_dir}} +install -m 0644 %{repo_path} %{buildroot}%{repo_dir} +install -m 0644 "%{source_path}/%{oldkey_file}.${RELEASE}" %{buildroot}%{key_dir}/%{oldkey_file} +install -m 0644 "%{source_path}/%{newkey_file}.${RELEASE}" %{buildroot}%{key_dir}/%{newkey_file} + +%files +%{repo_dir}/* +%{key_dir}/* +%exclude %dir %{repo_dir} +%exclude %dir %{key_dir} + +%include rhel/changelog + diff --git a/.repo/ripe-atlas.list.in b/.repo/ripe-atlas.list.in new file mode 100644 index 00000000..68dfea3f --- /dev/null +++ b/.repo/ripe-atlas.list.in @@ -0,0 +1 @@ +deb https://ftp.ripe.net/ripe/atlas/software-probe/@repo@debian/ @distribution@ main diff --git a/.repo/ripe-atlas.pol.in b/.repo/ripe-atlas.pol.in new file mode 100644 index 00000000..caf0e7d2 --- /dev/null +++ b/.repo/ripe-atlas.pol.in @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + diff --git a/rhel/ripe-atlas-probe.repo b/.repo/ripe-atlas.repo similarity index 52% rename from rhel/ripe-atlas-probe.repo rename to .repo/ripe-atlas.repo index f92436d7..d4226da2 100644 --- a/rhel/ripe-atlas-probe.repo +++ b/.repo/ripe-atlas.repo @@ -1,7 +1,8 @@ [ripe-atlas-probe] name=RIPE Atlas Probe Repo baseurl=https://ftp.ripe.net/ripe/atlas/software-probe/ -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ripe-atlas +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ripe-atlas-20220721 + file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ripe-atlas-20240924 enabled=1 repo_gpgcheck=1 gpgcheck=1 diff --git a/.repo/rules.mk b/.repo/rules.mk new file mode 100644 index 00000000..c7361aa2 --- /dev/null +++ b/.repo/rules.mk @@ -0,0 +1,53 @@ +# Try and default to GitLab CI/CD variables +BRANCH:=$(CI_COMMIT_TAG) +ifeq ($(BRANCH),) +BRANCH:=$(CI_COMMIT_BRANCH) +endif + +# Derive from GIT if none available (local build?) +ifeq ($(BRANCH),) +BRANCH:=$(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +endif + +# Default to the master branch +RELEASE:=$(firstword $(subst ., ,$(BRANCH))) +ifeq ($(RELEASE),) +RELEASE:=master +endif + +ifneq ($(RELEASE),master) +REPO:=.$(RELEASE)/ +else +REPO:= +endif + +define fingerprint +$(shell gpg --list-packets $(1) | sed -nEe 's/^.*\(issuer fpr v4 ([0-9A-F]+)\)$$/\1/p') +endef + +define keyid +$(shell gpg --list-packets $(1) | sed -nEe 's/^.*\(issuer key ID ([0-9A-F]+)\)$$/\1/p') +endef + +define distribution +$(shell lsb_release -s -c 2>/dev/null) +endef + +SUBST_KEYWORDS = sed \ + -e 's|@fingerprint[@]|$(FINGERPRINT)|g' \ + -e 's|@keyid[@]|$(KEYID)|g' \ + -e 's|@distribution[@]|$(DISTRIBUTION)|g' \ + -e 's|@repo[@]|$(REPO)|g' \ + -e 's|@release[@]|$(RELEASE)|g' + +%.list: %.list.in + rm -f $@ + $(SUBST_KEYWORDS) $< > $@ + +%.pol: %.pol.in + rm -f $@ + $(SUBST_KEYWORDS) $< > $@ + +%.gpg: %.gpg.asc + rm -f $@ + gpg --dearmor < $< > $@ diff --git a/CHANGES.rst b/CHANGES.rst index 72fe1e33..bc7f5d3f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,18 @@ Release History =============== +5100 (released 2024-09-24) +-------------------------- +- All platforms + * As of this version, the ripe-atlas-probe-measurements repository on GitHub is deprecated. Its code has been merged into the ripe-atlas-software-probe repository + * Package can now be built with arbitrary shell shebangs. 
Note that only OpenWRT ash and bash are officially supported + * Bug fixed where permissions are wrong when systemd is disabled (reported by César de Tassis Filho mailto:ctassisf@gmail.com) + * A new code signing key has been introduced. It only signs Debian packages currently, but will sign the RHEL packages in an upcoming release as well. The current RHEL key will then be deprecated +- Software probes + * Streamlined deployment on RHEL CI/CD + * systemd unit ownership moved to probe and anchor packages + * Debian 11/12 x86_64 support added + 5090 (released 2024-07-12) -------------------------- - All platforms diff --git a/INSTALL.rst b/INSTALL.rst index 48d05297..9a723233 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -1,10 +1,12 @@ +========================= Installation Instructions ========================= Picking a release ------------------ +================= The repository is structured around 3 main branches, and a topic branch: + - A master branch which contains production-ready code. - A testing branch - A devel(opment) branch @@ -13,23 +15,25 @@ The repository is structured around 3 main branches, and a topic branch: The master branch contains the latest production-level code. The firmware for hardware probes is built from this branch. The testing branch is a pointer on the master branch that contains code that is being readied for the next production release. The development branch contains code which is by its nature feature complete, but may not be fully tested yet. This code is merged into the testing branch upon completion and unit testing. -Ticket branches that branch off the development branch contain features or fixes that may or may not work +Ticket branches that branch off the development branch contain features or fixes that may or may not work. Any tag which is a number divisible by 10 is a production release (5060, 5070, 5080). Any tag with another number is either a development or a testing release. When uncertain, always select the master branch. To build RPMs for RHEL-based distributions ------------------------------------------- +========================================== The build process is performed using rpmbuild. Currently tested on Oracle Enterprise Linux 8, Oracle Enterprise Linux 9 and Rocky Linux 9 on the x86_64 platform. - (using root privileges) ``dnf update && dnf install git tar rpm-build openssl-devel autoconf automake libtool make`` -- ``git clone --recursive https://github.com/RIPE-NCC/ripe-atlas-software-probe.git`` +- ``git clone https://github.com/RIPE-NCC/ripe-atlas-software-probe.git`` - cd ripe-atlas-software-probe - ``rpmbuild --bb rhel/ripe-atlas-probe.spec``, see note. - ``rpmbuild --bb rhel/ripe-atlas-anchor.spec``, see note. +- cd .repo +- ``rpmbuild --bb rhel/ripe-atlas-repo.spec``, see note. - NOTE: if you wish to build specific (development) branches or repositories: * git_source; to specify a GIT repository (--define "git_source https://github.com/RIPE-NCC") * git_tag; to specify a particular version (--define "git_tag 5080") @@ -37,54 +41,126 @@ Currently tested on Oracle Enterprise Linux 8, Oracle Enterprise Linux 9 and Roc - This will leave the RPMs in rpmbuild/RPMS/x86_64 and rpmbuild/RPMS/noarch To install RPMs for RHEL-based distributions --------------------------------------------- +============================================ + +NOTE: The ripe-atlas-anchor package is intended for deploying Atlas + anchors. Please only install when instructed to do so by RIPE + NCC staff. 
+ +Automatic Updates +----------------- +As of release 5080, the RPM will no longer automatically update. + +The intent of this decision is to conform to operational practices and to +make deployment and maintenance easier on hosts (and the Atlas team). +If you wish to keep automatically updating your software probe, please +install the automatic update package of your choice. + +Suggested solutions available are yum-cron, dnf-automatic or unattended-upgrades. + +Offline (locally built) +----------------------- To install, execute: + - ``cd ~/rpmbuild/RPMS`` - (using root privileges) ``dnf -y install x86_64/ripe-atlas-common-????-1.el?.x86_64.rpm noarch/ripe-atlas-probe-????-1.el?.noarch.rpm`` - (using root privileges) ``systemctl enable ripe-atlas.service`` - (using root privileges) ``systemctl start ripe-atlas.service`` +Online (built by RIPE NCC) +-------------------------- + +To install, execute: + +- (using root privileges on el8) ``dnf -y install https://ftp.ripe.net/ripe/atlas/software-probe/el8/noarch/ripe-atlas-repo-1.5-2.el8.noarch.rpm`` +- (using root privileges on el9) ``dnf -y install https://ftp.ripe.net/ripe/atlas/software-probe/el9/noarch/ripe-atlas-repo-1.5-2.el9.noarch.rpm`` +- (using root privileges) ``dnf -y install ripe-atlas-probe`` +- (using root privileges) ``systemctl enable ripe-atlas.service`` +- (using root privileges) ``systemctl start ripe-atlas.service`` + To upgrade RPMs from atlasswprobe -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------------- + +Upgrading from atlasswprobe will attempt to migrate existing +probe keys and configuration. + +Existing probe state will be removed (/var/atlas-probe). + +Offline (locally built) +^^^^^^^^^^^^^^^^^^^^^^^ + +To upgrade on EL8, execute: -To upgrade from the existing atlasswprobe: - ``cd ~/rpmbuild/RPMS`` -- (using root privileges) ``dnf -y install noarch/ripe-atlas-common-????-1.el?.noarch.rpm`` -- (using root privileges) ``rpm -Uvh x86_64/ripe-atlas-probe-????-1.el?.x86_64.rpm`` +- (using root privileges) ``dnf -y install noarch/ripe-atlas-common-????-1.el8.noarch.rpm`` +- (using root privileges) ``dnf -y upgrade x86_64/ripe-atlas-probe-????-1.el8.x86_64.rpm`` - (using root privileges) ``systemctl enable ripe-atlas.service`` - (using root privileges) ``systemctl start ripe-atlas.service`` -Note that this will attempt to migrate existing probe keys and configuration. -Existing probe state will be removed (/var/atlas-probe). +Online (built by RIPE NCC) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To upgrade on EL8, execute: + +- (using root privileges) ``dnf -y upgrade https://ftp.ripe.net/ripe/atlas/software-probe/el8/noarch/ripe-atlas-repo-1-5.el8.noarch.rpm`` +- (using root privileges) ``dnf -y install ripe-atlas-probe`` +- (using root privileges) ``systemctl enable ripe-atlas.service`` +- (using root privileges) ``systemctl start ripe-atlas.service`` To build DEB files for Debian or Debian-based distributions ------------------------------------------------------------ +=========================================================== The build process is performed using dpkg-buildpackage (compat version 13). -Currently compile tested on Debian 11 and 12 on the x86_64 platform. Code -should be considered Beta quality and will be fully supported in an -upcoming release. +Currently tested on Debian 11 and 12 on the x86_64 platform. -- Get the needed tools (using root privileges): ``apt-get update && apt-get -y install git build-essential debhelper libssl-dev autotools-dev``. 
-- Clone the repo: ``git clone --recursive https://github.com/RIPE-NCC/ripe-atlas-software-probe.git`` +- Get the needed tools (using root privileges): ``apt-get update && apt-get -y install git build-essential debhelper libssl-dev autotools-dev psmisc net-tools``. +- Clone the repo: ``git clone https://github.com/RIPE-NCC/ripe-atlas-software-probe.git`` - Build the needed .deb file in the current working directory: * ``cd ripe-atlas-software-probe`` << this will change into the root directory of the git repo that you have clone * ``git checkout BRANCH`` << if needed (optional) - * ``git submodule update`` << this will update the submodule within this branch * ``dpkg-buildpackage -b -us -uc`` << this will create the package * ``cp ../ripe-atlas-*.deb .`` + * ``cd .repo`` + * ``dpkg-buildpackage -b -us -uc`` << this will create the repository package To install DEB files for Debian or Debian-based distributions -------------------------------------------------------------- +============================================================= + +NOTE: The ripe-atlas-anchor package is intended for deploying Atlas + anchors. Please only install when instructed to do so by RIPE + NCC staff. + +Offline (locally built) +----------------------- To install, execute: + - (using root privileges): ``dpkg -i ripe-atlas-common_????_amd64.deb ripe-atlas-probe_????_all.deb`` - (using root privileges) ``systemctl enable ripe-atlas.service`` - (using root privileges) ``systemctl start ripe-atlas.service`` +Online (built by RIPE NCC) +-------------------------- + +To install, execute: + +- (on debian11) ``wget https://ftp.ripe.net/ripe/atlas/software-probe/debian/dists/bullseye/main/binary-amd64/ripe-atlas-repo_1.5-2_all.deb +- (on debian12) ``wget https://ftp.ripe.net/ripe/atlas/software-probe/debian/dists/bookworm/main/binary-amd64/ripe-atlas-repo_1.5-2_all.deb +- (using root privileges) ``dpkg -i ./ripe-atlas-repo_1.5-2_all.deb`` +- (using root privileges) ``apt-get update`` +- (using root privileges) ``apt-get install ripe-atlas-probe`` +- (using root privileges) ``systemctl enable ripe-atlas.service`` +- (using root privileges) ``systemctl start ripe-atlas.service`` + +Note that packages have been signed and can be verified using ``debsigs``, +for example: +``debsig-verify ./ripe-atlas-probe_????_amd64.deb`` + +This can only be done after the ripe-atlas-repo package has been installed. + To build IPKG files for OpenWRT -------------------------------- +=============================== The build process is performed using OpenWRT's build process. Currently compile tested on OpenWRT 22.03. OpenWRT 22.03 will be @@ -101,26 +177,26 @@ The branch checked out is master, other branches can be checked out by appending After adding the package can be selected using menuconfig and built as normal. 
To install IPKG files for OpenWRT ---------------------------------- +================================= To install, execute: + - ``opkg install ripe-atlas-common-????.ipkg ripe-atlas-software-probe-????.ipkg`` - ``service ripe-atlas start`` -Manual build (using systemd) ----------------------------- +Manual build +============================ To build using autoconf tooling and install the software probe, execute the following commands at the top level of the git repo: - ``autoreconf -iv`` -- ``./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib64 --runstatedir=/run --with-user=ripe-atlas --with-group=ripe-atlas --with-measurement-user=ripe-atlas-measurement --enable-systemd --enable-chown --enable-setcap-install`` +- ``./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib64 --runstatedir=/run --with-user=ripe-atlas --with-group=ripe-atlas --with-measurement-user=ripe-atlas-measurement --disable-systemd --enable-chown --enable-setcap-install`` - ``make`` Manual installation -------------------- +=================== To install, execute: -- (using root privileges) ``make install`` -- (using root privileges) ``systemctl enable ripe-atlas.service`` -- (using root privileges) ``systemctl start ripe-atlas.service`` +- (using root privileges) ``make install`` +- (using root privileges) ``/usr/sbin/ripe-atlas`` diff --git a/README.rst b/README.rst index 6130d40d..e4754627 100644 --- a/README.rst +++ b/README.rst @@ -3,13 +3,14 @@ RIPE Atlas Software Probe This is the source code for RIPE Atlas software probes. Currently this source code supports building: + - RPM package for Oracle EL8 (RHEL8), Oracle EL9 / Rocky Linux 9 (RHEL9) - DEB package for Debian 11/12 - OpenWRT package for OpenWRT 22.03 -Of the supported builds, only the RPM has received sufficient testing. -DEB and OpenWRT should be considered Beta quality and will be fully -supported in an upcoming release. +Of the supported builds, the RPM and DEB packages have received +sufficient testing. OpenWRT should be considered Beta quality and will be +fully supported in an upcoming release. See INSTALL.rst for installation instructions. @@ -34,4 +35,4 @@ Common installation instructions -------------------------------- The public key is stored in ``/etc/ripe-atlas/probe_key.pub``. Use -this to register your probe at . +This to register your probe at . 
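For reference, a sketch of the apt source entry that .repo/rules.mk generates from ripe-atlas.list.in during the repository-package build, assuming a Debian 12 (bookworm) host; the two lines below are illustrative expansions of the @repo@ and @distribution@ substitutions, not literal files from the patch:

# master branch: REPO expands to the empty string
deb https://ftp.ripe.net/ripe/atlas/software-probe/debian/ bookworm main
# any other release, e.g. a branch named devel: REPO expands to ".devel/"
deb https://ftp.ripe.net/ripe/atlas/software-probe/.devel/debian/ bookworm main

The generated ripe-atlas.list is installed to /etc/apt/sources.list.d/ and the armored key (ripe-atlas.pub.gpg.asc) to /etc/apt/trusted.gpg.d/ by ripe-atlas-repo.install, which is what lets the "apt-get install ripe-atlas-probe" step in INSTALL.rst resolve against ftp.ripe.net.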
diff --git a/VERSION b/VERSION index a6f65e58..0ead4c61 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -5090 +5100 diff --git a/bin/Makefile.am b/bin/Makefile.am index 6c916b8d..cfb05089 100644 --- a/bin/Makefile.am +++ b/bin/Makefile.am @@ -32,7 +32,13 @@ CLEANFILES = \ ripe-atlas: ripe-atlas.in @rm -f $@ $(AM_V_GEN)$(SUBST_PATHS) $< > $@ +if SHELL_FIXUP + $(AM_V_FIXUP)$(SHEFIX) $@ +endif resolvconf: resolvconf.in @rm -f $@ $(AM_V_GEN)$(SUBST_PATHS) $< > $@ +if SHELL_FIXUP + $(AM_V_FIXUP)$(SHEFIX) $@ +endif diff --git a/bin/arch/openwrt/openwrt-common.sh b/bin/arch/openwrt/openwrt-common.sh index 86e3f2ca..a913e9af 100644 --- a/bin/arch/openwrt/openwrt-common.sh +++ b/bin/arch/openwrt/openwrt-common.sh @@ -27,7 +27,6 @@ RESOLVCONF_CMD=/home/atlas/bin/resolvconf SU_CMD="sudo -E -u atlas" CHOWN_FOR_MSM=chown_for_msm CHMOD_FOR_MSM=: -AFTER_PASSWDSET=after_passwdset CHOWN_DATA_DIRS=chown_data_dirs HANDLE_STORAGE_CURRENT_TIME=handle_storage_current_time LOAD_STORAGE_CURRENT_TIME=load_storage_current_time @@ -70,11 +69,6 @@ ATLASINIT_DEVICE_OPT='-I br-lan' # Commands -after_passwdset() -{ - # Remount root read-only - $MOUNT_ROOT_RO -} check_sig() { file="$1" diff --git a/bin/array.lib.sh b/bin/array.lib.sh index f6f553ac..17faf108 100644 --- a/bin/array.lib.sh +++ b/bin/array.lib.sh @@ -1,4 +1,3 @@ -#!/bin/sh # # Array manipulation in scripts for shells that do not support arrays (ash) # Copyright (c) 2022 RIPE NCC, portions Michel Stam (c) 2013 diff --git a/bin/atlas_log.lib.sh b/bin/atlas_log.lib.sh index f74c3461..aad3e478 100644 --- a/bin/atlas_log.lib.sh +++ b/bin/atlas_log.lib.sh @@ -1,4 +1,3 @@ -#!/bin/sh # # RIPE Atlas logging in shell script # Copyright (c) 2022 RIPE NCC diff --git a/bin/class.lib.sh b/bin/class.lib.sh index 7eeb02d9..fed04e28 100644 --- a/bin/class.lib.sh +++ b/bin/class.lib.sh @@ -1,4 +1,3 @@ -#!/bin/sh # # Support functions for object oriented functionality in shell scripts # Copyright (c) 2022 RIPE NCC diff --git a/bin/json.lib.sh b/bin/json.lib.sh index 27b8717f..fa0ec3ff 100644 --- a/bin/json.lib.sh +++ b/bin/json.lib.sh @@ -1,4 +1,3 @@ -#!/bin/sh # # JSON string generation in shell scripts # Copyright (c) 2022 RIPE NCC diff --git a/bin/ripe-atlas.in b/bin/ripe-atlas.in index ce0b9a68..35802441 100755 --- a/bin/ripe-atlas.in +++ b/bin/ripe-atlas.in @@ -64,12 +64,6 @@ $LOAD_STORAGE_CURRENT_TIME $SET_DATE_FROM_CURRENTTIME_TXT # Source system specific initializing files -if [ -f $BIN_DIR/passwdset ] ; then -. $BIN_DIR/passwdset -rm -f $BIN_DIR/passwdset -$AFTER_PASSWDSET -fi - if [ -f $BIN_DIR/rc.local ] ; then . 
$BIN_DIR/rc.local fi diff --git a/bin/support.lib.sh b/bin/support.lib.sh index af05d36b..582a9209 100644 --- a/bin/support.lib.sh +++ b/bin/support.lib.sh @@ -1,4 +1,3 @@ -#!/bin/sh # Support routines # Copyright (c) 2022 RIPE NCC # diff --git a/config/Makefile.am b/config/Makefile.am index 68b11757..a9c4414c 100644 --- a/config/Makefile.am +++ b/config/Makefile.am @@ -89,8 +89,8 @@ common/FIRMWARE_APPS_VERSION: common/FIRMWARE_APPS_VERSION.in @rm -f $@ $(AM_V_GEN)$(SUBST_PATHS) $< > $@ -if SYSTEMD_INSTALL install-data-hook: +if SYSTEMD_INSTALL if SINGLE_USER @mv -f $(DESTDIR)$(systemd_sysusersdir)/ripe-atlas.user.conf \ $(DESTDIR)$(systemd_sysusersdir)/ripe-atlas.conf diff --git a/configure.ac b/configure.ac index 2513452e..598429fd 100644 --- a/configure.ac +++ b/configure.ac @@ -83,6 +83,12 @@ AC_ARG_WITH([install-mode], [AS_HELP_STRING([--with-install-mode], [define the p AM_CONDITIONAL([INSTALL_MODE_probe], [test "x${with_install_mode}" = 'xprobe']) AM_CONDITIONAL([INSTALL_MODE_anchor], [test "x${with_install_mode}" = 'xanchor']) +AC_ARG_WITH([shell-fixup], [AS_HELP_STRING([--with-shell-fixup], [force the shell to use in scripts])], [], [with_shell_fixup='']) +AM_CONDITIONAL([SHELL_FIXUP], [test "x${with_shell_fixup}" != 'x']) +if test "x${with_shell_fixup}" != 'x'; then + shebang="\#!${with_shell_fixup}" +fi + AC_PATH_PROG(SETCAP, setcap) AC_ARG_ENABLE([setcap-install], [AC_HELP_STRING([--enable-setcap-install], [install measurements with cap_net_raw @<:@default=yes@:>@])], [enable_setcap_install="${enableval}"],[enable_setcap_install=yes]) @@ -132,6 +138,7 @@ AM_CONDITIONAL(SYSTEMD_INSTALL, [test "x${SYSTEMDPATH}" != 'x' -a "x${enable_sys AC_SUBST(systemd_tmpfilesdir) AC_SUBST(systemd_unitsdir) AC_SUBST(systemd_sysusersdir) +AC_SUBST(shebang) # system paths atlas_libexecdir="\${libexecdir}/ripe-atlas" diff --git a/debian/changelog b/debian/changelog index d44530e3..6fae0dbe 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,17 @@ +ripe-atlas-software-probe (5100) bookworm bullseye; urgency=medium + + * All platforms + - As of this version, the ripe-atlas-probe-measurements repository on GitHub is deprecated. Its code has been merged into the ripe-atlas-software-probe repository + - Package can now be built with arbitrary shell shebangs. Note that only OpenWRT ash and bash are officially supported + - Bug fixed where permissions are wrong when systemd is disabled (reported by César de Tassis Filho mailto:ctassisf@gmail.com) + - A new code signing key has been introduced. It only signs Debian packages currently, but will sign the RHEL packages in an upcoming release as well. 
The current RHEL key will then be deprecated + * Software probes + - Streamlined deployment on RHEL CI/CD + - systemd unit ownership moved to probe and anchor packages + - Debian 11/12 x86_64 support added + + -- RIPE Atlas Team Tue, 24 Sep 2024 00:00:00 -0000 + ripe-atlas-software-probe (5090) bookworm bullseye; urgency=medium * All platforms diff --git a/debian/control b/debian/control index 29cfb96d..3c674f43 100644 --- a/debian/control +++ b/debian/control @@ -11,11 +11,14 @@ Maintainer: Michel Stam Package: ripe-atlas-common Architecture: any Depends: + bash, ${shlibs:Depends}, ${misc:Depends}, - libssl3 | libssl1, + libssl3 | libssl1.1, net-tools, openssh-client, + psmisc, + systemd, Section: net Priority: optional Homepage: https://atlas.ripe.net/ @@ -33,7 +36,7 @@ Description: RIPE Atlas essential components Package: ripe-atlas-probe Architecture: all Depends: ripe-atlas-common (= ${binary:Version}) -Conflicts: ripe-atlas-anchor +Conflicts: ripe-atlas-anchor, atlasswprobe Replaces: atlasswprobe Section: net Priority: optional @@ -54,7 +57,7 @@ Description: RIPE Atlas probe specific files and configuration Package: ripe-atlas-anchor Architecture: all Depends: ripe-atlas-common (= ${binary:Version}) -Conflicts: ripe-atlas-probe +Conflicts: ripe-atlas-probe, atlasswprobe Replaces: atlasswprobe Section: net Priority: optional diff --git a/debian/copyright b/debian/copyright index 6ebdd8ad..b4da7da7 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,26 +1,13 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Source: +Source: https://github.com/RIPE-NCC/ripe-atlas-software-probe.git Upstream-Name: ripe-atlas-software-probe -Upstream-Contact: +Upstream-Contact: RIPE Atlas Files: * Copyright: - - -License: - - - . - - -# If you want to use GPL v2 or later for the /debian/* files use -# the following clauses, or change it to suit. Delete these two lines -Files: - debian/* -Copyright: - 2023 atlas -License: GPL-2+ + 2023 RIPE NCC +License: GPL-3+ This package is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or @@ -35,13 +22,4 @@ License: GPL-2+ along with this program. If not, see Comment: On Debian systems, the complete text of the GNU General - Public License version 2 can be found in "/usr/share/common-licenses/GPL-2". - -# Please also look if there are files or directories which have a -# different copyright/license attached and list them here. -# Please avoid picking licenses with terms that are more restrictive than the -# packaged work, as it may make Debian's contributions unacceptable upstream. -# -# If you need, there are some extra license texts available in two places: -# /usr/share/debhelper/dh_make/licenses/ -# /usr/share/common-licenses/ + Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". 
diff --git a/debian/ripe-atlas-anchor.install b/debian/ripe-atlas-anchor.install index 34df45da..ff164801 100644 --- a/debian/ripe-atlas-anchor.install +++ b/debian/ripe-atlas-anchor.install @@ -1,2 +1,3 @@ config/anchor/known_hosts.reg usr/share/ripe-atlas/ config/anchor/reg_servers.sh.prod usr/libexec/ripe-atlas/scripts/ +config/common/ripe-atlas.service usr/lib/systemd/system/ diff --git a/debian/ripe-atlas-anchor.postinst b/debian/ripe-atlas-anchor.postinst index 2dcd48fa..bb095733 100755 --- a/debian/ripe-atlas-anchor.postinst +++ b/debian/ripe-atlas-anchor.postinst @@ -1,5 +1,5 @@ #!/bin/sh -# postinst script for ripe-atlas-anchor +# postinst script for ripe-atlas-anchor. # # See: dh_installdeb(1). @@ -23,23 +23,44 @@ set -e #DEBHELPER# -atlas_olddir='var/atlas-probe' -atlas_newdir='etc/ripe-atlas' -atlas_newkey="${atlas_newdir}/probe_key" -atlas_newmode="${atlas_newdir}/mode" +service_name='ripe-atlas.service' +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' + +get_state() +{ + if [ -f "${atlas_upgradedir}/$1" ]; then + return 0 + else + return 1 + fi +} + +init_state() +{ + mkdir -p "${atlas_upgradedir}" + if systemctl "$1" --quiet "${service_name}" 1>/dev/null 2>&1; then + touch "${atlas_upgradedir}/$1" 2>/dev/null + else + rm -f "${atlas_upgradedir}/$1" 2>/dev/null + fi +} + +clear_state() +{ + rm -rf "${atlas_upgradedir}" 2>/dev/null +} case "${1}" in configure) - # This will be regenerated when the service restarts - rm -f etc/ripe-atlas/reg_servers.sh 1>/dev/null 2>&1 - - # clean up old atlas installation, it is now obsolete - if ( [ -f "%{atlas_newkey}" ] && - [ -f "%{atlas_newkey}.pub" ] && - [ -f "%{atlas_newmode}" ] && - [ -d "%{atlas_olddir}" ] ); then - rm -rf "%{atlas_olddir}" + if get_state "is-enabled"; then + systemctl enable "${service_name}" 1>/dev/null 2>&1 fi + + if get_state "is-active"; then + systemctl start "${service_name}" 1>/dev/null 2>&1 + fi + + clear_state ;; abort-upgrade|abort-remove|abort-deconfigure) diff --git a/debian/ripe-atlas-anchor.postrm b/debian/ripe-atlas-anchor.postrm new file mode 100755 index 00000000..83832984 --- /dev/null +++ b/debian/ripe-atlas-anchor.postrm @@ -0,0 +1,60 @@ +#!/bin/sh +# postrm script for ripe-atlas-anchor. +# +# See: dh_installdeb(1). + +set -e + +# Summary of how this script can be called: +# * 'remove' +# * 'purge' +# * 'upgrade' +# * 'failed-upgrade' +# * 'abort-install' +# * 'abort-install' +# * 'abort-upgrade' +# * 'disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package. + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +atlas_newdir='/etc/ripe-atlas' +fix_rundir() +{ + version=$(. 
/etc/os-release && printf '%s\n' "${VERSION%% *}") + if [ "$version" = "12" ]; then + echo "/run/ripe-atlas" + else + echo "/var/run/ripe-atlas" + fi +} + +case "${1}" in + purge|remove|upgrade) + rm -f $(fix_rundir)/status/* ${atlas_newdir}/reg_servers.sh \ + 1>/dev/null 2>&1 || : + if [ "${1}" = 'purge' ] || [ "${1}" = 'remove' ]; then + rm -rf $(fix_rundir) /var/spool/ripe-atlas \ + 1>/dev/null 2>&1 || : + fi + if [ "${1}" = 'purge' ]; then + rm -rf ${atlas_newdir} 1>/dev/null 2>&1 || : + fi + systemctl daemon-reload 1>/dev/null 2>&1 + ;; + + failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "postrm called with unknown argument '$1'" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/debian/ripe-atlas-anchor.prerm b/debian/ripe-atlas-anchor.prerm new file mode 100755 index 00000000..66c16038 --- /dev/null +++ b/debian/ripe-atlas-anchor.prerm @@ -0,0 +1,87 @@ +#!/bin/sh +# prerm script for ripe-atlas-anchor. +# +# See: dh_installdeb(1). + +set -e + +# Summary of how this script can be called: +# * 'remove' +# * 'purge' +# * 'upgrade' +# * 'failed-upgrade' +# * 'abort-install' +# * 'abort-install' +# * 'abort-upgrade' +# * 'disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package. + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' +service_name='ripe-atlas.service' + +get_state() +{ + if [ -f "${atlas_upgradedir}/$1" ]; then + return 0 + else + return 1 + fi +} + +init_state() +{ + mkdir -p "${atlas_upgradedir}" + if systemctl "$1" --quiet "${service_name}" 1>/dev/null 2>&1; then + touch "${atlas_upgradedir}/$1" 2>/dev/null + else + rm -f "${atlas_upgradedir}/$1" 2>/dev/null + fi +} + +clear_state() +{ + rm -rf "${atlas_upgradedir}" 2>/dev/null +} + +case "${1}" in + purge) + clear_state + ;; + + remove|upgrade) + if ! [ -f "${atlas_upgradedir}/is-active" ]; then + init_state "is-active" + fi + + if ! 
[ -f "${atlas_upgradedir}/is-enabled" ]; then + init_state "is-enabled" + fi + + if get_state "is-active"; then + systemctl stop ${service_name} 1>/dev/null 2>&1 + fi + if get_state "is-enabled"; then + systemctl disable ${service_name} 1>/dev/null 2>&1 + fi + if [ "${1}" = "remove" ]; then + clear_state + fi + ;; + + failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "prerm called with unknown argument '$1'" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/debian/ripe-atlas-common.postinst b/debian/ripe-atlas-common.postinst index a9706ca1..97a43ffb 100755 --- a/debian/ripe-atlas-common.postinst +++ b/debian/ripe-atlas-common.postinst @@ -23,20 +23,24 @@ set -e #DEBHELPER# -ATLAS_MEAS_OWN='ripe-atlas-measurement:ripe-atlas' -ATLAS_MAIN_OWN='ripe-atlas:ripe-atlas' -ATLAS_CONF_DIR='etc/ripe-atlas' -ATLAS_MEAS_DIR='usr/libexec/ripe-atlas/measurement' -ATLAS_CONF_DIRS="${ATLAS_CONF_DIR}" +atlas_measurement='ripe-atlas-measurement' +atlas_user='ripe-atlas' +atlas_group='ripe-atlas' +atlas_measdir='/usr/libexec/ripe-atlas/measurement' +atlas_newdir='/etc/ripe-atlas' +atlas_newmode="${atlas_newdir}/mode" case "${1}" in configure) - chown -R ${ATLAS_MAIN_OWN} ${ATLAS_CONF_DIRS} - chmod 0770 ${ATLAS_CONF_DIRS} - - chown ${ATLAS_MEAS_OWN} ${ATLAS_MEAS_DIR}/busybox - chmod 4750 ${ATLAS_MEAS_DIR}/busybox - setcap cap_net_raw=ep ${ATLAS_MEAS_DIR}/busybox + chown -R ${atlas_user}:${atlas_group} ${atlas_newdir} + chmod 0770 ${atlas_newdir} + chown -R ${atlas_measurement}:${atlas_group} ${atlas_measdir}/busybox + chmod 4750 ${atlas_measdir}/busybox + setcap cap_net_raw=ep ${atlas_measdir}/busybox + + if [ -f "${atlas_newmode}.atlasswprobe" ]; then + mv "${atlas_newmode}.atlasswprobe" "${atlas_newmode}" + fi ;; abort-upgrade|abort-remove|abort-deconfigure) diff --git a/debian/ripe-atlas-common.postrm b/debian/ripe-atlas-common.postrm index 3dbfb25d..db3e7566 100755 --- a/debian/ripe-atlas-common.postrm +++ b/debian/ripe-atlas-common.postrm @@ -1,15 +1,10 @@ #!/bin/sh -# prerm script for ripe-atlas-common. +# postrm script for ripe-atlas-common. # # See: dh_installdeb(1). set -e -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - # Summary of how this script can be called: # * 'remove' # * 'purge' @@ -23,32 +18,29 @@ set -e # for details, see https://www.debian.org/doc/debian-policy/ or # the debian-policy package. -case "${1}" in - purge) - rm -f \ - run/ripe-atlas/pids/* \ - run/ripe-atlas/status/* \ - var/spool/ripe-atlas/crons/* \ - var/spool/ripe-atlas/crons/*/* \ - var/spool/ripe-atlas/data/* \ - var/spool/ripe-atlas/data/*/* \ - etc/ripe-atlas/probe_key \ - etc/ripe-atlas/probe_key.pub \ - etc/ripe-atlas/mode \ - 1>/dev/null 2>&1 || true - ;; +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. - remove|upgrade) - rm -f \ - run/ripe-atlas/status/* \ - 1>/dev/null 2>&1 || true - ;; +#DEBHELPER# - failed-upgrade|abort-install|abort-upgrade|disappear) +atlas_newdir='/etc/ripe-atlas' + +fix_rundir() +{ + version=$(. 
/etc/os-release && printf '%s\n' "${VERSION%% *}") + if [ "$version" = "12" ]; then + echo "/run/ripe-atlas" + else + echo "/var/run/ripe-atlas" + fi +} + +case "${1}" in + purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) ;; *) - echo "prerm called with unknown argument '$1'" >&2 + echo "postrm called with unknown argument '$1'" >&2 exit 1 ;; esac diff --git a/debian/ripe-atlas-common.preinst b/debian/ripe-atlas-common.preinst index fb09d57d..48faf1d2 100755 --- a/debian/ripe-atlas-common.preinst +++ b/debian/ripe-atlas-common.preinst @@ -5,38 +5,73 @@ set -e +# Summary of how this script can be called: +# * 'install' +# * 'install' +# * 'upgrade' +# * 'abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package. + # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. #DEBHELPER# -atlas_olddir='var/atlas-probe' +atlas_measurement='ripe-atlas-measurement' +atlas_user='ripe-atlas' +atlas_group='ripe-atlas' +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' +atlas_olddir='/var/atlas-probe' atlas_oldkey="${atlas_olddir}/etc/probe_key" atlas_oldmode="${atlas_olddir}/state/mode" -atlas_newdir='etc/ripe-atlas' +atlas_oldconfig="${atlas_olddir}/state/config.txt" +atlas_newdir='/etc/ripe-atlas' atlas_newkey="${atlas_newdir}/probe_key" atlas_newmode="${atlas_newdir}/mode" -# Summary of how this script can be called: -# * 'install' -# * 'install' -# * 'upgrade' -# * 'abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package. +atlas_newconfig="${atlas_newdir}/config.txt" + +migrate_file() +{ + if ( [ -f "$1" ] && ! cmp -s "$1" "$2" 1>/dev/null 2>&1 ); then + install -D -p -m "$3" -o "$4" -g "$5" "$1" "$2" 1>/dev/null 2>&1 + fi +} + +fix_rundir() +{ + version=$(. /etc/os-release && printf '%s\n' "${VERSION%% *}") + if [ "$version" = "12" ]; then + echo "/run/ripe-atlas" + else + echo "/var/run/ripe-atlas" + fi +} case "$1" in - configure) - cat etc/passwd - mkdir -p -m 0770 "${atlas_newdir}" - cp "${atlas_oldkey}" "${atlas_newkey}" 1>/dev/null 2>&1 || true - cp "${atlas_oldkey}.pub" "${atlas_newkey}.pub" 1>/dev/null 2>&1 || true - cp "${atlas_oldmode}" "${atlas_newmode}" 1>/dev/null 2>&1 || true - chmod 664 "${atlas_newkey}.pub" 1>/dev/null 2>&1 || true - chmod 400 "${atlas_newkey}" 1>/dev/null 2>&1 || true - chown -R ripe-atlas:ripe-atlas "${atlas_newdir}" 1>/dev/null 2>&1 + install) + /usr/bin/systemd-sysusers --replace=/usr/lib/sysusers.d/ripe-atlas.conf - </dev/null 2>&1 + fi + migrate_file "${atlas_oldkey}" "${atlas_newkey}" 0600 \ + "${atlas_user}" "${atlas_group}" + migrate_file "${atlas_oldkey}.pub" "${atlas_newkey}.pub" 0644 \ + "${atlas_user}" "${atlas_group}" + migrate_file "${atlas_oldmode}" "${atlas_newmode}.atlasswprobe" 0644 \ + "${atlas_user}" "${atlas_group}" + migrate_file "${atlas_oldconfig}" "${atlas_newconfig}" 0644 \ + "${atlas_user}" "${atlas_group}" ;; - install|upgrade|abort-upgrade) + upgrade|abort-upgrade) ;; *) diff --git a/debian/ripe-atlas-common.prerm b/debian/ripe-atlas-common.prerm index ed2ad4b3..c2d8c107 100755 --- a/debian/ripe-atlas-common.prerm +++ b/debian/ripe-atlas-common.prerm @@ -1,15 +1,10 @@ #!/bin/sh -# prerm script for ripe-atlas-anchor. +# prerm script for ripe-atlas-common. # # See: dh_installdeb(1). set -e -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - # Summary of how this script can be called: # * 'remove' # * 'purge' @@ -23,11 +18,65 @@ set -e # for details, see https://www.debian.org/doc/debian-policy/ or # the debian-policy package. +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +atlas_newdir='/etc/ripe-atlas' +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' +service_name='ripe-atlas.service' + +get_state() +{ + if [ -f "${atlas_upgradedir}/$1" ]; then + return 0 + else + return 1 + fi +} + +init_state() +{ + mkdir -p "${atlas_upgradedir}" + if systemctl "$1" --quiet "${service_name}" 1>/dev/null 2>&1; then + touch "${atlas_upgradedir}/$1" 2>/dev/null + else + rm -f "${atlas_upgradedir}/$1" 2>/dev/null + fi +} + +clear_state() +{ + rm -rf "${atlas_upgradedir}" 2>/dev/null +} + + case "${1}" in - purge|remove|upgrade) - rm -f \ - etc/ripe-atlas/reg_servers.sh \ - 1>/dev/null 2>&1 + purge) + clear_state + ;; + + remove|upgrade) + if ! [ -f "${atlas_upgradedir}/is-active" ]; then + init_state "is-active" + fi + + if ! [ -f "${atlas_upgradedir}/is-enabled" ]; then + init_state "is-enabled" + fi + + if get_state "is-active"; then + systemctl stop "${service_name}" 1>/dev/null 2>&1 + fi + + if get_state "is-enabled"; then + systemctl disable "${service_name}" 1>/dev/null 2>&1 + fi + + if [ "${1}" = "remove" ]; then + clear_state + fi ;; failed-upgrade|abort-install|abort-upgrade|disappear) diff --git a/debian/ripe-atlas-probe.install b/debian/ripe-atlas-probe.install index ffed90d8..f624d3ce 100644 --- a/debian/ripe-atlas-probe.install +++ b/debian/ripe-atlas-probe.install @@ -1,2 +1,3 @@ config/probe/known_hosts.reg usr/share/ripe-atlas/ config/probe/reg_servers.sh.prod usr/libexec/ripe-atlas/scripts/ +config/common/ripe-atlas.service usr/lib/systemd/system/ diff --git a/debian/ripe-atlas-probe.postinst b/debian/ripe-atlas-probe.postinst index f222fec5..40b0e67a 100755 --- a/debian/ripe-atlas-probe.postinst +++ b/debian/ripe-atlas-probe.postinst @@ -23,23 +23,42 @@ set -e #DEBHELPER# -atlas_olddir='var/atlas-probe' -atlas_newdir='etc/ripe-atlas' -atlas_newkey="${atlas_newdir}/probe_key" -atlas_newmode="${atlas_newdir}/mode" +service_name='ripe-atlas.service' +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' + +get_state() +{ + if [ -f "${atlas_upgradedir}/$1" ]; then + return 0 + else + return 1 + fi +} + +init_state() +{ + mkdir -p "${atlas_upgradedir}" + if systemctl "$1" --quiet "${service_name}" 1>/dev/null 2>&1; then + touch "${atlas_upgradedir}/$1" 2>/dev/null + else + rm -f "${atlas_upgradedir}/$1" 2>/dev/null + fi +} + +clear_state() +{ + rm -rf "${atlas_upgradedir}" 2>/dev/null +} case "${1}" in configure) - # This will be regenerated when the service restarts - rm -f etc/ripe-atlas/reg_servers.sh 1>/dev/null 2>&1 - - # clean up old atlas installation, it is now obsolete - if ( [ -f "%{atlas_newkey}" ] && - [ -f "%{atlas_newkey}.pub" ] && - [ -f "%{atlas_newmode}" ] && - [ -d "%{atlas_olddir}" ] ); then - rm -rf "%{atlas_olddir}" + if get_state "is-enabled"; then + systemctl enable ${service_name} 1>/dev/null 2>&1 + fi + if get_state "is-active"; then + systemctl start ${service_name} 1>/dev/null 2>&1 fi + clear_state ;; abort-upgrade|abort-remove|abort-deconfigure) diff --git a/debian/ripe-atlas-probe.postrm b/debian/ripe-atlas-probe.postrm index 294d664a..1d695692 100755 --- a/debian/ripe-atlas-probe.postrm +++ b/debian/ripe-atlas-probe.postrm @@ -5,11 +5,6 @@ set -e -# dh_installdeb will replace this with shell code automatically -# 
generated by other debhelper scripts. - -#DEBHELPER# - # Summary of how this script can be called: # * 'remove' # * 'purge' @@ -23,11 +18,35 @@ set -e # for details, see https://www.debian.org/doc/debian-policy/ or # the debian-policy package. +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +atlas_newdir='/etc/ripe-atlas' + +fix_rundir() +{ + version=$(. /etc/os-release && printf '%s\n' "${VERSION%% *}") + if [ "$version" = "12" ]; then + echo "/run/ripe-atlas" + else + echo "/var/run/ripe-atlas" + fi +} + case "${1}" in purge|remove|upgrade) - rm -f \ - etc/ripe-atlas/reg_servers.sh \ - 1>/dev/null 2>&1 + rm -f $(fix_rundir)/status/* ${atlas_newdir}/reg_servers.sh \ + 1>/dev/null 2>&1 || : + if [ "${1}" = 'purge' ] || [ "${1}" = 'remove' ]; then + rm -rf $(fix_rundir) /var/spool/ripe-atlas \ + 1>/dev/null 2>&1 || : + fi + if [ "${1}" = 'purge' ]; then + rm -rf ${atlas_newdir} 1>/dev/null 2>&1 || : + fi + systemctl daemon-reload 1>/dev/null 2>&1 ;; failed-upgrade|abort-install|abort-upgrade|disappear) diff --git a/debian/ripe-atlas-probe.prerm b/debian/ripe-atlas-probe.prerm new file mode 100755 index 00000000..3e0710aa --- /dev/null +++ b/debian/ripe-atlas-probe.prerm @@ -0,0 +1,89 @@ +#!/bin/sh +# prerm script for ripe-atlas-probe. +# +# See: dh_installdeb(1). + +set -e + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +# Summary of how this script can be called: +# * 'remove' +# * 'purge' +# * 'upgrade' +# * 'failed-upgrade' +# * 'abort-install' +# * 'abort-install' +# * 'abort-upgrade' +# * 'disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package. + +atlas_upgradedir='/var/lib/ripe-atlas-upgrade' +service_name='ripe-atlas.service' + +get_state() +{ + if [ -f "${atlas_upgradedir}/$1" ]; then + return 0 + else + return 1 + fi +} + +init_state() +{ + mkdir -p "${atlas_upgradedir}" + if systemctl "$1" --quiet "${service_name}" 1>/dev/null 2>&1; then + touch "${atlas_upgradedir}/$1" 2>/dev/null + else + rm -f "${atlas_upgradedir}/$1" 2>/dev/null + fi +} + +clear_state() +{ + rm -rf "${atlas_upgradedir}" 2>/dev/null +} + +case "${1}" in + purge) + clear_state + ;; + + remove|upgrade) + if ! [ -f "${atlas_upgradedir}/is-active" ]; then + init_state "is-active" + fi + + if ! 
[ -f "${atlas_upgradedir}/is-enabled" ]; then + init_state "is-enabled" + fi + + if get_state "is-active"; then + systemctl stop ${service_name} 1>/dev/null 2>&1 + fi + + if get_state "is-enabled"; then + systemctl disable ${service_name} 1>/dev/null 2>&1 + fi + + if [ "${1}" = "remove" ]; then + clear_state + fi + ;; + + failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "prerm called with unknown argument '$1'" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/debian/rules b/debian/rules index 0a2a202b..4f9acf35 100755 --- a/debian/rules +++ b/debian/rules @@ -21,6 +21,7 @@ override_dh_auto_configure: --enable-systemd \ --disable-chown \ --disable-setcap-install \ + --with-shell-fixup=/usr/bin/bash \ --with-install-mode=probe # Installing/restarting systemd will conflict with the tmpusers being created @@ -33,11 +34,6 @@ sysusers: --name=ripe-atlas override_dh_installsystemd: - dh_installsystemd \ - -pripe-atlas-common \ - --no-enable \ - --no-start \ - --name=ripe-atlas override_dh_installtmpfiles: sysusers dh_installtmpfiles \ @@ -51,7 +47,7 @@ override_dh_auto_install: # Probe specific files added in other package rm -f debian/ripe-atlas-common/usr/libexec/ripe-atlas/scripts/reg_servers.sh.prod rm -f debian/ripe-atlas-common/usr/share/ripe-atlas/known_hosts.reg - + rm -f debian/ripe-atlas-common/usr/lib/systemd/system/ripe-atlas.service # The only tests that exist are in libevent and these # do not work override_dh_auto_test: diff --git a/debian/upstream/metadata b/debian/upstream/metadata index af5a2957..c390afd1 100644 --- a/debian/upstream/metadata +++ b/debian/upstream/metadata @@ -1,6 +1,6 @@ Bug-Database: https://github.com/RIPE-NCC/ripe-atlas-software-probe/issues Bug-Submit: https://github.com/RIPE-NCC/ripe-atlas-software-probe/issues/new -Changelog: https://github.com/RIPE-NCC/ripe-atlas-software-probe/blob/master/CHANGES -Documentation: https://github.com/RIPE-NCC/ripe-atlas-software-probe/wiki +Changelog: https://github.com/RIPE-NCC/ripe-atlas-software-probe/blob/master/CHANGES.rst +Documentation: https://atlas.ripe.net/docs/ Repository-Browse: https://github.com/RIPE-NCC/ripe-atlas-software-probe Repository: https://github.com/RIPE-NCC/ripe-atlas-software-probe.git diff --git a/debian/watch b/debian/watch index 6755e6a9..6d479cbf 100644 --- a/debian/watch +++ b/debian/watch @@ -2,5 +2,5 @@ version=4 # Direct Git. 
-opts="mode=git" http://git.example.com/ripe-atlas-software-probe.git \ +opts="mode=git" https://github.com/RIPE-NCC/ripe-atlas-software-probe.git \ refs/tags/v([\d\.]+) diff --git a/probe-busybox b/probe-busybox deleted file mode 160000 index d72872cc..00000000 --- a/probe-busybox +++ /dev/null @@ -1 +0,0 @@ -Subproject commit d72872cc6bb9ed5da47dcf1dc726fd8a36c6058f diff --git a/probe-busybox/.config b/probe-busybox/.config new file mode 100644 index 00000000..2bb2c5e4 --- /dev/null +++ b/probe-busybox/.config @@ -0,0 +1,213 @@ +# +# Automatically generated make config: don't edit +# Busybox version: 1.26.2 +# Wed Dec 14 02:15:43 2022 +# +CONFIG_HAVE_DOT_CONFIG=y + +# +# Busybox Settings +# +CONFIG_DESKTOP=y +# CONFIG_EXTRA_COMPAT is not set +CONFIG_INCLUDE_SUSv2=y +# CONFIG_USE_PORTABLE_CODE is not set +CONFIG_PLATFORM_LINUX=y +CONFIG_SHOW_USAGE=y +CONFIG_FEATURE_VERBOSE_USAGE=y +CONFIG_FEATURE_COMPRESS_USAGE=y +CONFIG_BUSYBOX=y +CONFIG_FEATURE_INSTALLER=y +# CONFIG_INSTALL_NO_USR is not set +# CONFIG_PAM is not set +CONFIG_LONG_OPTS=y +CONFIG_FEATURE_DEVPTS=y +# CONFIG_FEATURE_CLEAN_UP is not set +# CONFIG_FEATURE_UTMP is not set +# CONFIG_FEATURE_WTMP is not set +CONFIG_FEATURE_PIDFILE=y +CONFIG_PID_FILE_PATH="/var/run" +CONFIG_FEATURE_SUID=y +CONFIG_FEATURE_SUID_CONFIG=y +CONFIG_FEATURE_SUID_CONFIG_QUIET=y +# CONFIG_SELINUX is not set +# CONFIG_FEATURE_PREFER_APPLETS is not set +CONFIG_BUSYBOX_EXEC_PATH="/proc/self/exe" +CONFIG_FEATURE_SYSLOG=y +# CONFIG_FEATURE_HAVE_RPC is not set + +# +# Build Options +# +# CONFIG_STATIC is not set +# CONFIG_PIE is not set +# CONFIG_NOMMU is not set +# CONFIG_BUILD_LIBBUSYBOX is not set +# CONFIG_FEATURE_INDIVIDUAL is not set +# CONFIG_FEATURE_SHARED_BUSYBOX is not set +CONFIG_LFS=y +CONFIG_CROSS_COMPILER_PREFIX="" +CONFIG_SYSROOT="" +CONFIG_EXTRA_CFLAGS="-Ilibevent-2.1.11-stable/include" +CONFIG_EXTRA_LDFLAGS="" +CONFIG_EXTRA_LDLIBS="" + +# +# Installation Options ("make install" behavior) +# +CONFIG_INSTALL_APPLET_SYMLINKS=y +# CONFIG_INSTALL_APPLET_HARDLINKS is not set +# CONFIG_INSTALL_APPLET_SCRIPT_WRAPPERS is not set +# CONFIG_INSTALL_APPLET_DONT is not set +# CONFIG_INSTALL_SH_APPLET_SYMLINK is not set +# CONFIG_INSTALL_SH_APPLET_HARDLINK is not set +# CONFIG_INSTALL_SH_APPLET_SCRIPT_WRAPPER is not set +CONFIG_PREFIX="./_install" + +# +# Debugging Options +# +# CONFIG_DEBUG is not set +# CONFIG_DEBUG_PESSIMIZE is not set +# CONFIG_DEBUG_SANITIZE is not set +# CONFIG_UNIT_TEST is not set +# CONFIG_WERROR is not set +CONFIG_NO_DEBUG_LIB=y +# CONFIG_DMALLOC is not set +# CONFIG_EFENCE is not set + +# +# Busybox Library Tuning +# +# CONFIG_FEATURE_USE_BSS_TAIL is not set +CONFIG_FEATURE_BUFFERS_USE_MALLOC=y +# CONFIG_FEATURE_BUFFERS_GO_ON_STACK is not set +# CONFIG_FEATURE_BUFFERS_GO_IN_BSS is not set +CONFIG_PASSWORD_MINLEN=6 +CONFIG_MD5_SMALL=1 +CONFIG_SHA3_SMALL=1 +# CONFIG_FEATURE_FAST_TOP is not set +# CONFIG_FEATURE_ETC_NETWORKS is not set +# CONFIG_FEATURE_USE_TERMIOS is not set +CONFIG_FEATURE_EDITING=y +CONFIG_FEATURE_EDITING_MAX_LEN=1024 +# CONFIG_FEATURE_EDITING_VI is not set +CONFIG_FEATURE_EDITING_HISTORY=255 +CONFIG_FEATURE_EDITING_SAVEHISTORY=y +# CONFIG_FEATURE_EDITING_SAVE_ON_EXIT is not set +CONFIG_FEATURE_REVERSE_SEARCH=y +CONFIG_FEATURE_TAB_COMPLETION=y +CONFIG_FEATURE_USERNAME_COMPLETION=y +CONFIG_FEATURE_EDITING_FANCY_PROMPT=y +# CONFIG_FEATURE_EDITING_ASK_TERMINAL is not set +# CONFIG_LOCALE_SUPPORT is not set +CONFIG_UNICODE_SUPPORT=y +# CONFIG_UNICODE_USING_LOCALE is not set +# CONFIG_FEATURE_CHECK_UNICODE_IN_ENV is not 
set +CONFIG_SUBST_WCHAR=63 +CONFIG_LAST_SUPPORTED_WCHAR=767 +# CONFIG_UNICODE_COMBINING_WCHARS is not set +# CONFIG_UNICODE_WIDE_WCHARS is not set +# CONFIG_UNICODE_BIDI_SUPPORT is not set +# CONFIG_UNICODE_NEUTRAL_TABLE is not set +# CONFIG_UNICODE_PRESERVE_BROKEN is not set +CONFIG_FEATURE_NON_POSIX_CP=y +# CONFIG_FEATURE_VERBOSE_CP_MESSAGE is not set +CONFIG_FEATURE_USE_SENDFILE=y +CONFIG_FEATURE_COPYBUF_KB=4 +CONFIG_FEATURE_SKIP_ROOTFS=y +CONFIG_MONOTONIC_SYSCALL=y +CONFIG_IOCTL_HEX2STR_ERROR=y +CONFIG_FEATURE_HWIB=y + +# +# Applets +# + +# +# Archival Utilities +# +# CONFIG_FEATURE_SEAMLESS_XZ is not set +# CONFIG_FEATURE_SEAMLESS_LZMA is not set +# CONFIG_FEATURE_SEAMLESS_BZ2 is not set +# CONFIG_FEATURE_SEAMLESS_GZ is not set +# CONFIG_FEATURE_SEAMLESS_Z is not set +# CONFIG_UNCOMPRESS is not set +# CONFIG_GUNZIP is not set +# CONFIG_ZCAT is not set +# CONFIG_FEATURE_GUNZIP_LONG_OPTIONS is not set +# CONFIG_BUNZIP2 is not set +# CONFIG_BZCAT is not set +# CONFIG_UNLZMA is not set +# CONFIG_LZCAT is not set +# CONFIG_LZMA is not set +# CONFIG_FEATURE_LZMA_FAST is not set +# CONFIG_UNXZ is not set +# CONFIG_XZCAT is not set +# CONFIG_XZ is not set +# CONFIG_GZIP is not set +# CONFIG_FEATURE_GZIP_LONG_OPTIONS is not set +CONFIG_GZIP_FAST=0 +# CONFIG_FEATURE_GZIP_LEVELS is not set + +# +# Coreutils +# +CONFIG_BUDDYINFO=y +CONFIG_CONDMV=y +CONFIG_DATE=y +# CONFIG_FEATURE_DATE_ISOFMT is not set +# CONFIG_FEATURE_DATE_NANO is not set +# CONFIG_FEATURE_DATE_COMPAT is not set +CONFIG_DFRM=y + +# +# Common options +# +CONFIG_FEATURE_VERBOSE=y +# CONFIG_FEATURE_PRESERVE_HARDLINKS is not set +# CONFIG_FEATURE_AUTOWIDTH is not set +# CONFIG_FEATURE_HUMAN_READABLE is not set + +# +# Eperd +# +CONFIG_EOOQD=y +CONFIG_EPERD=y +CONFIG_EVHTTPGET=y +CONFIG_FEATURE_EVHTTPGET_HTTPS=y +CONFIG_EVNTP=y +CONFIG_EVPING=y +CONFIG_EVSSLGETCERT=y +CONFIG_EVTDIG=y +CONFIG_FEATURE_EVTDIG_TLS=y +# CONFIG_FEATURE_EVTDIG_DEBUG is not set +CONFIG_EVTLSGETCERT=y +# CONFIG_EVTLSSCAN is not set +CONFIG_EVTRACEROUTE=y + +# +# Miscellaneous Utilities +# +CONFIG_ONLYUPTIME=y +CONFIG_PERD=y +CONFIG_FEATURE_PERD_D=y +CONFIG_RCHOOSE=y +CONFIG_RPTUPTIME=y + +# +# Networking Utilities +# +CONFIG_FEATURE_IPV6=y +# CONFIG_FEATURE_UNIX_LOCAL is not set +CONFIG_FEATURE_PREFER_IPV4_ADDRESS=y +# CONFIG_VERBOSE_RESOLUTION_ERRORS is not set +CONFIG_ATLASINIT=y +CONFIG_HTTPPOST=y +CONFIG_RPTADDRS=y +CONFIG_RPTRA6=y +CONFIG_RXTXRPT=y +CONFIG_TELNETD=y +CONFIG_FEATURE_TELNETD_STANDALONE=y +CONFIG_FEATURE_TELNETD_INETD_WAIT=y +CONFIG_IFUPDOWN_UDHCPC_CMD_OPTIONS="" diff --git a/probe-busybox/AUTHORS b/probe-busybox/AUTHORS new file mode 100644 index 00000000..320f7979 --- /dev/null +++ b/probe-busybox/AUTHORS @@ -0,0 +1,186 @@ +List of the authors of code contained in BusyBox. + +If you have code in BusyBox, you should be listed here. If you should be +listed, or the description of what you have done needs more detail, or is +incorrect, _please_ let me know. + + -Erik + +----------- + +Peter Willis + eject + +Emanuele Aina + run-parts + +Erik Andersen + Tons of new stuff, major rewrite of most of the + core apps, tons of new apps as noted in header files. + Lots of tedious effort writing these boring docs that + nobody is going to actually read. 
+ +Laurence Anderson + rpm2cpio, unzip, get_header_cpio, read_gz interface, rpm + +Jeff Angielski + ftpput, ftpget + +Enrik Berkhan + setconsole + +Jim Bauer + modprobe shell dependency + +Edward Betts + expr, hostid, logname, whoami + +John Beppu + du, nslookup, sort + +David Brownell + zcip + +Brian Candler + tiny-ls(ls) + +Randolph Chung + fbset, ping, hostname + +Dave Cinege + more(v2), makedevs, dutmp, modularization, auto links file, + various fixes, Linux Router Project maintenance + +Jordan Crouse + ipcalc + +Magnus Damm + tftp client + insmod powerpc support + +Larry Doolittle + pristine source directory compilation, lots of patches and fixes. + +Glenn Engel + httpd + +Gennady Feldman + Sysklogd (single threaded syslogd, IPC Circular buffer support, + logread), various fixes. + +Robert Griebl + modprobe, hwclock, suid/sgid handling, tinylogin integration + many bugfixes and enhancements + +Karl M. Hegbloom + cp_mv.c, the test suite, various fixes to utility.c, &c. + +Daniel Jacobowitz + mktemp.c + +Matt Kraai + documentation, bugfixes, test suite + +Rob Landley + Became busybox maintainer in 2006. + + sed (major rewrite in 2003, and I now maintain the thing) + bunzip2 (complete from-scratch rewrite, then mjn3 optimized the result) + sort (more or less from scratch rewrite in 2004, I now maintain it) + mount (rewrite in 2005, I maintain the new one) + +Stephan Linz + ipcalc, Red Hat equivalence + +John Lombardo + tr + +Glenn McGrath + Common unarchiving code and unarchiving applets, ifupdown, ftpgetput, + nameif, sed, patch, fold, install, uudecode. + Various bugfixes, review and apply numerous patches. + +Manuel Novoa III + cat, head, mkfifo, mknod, rmdir, sleep, tee, tty, uniq, usleep, wc, yes, + mesg, vconfig, nice, renice, + make_directory, parse_mode, dirname, mode_string, + get_last_path_component, simplify_path, and a number trivial libbb routines + + also bug fixes, partial rewrites, and size optimizations in + ash, basename, cal, cmp, cp, df, du, echo, env, ln, logname, md5sum, mkdir, + mv, realpath, rm, sort, tail, touch, uname, watch, arith, human_readable, + interface, dutmp, ifconfig, route + +Vladimir Oleynik + cmdedit; bb_mkdep, xargs(current), httpd(current); + ports: ash, crond, fdisk (initial, unmaintained now), inetd, stty, traceroute, + top; + locale, various fixes + and irreconcilable critic of everything not perfect. + +Bruce Perens + Original author of BusyBox in 1995, 1996. Some of his code can + still be found hiding here and there... + +Rodney Radford + ipcs, ipcrm + +Tim Riker + bug fixes, member of fan club + +Kent Robotti + reset, tons and tons of bug reports and patches. + +Chip Rosenthal , + wget - Contributed by permission of Covad Communications + +Pavel Roskin + Lots of bugs fixes and patches. + +Gyepi Sam + Remote logging feature for syslogd + +Rob Sullivan + comm + +Linus Torvalds + mkswap, fsck.minix, mkfs.minix + +Linus Walleij + fbset and fbsplash config RGBA parsing + rewrite of mdev helper to create devices from /sys/dev + +Mark Whitley + grep, sed, cut, xargs(previous), + style-guide, new-applet-HOWTO, bug fixes, etc. + +Charles P. Wright + gzip, mini-netcat(nc) + +Enrique Zanardi + tarcat (since removed), loadkmap, various fixes, Debian maintenance + +Tito Ragusa + devfsd and size optimizations in strings, openvt, chvt, deallocvt, hdparm, + fdformat, lsattr, chattr, id and eject. + +Paul Fox + vi editing mode for ash, various other patches/fixes + +Roberto A. 
Foglietta + port: dnsd + +Bernhard Reutner-Fischer + misc + +Mike Frysinger + initial e2fsprogs, printenv, setarch, sum, misc + +Jie Zhang + fixed two bugs in msh and hush (exitcode of killed processes) + +Philip Homburg + eperd evtraceroute evping perd condmv httpget httppost + +Antony Antony + evtdig buddyinfo findpid diff --git a/probe-busybox/Config.in b/probe-busybox/Config.in new file mode 100644 index 00000000..9ee04cff --- /dev/null +++ b/probe-busybox/Config.in @@ -0,0 +1,700 @@ +# +# For a description of the syntax of this configuration file, +# see scripts/kbuild/config-language.txt. +# + +mainmenu "BusyBox Configuration" + +config HAVE_DOT_CONFIG + bool + default y + +menu "Busybox Settings" + +config DESKTOP + bool "Enable options for full-blown desktop systems" + default y + help + Enable options and features which are not essential. + Select this if you plan to use busybox on full-blown desktop machine + with common Linux distro, which needs higher level of command-line + compatibility. + + If you are preparing your build to be used on an embedded box + where you have tighter control over the entire set of userspace + tools, you can unselect this option for smaller code size. + +config EXTRA_COMPAT + bool "Provide compatible behavior for rare corner cases (bigger code)" + default n + help + This option makes grep, sed etc handle rare corner cases + (embedded NUL bytes and such). This makes code bigger and uses + some GNU extensions in libc. You probably only need this option + if you plan to run busybox on desktop. + +config INCLUDE_SUSv2 + bool "Enable obsolete features removed before SUSv3" + default y + help + This option will enable backwards compatibility with SuSv2, + specifically, old-style numeric options ('command -1 ') + will be supported in head, tail, and fold. (Note: should + affect renice too.) + +config USE_PORTABLE_CODE + bool "Avoid using GCC-specific code constructs" + default n + help + Use this option if you are trying to compile busybox with + compiler other than gcc. + If you do use gcc, this option may needlessly increase code size. + +config PLATFORM_LINUX + bool "Enable Linux-specific applets and features" + default y + help + For the most part, busybox requires only POSIX compatibility + from the target system, but some applets and features use + Linux-specific interfaces. + + Answering 'N' here will disable such applets and hide the + corresponding configuration options. + +config SHOW_USAGE + bool "Show applet usage messages" + default y + help + Enabling this option, BusyBox applets will show terse help messages + when invoked with wrong arguments. + If you do not want to show any (helpful) usage message when + issuing wrong command syntax, you can say 'N' here, + saving approximately 7k. + +config FEATURE_VERBOSE_USAGE + bool "Show verbose applet usage messages" + default y + depends on SHOW_USAGE + help + All BusyBox applets will show verbose help messages when + busybox is invoked with --help. This will add a lot of text to the + busybox binary. In the default configuration, this will add about + 13k, but it can add much more depending on your configuration. + +config FEATURE_COMPRESS_USAGE + bool "Store applet usage messages in compressed form" + default y + depends on SHOW_USAGE + help + Store usage messages in .bz compressed form, uncompress them + on-the-fly when --help is called. + + If you have a really tiny busybox with few applets enabled (and + bunzip2 isn't one of them), the overhead of the decompressor might + be noticeable. 
Also, if you run executables directly from ROM + and have very little memory, this might not be a win. Otherwise, + you probably want this. + +config BUSYBOX + bool "Include busybox applet" + default y + help + The busybox applet provides general help regarding busybox and + allows the included applets to be listed. It's also required + if applet links are to be installed at runtime. + + If you can live without these features disabling this will save + some space. + +config FEATURE_INSTALLER + bool "Support --install [-s] to install applet links at runtime" + default y + depends on BUSYBOX + help + Enable 'busybox --install [-s]' support. This will allow you to use + busybox at runtime to create hard links or symlinks for all the + applets that are compiled into busybox. + +config INSTALL_NO_USR + bool "Don't use /usr" + default n + help + Disable use of /usr. busybox --install and "make install" + will install applets only to /bin and /sbin, + never to /usr/bin or /usr/sbin. + +config PAM + bool "Support for PAM (Pluggable Authentication Modules)" + default n + help + Use PAM in some busybox applets (currently login and httpd) instead + of direct access to password database. + +config LONG_OPTS + bool "Support for --long-options" + default y + help + Enable this if you want busybox applets to use the gnu --long-option + style, in addition to single character -a -b -c style options. + +config FEATURE_DEVPTS + bool "Use the devpts filesystem for Unix98 PTYs" + default y + help + Enable if you want BusyBox to use Unix98 PTY support. If enabled, + busybox will use /dev/ptmx for the master side of the pseudoterminal + and /dev/pts/ for the slave side. Otherwise, BSD style + /dev/ttyp will be used. To use this option, you should have + devpts mounted. + +config FEATURE_CLEAN_UP + bool "Clean up all memory before exiting (usually not needed)" + default n + help + As a size optimization, busybox normally exits without explicitly + freeing dynamically allocated memory or closing files. This saves + space since the OS will clean up for us, but it can confuse debuggers + like valgrind, which report tons of memory and resource leaks. + + Don't enable this unless you have a really good reason to clean + things up manually. + +config FEATURE_UTMP + bool "Support utmp file" + default y + help + The file /var/run/utmp is used to track who is currently logged in. + With this option on, certain applets (getty, login, telnetd etc) + will create and delete entries there. + "who" applet requires this option. + +config FEATURE_WTMP + bool "Support wtmp file" + default y + depends on FEATURE_UTMP + help + The file /var/run/wtmp is used to track when users have logged into + and logged out of the system. + With this option on, certain applets (getty, login, telnetd etc) + will append new entries there. + "last" applet requires this option. + +config FEATURE_PIDFILE + bool "Support writing pidfiles" + default y + help + This option makes some applets (e.g. crond, syslogd, inetd) write + a pidfile at the configured PID_FILE_PATH. It has no effect + on applets which require pidfiles to run. + +config PID_FILE_PATH + string "Path to directory for pidfile" + default "/var/run" + depends on FEATURE_PIDFILE + help + This is the default path where pidfiles are created. Applets which + allow you to set the pidfile path on the command line will override + this value. The option has no effect on applets that require you to + specify a pidfile path. 
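The bundled probe-busybox/.config earlier in this change already pins these two options (CONFIG_FEATURE_PIDFILE=y and CONFIG_PID_FILE_PATH="/var/run"), so the packaged build never touches menuconfig for them. As a rough sketch of adjusting such an option non-interactively, using the same sed-on-.config idiom the INSTALL notes further below use (the "/run" value here is purely illustrative, not something this change sets):

    cd probe-busybox
    sed -e 's|^CONFIG_PID_FILE_PATH=.*|CONFIG_PID_FILE_PATH="/run"|' -i .config
    make oldconfig      # let Kbuild reconcile any dependent options
    make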
+ +config FEATURE_SUID + bool "Support for SUID/SGID handling" + default y + help + With this option you can install the busybox binary belonging + to root with the suid bit set, enabling some applets to perform + root-level operations even when run by ordinary users + (for example, mounting of user mounts in fstab needs this). + + Busybox will automatically drop privileges for applets + that don't need root access. + + If you are really paranoid and don't want to do this, build two + busybox binaries with different applets in them (and the appropriate + symlinks pointing to each binary), and only set the suid bit on the + one that needs it. + + The applets which require root rights (need suid bit or + to be run by root) and will refuse to execute otherwise: + crontab, login, passwd, su, vlock, wall. + + The applets which will use root rights if they have them + (via suid bit, or because run by root), but would try to work + without root right nevertheless: + findfs, ping[6], traceroute[6], mount. + + Note that if you DONT select this option, but DO make busybox + suid root, ALL applets will run under root, which is a huge + security hole (think "cp /some/file /etc/passwd"). + +config FEATURE_SUID_CONFIG + bool "Runtime SUID/SGID configuration via /etc/busybox.conf" + default y + depends on FEATURE_SUID + help + Allow the SUID / SGID state of an applet to be determined at runtime + by checking /etc/busybox.conf. (This is sort of a poor man's sudo.) + The format of this file is as follows: + + APPLET = [Ssx-][Ssx-][x-] [USER.GROUP] + + s: USER or GROUP is allowed to execute APPLET. + APPLET will run under USER or GROUP + (reagardless of who's running it). + S: USER or GROUP is NOT allowed to execute APPLET. + APPLET will run under USER or GROUP. + This option is not very sensical. + x: USER/GROUP/others are allowed to execute APPLET. + No UID/GID change will be done when it is run. + -: USER/GROUP/others are not allowed to execute APPLET. + + An example might help: + + [SUID] + su = ssx root.0 # applet su can be run by anyone and runs with + # euid=0/egid=0 + su = ssx # exactly the same + + mount = sx- root.disk # applet mount can be run by root and members + # of group disk (but not anyone else) + # and runs with euid=0 (egid is not changed) + + cp = --- # disable applet cp for everyone + + The file has to be owned by user root, group root and has to be + writeable only by root: + (chown 0.0 /etc/busybox.conf; chmod 600 /etc/busybox.conf) + The busybox executable has to be owned by user root, group + root and has to be setuid root for this to work: + (chown 0.0 /bin/busybox; chmod 4755 /bin/busybox) + + Robert 'sandman' Griebl has more information here: + . + +config FEATURE_SUID_CONFIG_QUIET + bool "Suppress warning message if /etc/busybox.conf is not readable" + default y + depends on FEATURE_SUID_CONFIG + help + /etc/busybox.conf should be readable by the user needing the SUID, + check this option to avoid users to be notified about missing + permissions. + +config SELINUX + bool "Support NSA Security Enhanced Linux" + default n + select PLATFORM_LINUX + help + Enable support for SELinux in applets ls, ps, and id. Also provide + the option of compiling in SELinux applets. + + If you do not have a complete SELinux userland installed, this stuff + will not compile. Specifially, libselinux 1.28 or better is + directly required by busybox. 
If the installation is located in a + non-standard directory, provide it by invoking make as follows: + CFLAGS=-I \ + LDFLAGS=-L \ + make + + Most people will leave this set to 'N'. + +config FEATURE_PREFER_APPLETS + bool "exec prefers applets" + default n + help + This is an experimental option which directs applets about to + call 'exec' to try and find an applicable busybox applet before + searching the PATH. This is typically done by exec'ing + /proc/self/exe. + This may affect shell, find -exec, xargs and similar applets. + They will use applets even if /bin/ -> busybox link + is missing (or is not a link to busybox). However, this causes + problems in chroot jails without mounted /proc and with ps/top + (command name can be shown as 'exe' for applets started this way). + +config BUSYBOX_EXEC_PATH + string "Path to BusyBox executable" + default "/proc/self/exe" + help + When Busybox applets need to run other busybox applets, BusyBox + sometimes needs to exec() itself. When the /proc filesystem is + mounted, /proc/self/exe always points to the currently running + executable. If you haven't got /proc, set this to wherever you + want to run BusyBox from. + +# These are auto-selected by other options + +config FEATURE_SYSLOG + bool #No description makes it a hidden option + default n + #help + # This option is auto-selected when you select any applet which may + # send its output to syslog. You do not need to select it manually. + +config FEATURE_HAVE_RPC + bool #No description makes it a hidden option + default n + #help + # This is automatically selected if any of enabled applets need it. + # You do not need to select it manually. + +comment 'Build Options' + +config STATIC + bool "Build BusyBox as a static binary (no shared libs)" + default n + help + If you want to build a static BusyBox binary, which does not + use or require any shared libraries, then enable this option. + This can cause BusyBox to be considerably larger, so you should + leave this option false unless you have a good reason (i.e. + your target platform does not support shared libraries, or + you are building an initrd which doesn't need anything but + BusyBox, etc). + + Most people will leave this set to 'N'. + +config PIE + bool "Build BusyBox as a position independent executable" + default n + depends on !STATIC + help + Hardened code option. PIE binaries are loaded at a different + address at each invocation. This has some overhead, + particularly on x86-32 which is short on registers. + + Most people will leave this set to 'N'. + +config NOMMU + bool "Force NOMMU build" + default n + help + Busybox tries to detect whether architecture it is being + built against supports MMU or not. If this detection fails, + or if you want to build NOMMU version of busybox for testing, + you may force NOMMU build here. + + Most people will leave this set to 'N'. + +# PIE can be made to work with BUILD_LIBBUSYBOX, but currently +# build system does not support that +config BUILD_LIBBUSYBOX + bool "Build shared libbusybox" + default n + depends on !FEATURE_PREFER_APPLETS && !PIE && !STATIC + help + Build a shared library libbusybox.so.N.N.N which contains all + busybox code. + + This feature allows every applet to be built as a tiny + separate executable. Enabling it for "one big busybox binary" + approach serves no purpose and increases code size. + You should almost certainly say "no" to this. 
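For reference, the probe's committed .config leaves all of the build-shaping options above disabled (STATIC, PIE, NOMMU, BUILD_LIBBUSYBOX, FEATURE_PREFER_APPLETS) and keeps BUSYBOX_EXEC_PATH at /proc/self/exe. A quick way to confirm that state in a checkout, assuming a POSIX shell run from probe-busybox/:

    # disabled options show up as commented "is not set" lines,
    # enabled ones as CONFIG_...=y or CONFIG_...="value"
    grep -E '^(# )?CONFIG_(STATIC|PIE|NOMMU|BUILD_LIBBUSYBOX|FEATURE_PREFER_APPLETS|BUSYBOX_EXEC_PATH)( |=)' .config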
+ +### config FEATURE_FULL_LIBBUSYBOX +### bool "Feature-complete libbusybox" +### default n if !FEATURE_SHARED_BUSYBOX +### depends on BUILD_LIBBUSYBOX +### help +### Build a libbusybox with the complete feature-set, disregarding +### the actually selected config. +### +### Normally, libbusybox will only contain the features which are +### used by busybox itself. If you plan to write a separate +### standalone application which uses libbusybox say 'Y'. +### +### Note: libbusybox is GPL, not LGPL, and exports no stable API that +### might act as a copyright barrier. We can and will modify the +### exported function set between releases (even minor version number +### changes), and happily break out-of-tree features. +### +### Say 'N' if in doubt. + +config FEATURE_INDIVIDUAL + bool "Produce a binary for each applet, linked against libbusybox" + default y + depends on BUILD_LIBBUSYBOX + help + If your CPU architecture doesn't allow for sharing text/rodata + sections of running binaries, but allows for runtime dynamic + libraries, this option will allow you to reduce memory footprint + when you have many different applets running at once. + + If your CPU architecture allows for sharing text/rodata, + having single binary is more optimal. + + Each applet will be a tiny program, dynamically linked + against libbusybox.so.N.N.N. + + You need to have a working dynamic linker. + +config FEATURE_SHARED_BUSYBOX + bool "Produce additional busybox binary linked against libbusybox" + default y + depends on BUILD_LIBBUSYBOX + help + Build busybox, dynamically linked against libbusybox.so.N.N.N. + + You need to have a working dynamic linker. + +### config BUILD_AT_ONCE +### bool "Compile all sources at once" +### default n +### help +### Normally each source-file is compiled with one invocation of +### the compiler. +### If you set this option, all sources are compiled at once. +### This gives the compiler more opportunities to optimize which can +### result in smaller and/or faster binaries. +### +### Setting this option will consume alot of memory, e.g. if you +### enable all applets with all features, gcc uses more than 300MB +### RAM during compilation of busybox. +### +### This option is most likely only beneficial for newer compilers +### such as gcc-4.1 and above. +### +### Say 'N' unless you know what you are doing. + +config LFS + bool "Build with Large File Support (for accessing files > 2 GB)" + default y + help + If you want to build BusyBox with large file support, then enable + this option. This will have no effect if your kernel or your C + library lacks large file support for large files. Some of the + programs that can benefit from large file support include dd, gzip, + cp, mount, tar, and many others. If you want to access files larger + than 2 Gigabytes, enable this option. Otherwise, leave it set to 'N'. + +config CROSS_COMPILER_PREFIX + string "Cross Compiler prefix" + default "" + help + If you want to build BusyBox with a cross compiler, then you + will need to set this to the cross-compiler prefix, for example, + "i386-uclibc-". + + Note that CROSS_COMPILE environment variable or + "make CROSS_COMPILE=xxx ..." will override this selection. + + Native builds leave this empty. + +config SYSROOT + string "Path to sysroot" + default "" + help + If you want to build BusyBox with a cross compiler, then you + might also need to specify where /usr/include and /usr/lib + will be found. 
+ + For example, BusyBox can be built against an installed + Android NDK, platform version 9, for ARM ABI with + + CONFIG_SYSROOT=/opt/android-ndk/platforms/android-9/arch-arm + + Native builds leave this empty. + +config EXTRA_CFLAGS + string "Additional CFLAGS" + default "" + help + Additional CFLAGS to pass to the compiler verbatim. + +config EXTRA_LDFLAGS + string "Additional LDFLAGS" + default "" + help + Additional LDFLAGS to pass to the linker verbatim. + +config EXTRA_LDLIBS + string "Additional LDLIBS" + default "" + help + Additional LDLIBS to pass to the linker with -l. + +comment 'Installation Options ("make install" behavior)' + +choice + prompt "What kind of applet links to install" + default INSTALL_APPLET_SYMLINKS + help + Choose what kind of links to applets are created by "make install". + +config INSTALL_APPLET_SYMLINKS + bool "as soft-links" + help + Install applets as soft-links to the busybox binary. This needs some + free inodes on the filesystem, but might help with filesystem + generators that can't cope with hard-links. + +config INSTALL_APPLET_HARDLINKS + bool "as hard-links" + help + Install applets as hard-links to the busybox binary. This might + count on a filesystem with few inodes. + +config INSTALL_APPLET_SCRIPT_WRAPPERS + bool "as script wrappers" + help + Install applets as script wrappers that call the busybox binary. + +config INSTALL_APPLET_DONT + bool "not installed" + help + Do not install applet links. Useful when you plan to use + busybox --install for installing links, or plan to use + a standalone shell and thus don't need applet links. + +endchoice + +choice + prompt "/bin/sh applet link" + default INSTALL_SH_APPLET_SYMLINK + depends on INSTALL_APPLET_SCRIPT_WRAPPERS + help + Choose how you install /bin/sh applet link. + +config INSTALL_SH_APPLET_SYMLINK + bool "as soft-link" + help + Install /bin/sh applet as soft-link to the busybox binary. + +config INSTALL_SH_APPLET_HARDLINK + bool "as hard-link" + help + Install /bin/sh applet as hard-link to the busybox binary. + +config INSTALL_SH_APPLET_SCRIPT_WRAPPER + bool "as script wrapper" + help + Install /bin/sh applet as script wrapper that calls + the busybox binary. + +endchoice + +config PREFIX + string "BusyBox installation prefix" + default "./_install" + help + Define your directory to install BusyBox files/subdirs in. + +comment 'Debugging Options' + +config DEBUG + bool "Build BusyBox with extra Debugging symbols" + default n + help + Say Y here if you wish to examine BusyBox internals while applets are + running. This increases the size of the binary considerably, and + should only be used when doing development. If you are doing + development and want to debug BusyBox, answer Y. + + Most people should answer N. + +config DEBUG_PESSIMIZE + bool "Disable compiler optimizations" + default n + depends on DEBUG + help + The compiler's optimization of source code can eliminate and reorder + code, resulting in an executable that's hard to understand when + stepping through it with a debugger. This switches it off, resulting + in a much bigger executable that more closely matches the source + code. + +config DEBUG_SANITIZE + bool "Enable runtime sanitizers (ASAN/LSAN/USAN/etc...)" + default n + help + Say Y here if you want to enable runtime sanitizers. These help + catch bad memory accesses (e.g. buffer overflows), but will make + the executable larger and slow down runtime a bit. + + If you aren't developing/testing busybox, say N here. 
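None of the debugging options above are switched on in the committed .config (the debug-library choice there is NO_DEBUG_LIB). If a sanitizer build is wanted for local troubleshooting, a minimal sketch following the same .config-editing style as the INSTALL notes below, assuming GNU sed and a toolchain with ASAN support:

    cd probe-busybox
    sed -e 's/^# CONFIG_DEBUG is not set/CONFIG_DEBUG=y/' \
        -e 's/^# CONFIG_DEBUG_SANITIZE is not set/CONFIG_DEBUG_SANITIZE=y/' \
        -i .config
    make oldconfig
    make

The result is larger and slower than the stock build, so it is only meant for debugging, not for the packaged probe.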
+ +config UNIT_TEST + bool "Build unit tests" + default n + help + Say Y here if you want to build unit tests (both the framework and + test cases) as a Busybox applet. This results in bigger code, so you + probably don't want this option in production builds. + +config WERROR + bool "Abort compilation on any warning" + default n + help + Selecting this will add -Werror to gcc command line. + + Most people should answer N. + +choice + prompt "Additional debugging library" + default NO_DEBUG_LIB + help + Using an additional debugging library will make BusyBox become + considerable larger and will cause it to run more slowly. You + should always leave this option disabled for production use. + + dmalloc support: + ---------------- + This enables compiling with dmalloc ( http://dmalloc.com/ ) + which is an excellent public domain mem leak and malloc problem + detector. To enable dmalloc, before running busybox you will + want to properly set your environment, for example: + export DMALLOC_OPTIONS=debug=0x34f47d83,inter=100,log=logfile + The 'debug=' value is generated using the following command + dmalloc -p log-stats -p log-non-free -p log-bad-space \ + -p log-elapsed-time -p check-fence -p check-heap \ + -p check-lists -p check-blank -p check-funcs -p realloc-copy \ + -p allow-free-null + + Electric-fence support: + ----------------------- + This enables compiling with Electric-fence support. Electric + fence is another very useful malloc debugging library which uses + your computer's virtual memory hardware to detect illegal memory + accesses. This support will make BusyBox be considerable larger + and run slower, so you should leave this option disabled unless + you are hunting a hard to find memory problem. + + +config NO_DEBUG_LIB + bool "None" + +config DMALLOC + bool "Dmalloc" + +config EFENCE + bool "Electric-fence" + +endchoice + +endmenu + +source libbb/Config.in + +comment "Applets" + +source archival/Config.in +source coreutils/Config.in +#source console-tools/Config.in +#source debianutils/Config.in +#source editors/Config.in +source eperd/Config.in +#source findutils/Config.in +#source init/Config.in +#source loginutils/Config.in +#source e2fsprogs/Config.in +#source modutils/Config.in +#source util-linux/Config.in +source miscutils/Config.in +source networking/Config.in +#source printutils/Config.in +#source mailutils/Config.in +#source procps/Config.in +#source runit/Config.in +#source selinux/Config.in +#source shell/Config.in +#source sysklogd/Config.in diff --git a/probe-busybox/INSTALL b/probe-busybox/INSTALL new file mode 100644 index 00000000..20de22fd --- /dev/null +++ b/probe-busybox/INSTALL @@ -0,0 +1,147 @@ +Building: +========= + +The BusyBox build process is similar to the Linux kernel build: + + cd libevent-2.1.11-stable # first build libevent + autoreconf --install + ./configure --prefix=/usr/local/atlas + make install + cd ../ + make menuconfig # This creates a file called ".config" + make # This creates the "busybox" executable + make CONFIG_PREFIX=/usr/local//atlas/bb-13.3 install #RIPE NCC atlas install + +The full list of configuration and install options is available by typing: + + make help + +Quick Start: +============ + +The easy way to try out BusyBox for the first time, without having to install +it, is to enable all features and then use "standalone shell" mode with a +blank command $PATH. + +To enable all features, use "make defconfig", which produces the largest +general-purpose configuration. 
It's allyesconfig minus debugging options, +optional packaging choices, and a few special-purpose features requiring +extra configuration to use. Then enable "standalone shell" feature: + + make defconfig + make menuconfig + # select Busybox Settings + # then General Configuration + # then exec prefers applets + # exit back to top level menu + # select Shells + # then Standalone shell + # exit back to top level menu + # exit and save new configuration + # OR + # use these commands to modify .config directly: + sed -e 's/.*FEATURE_PREFER_APPLETS.*/CONFIG_FEATURE_PREFER_APPLETS=y/' -i .config + sed -e 's/.*FEATURE_SH_STANDALONE.*/CONFIG_FEATURE_SH_STANDALONE=y/' -i .config + make + PATH= ./busybox ash + +Standalone shell mode causes busybox's built-in command shell to run +any built-in busybox applets directly, without looking for external +programs by that name. Supplying an empty command path (as above) means +the only commands busybox can find are the built-in ones. + +Note that the standalone shell requires CONFIG_BUSYBOX_EXEC_PATH +to be set appropriately, depending on whether or not /proc/self/exe is +available. If you do not have /proc, then point that config option +to the location of your busybox binary, usually /bin/busybox. +Another solution is to patch the kernel (see +examples/linux-*_proc_self_exe.patch) to make exec("/proc/self/exe") +always work. + +Configuring Busybox: +==================== + +Busybox is optimized for size, but enabling the full set of functionality +still results in a fairly large executable -- more than 1 megabyte when +statically linked. To save space, busybox can be configured with only the +set of applets needed for each environment. The minimal configuration, with +all applets disabled, produces a 4k executable. (It's useless, but very small.) + +The manual configurator "make menuconfig" modifies the existing configuration. +(For systems without ncurses, try "make config" instead.) The two most +interesting starting configurations are "make allnoconfig" (to start with +everything disabled and add just what you need), and "make defconfig" (to +start with everything enabled and remove what you don't need). If menuconfig +is run without an existing configuration, make defconfig will run first to +create a known starting point. + +Other starting configurations (mostly used for testing purposes) include +"make allbareconfig" (enables all applets but disables all optional features), +"make allyesconfig" (enables absolutely everything including debug features), +and "make randconfig" (produce a random configuration). The configs/ directory +contains a number of additional configuration files ending in _defconfig which +are useful in specific cases. "make help" will list them. + +Configuring BusyBox produces a file ".config", which can be saved for future +use. Run "make oldconfig" to bring a .config file from an older version of +busybox up to date. + +Installing Busybox: +=================== + +Busybox is a single executable that can behave like many different commands, +and BusyBox uses the name it was invoked under to determine the desired +behavior. (Try "mv busybox ls" and then "./ls -l".) + +Installing busybox consists of creating symlinks (or hardlinks) to the busybox +binary for each applet enabled in busybox, and making sure these symlinks are +in the shell's command $PATH. Running "make install" creates these symlinks, +or "make install-hardlinks" creates hardlinks instead (useful on systems with +a limited number of inodes). 
This install process uses the file +"busybox.links" (created by make), which contains the list of enabled applets +and the path at which to install them. + +Installing links to busybox is not always necessary. The special applet name +"busybox" (or with any optional suffix, such as "busybox-static") uses the +first argument to determine which applet to behave as, for example +"./busybox cat LICENSE". (Running the busybox applet with no arguments gives +a list of all enabled applets.) The standalone shell can also call busybox +applets without links to busybox under other names in the filesystem. You can +also configure a standalone install capability into the busybox base applet, +and then install such links at runtime with one of "busybox --install" (for +hardlinks) or "busybox --install -s" (for symlinks). + +If you enabled the busybox shared library feature (libbusybox.so) and want +to run tests without installing, set your LD_LIBRARY_PATH accordingly when +running the executable: + + LD_LIBRARY_PATH=`pwd` ./busybox + +Building out-of-tree: +===================== + +By default, the BusyBox build puts its temporary files in the source tree. +Building from a read-only source tree, or building multiple configurations from +the same source directory, requires the ability to put the temporary files +somewhere else. + +To build out of tree, cd to an empty directory and configure busybox from there: + + make KBUILD_SRC=/path/to/source -f /path/to/source/Makefile defconfig + make + make install + +Alternately, use the O=$BUILDPATH option (with an absolute path) during the +configuration step, as in: + + make O=/some/empty/directory allyesconfig + cd /some/empty/directory + make + make CONFIG_PREFIX=. install + +More Information: +================= + +Se also the busybox FAQ, under the questions "How can I get started using +BusyBox" and "How do I build a BusyBox-based system?" The BusyBox FAQ is +available from http://www.busybox.net/FAQ.html diff --git a/probe-busybox/LICENSE b/probe-busybox/LICENSE new file mode 100644 index 00000000..6f50a716 --- /dev/null +++ b/probe-busybox/LICENSE @@ -0,0 +1,348 @@ +--- A note on GPL versions + +BusyBox is distributed under version 2 of the General Public License (included +in its entirety, below). Version 2 is the only version of this license which +this version of BusyBox (or modified versions derived from this one) may be +distributed under. + +------------------------------------------------------------------------ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/probe-busybox/Makefile b/probe-busybox/Makefile new file mode 100644 index 00000000..64be6d7f --- /dev/null +++ b/probe-busybox/Makefile @@ -0,0 +1,1343 @@ +VERSION = 1 +PATCHLEVEL = 26 +SUBLEVEL = 2 +EXTRAVERSION = +NAME = Unnamed + +# *DOCUMENTATION* +# To see a list of typical targets execute "make help" +# More info can be located in ./README +# Comments in this file are targeted only to the developer, do not +# expect to learn how to build the kernel reading this file. + +# Do not print "Entering directory ..." +MAKEFLAGS += --no-print-directory + +# We are using a recursive build, so we need to do a little thinking +# to get the ordering right. +# +# Most importantly: sub-Makefiles should only ever modify files in +# their own directory. If in some directory we have a dependency on +# a file in another dir (which doesn't happen often, but it's often +# unavoidable when linking the built-in.o targets which finally +# turn into busybox), we will call a sub make in that other dir, and +# after that we are sure that everything which is in that other dir +# is now up to date. +# +# The only cases where we need to modify files which have global +# effects are thus separated out and done before the recursive +# descending is started. They are now explicitly listed as the +# prepare rule. + +# To put more focus on warnings, be less verbose as default +# Use 'make V=1' to see the full commands + +ifdef V + ifeq ("$(origin V)", "command line") + KBUILD_VERBOSE = $(V) + endif +endif +ifndef KBUILD_VERBOSE + KBUILD_VERBOSE = 0 +endif + +# Call sparse as part of compilation of C files +# Use 'make C=1' to enable sparse checking + +ifdef C + ifeq ("$(origin C)", "command line") + KBUILD_CHECKSRC = $(C) + endif +endif +ifndef KBUILD_CHECKSRC + KBUILD_CHECKSRC = 0 +endif + +# Use make M=dir to specify directory of external module to build +# Old syntax make ... SUBDIRS=$PWD is still supported +# Setting the environment variable KBUILD_EXTMOD take precedence +ifdef SUBDIRS + KBUILD_EXTMOD ?= $(SUBDIRS) +endif +ifdef M + ifeq ("$(origin M)", "command line") + KBUILD_EXTMOD := $(M) + endif +endif + + +# kbuild supports saving output files in a separate directory. +# To locate output files in a separate directory two syntaxes are supported. +# In both cases the working directory must be the root of the kernel src. +# 1) O= +# Use "make O=dir/to/store/output/files/" +# +# 2) Set KBUILD_OUTPUT +# Set the environment variable KBUILD_OUTPUT to point to the directory +# where the output files shall be placed. +# export KBUILD_OUTPUT=dir/to/store/output/files/ +# make +# +# The O= assignment takes precedence over the KBUILD_OUTPUT environment +# variable. + + +# KBUILD_SRC is set on invocation of make in OBJ directory +# KBUILD_SRC is not intended to be used by the regular user (for now) +ifeq ($(KBUILD_SRC),) + +# OK, Make called in directory where kernel src resides +# Do we want to locate output files in a separate directory? 
+ifdef O + ifeq ("$(origin O)", "command line") + KBUILD_OUTPUT := $(O) + endif +endif + +# That's our default target when none is given on the command line +PHONY := _all +_all: + +ifneq ($(KBUILD_OUTPUT),) +# Invoke a second make in the output directory, passing relevant variables +# check that the output directory actually exists +saved-output := $(KBUILD_OUTPUT) +KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd) +$(if $(KBUILD_OUTPUT),, \ + $(error output directory "$(saved-output)" does not exist)) + +PHONY += $(MAKECMDGOALS) + +$(filter-out _all,$(MAKECMDGOALS)) _all: + $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ + KBUILD_SRC=$(CURDIR) \ + KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@ + +# Leave processing to above invocation of make +skip-makefile := 1 +endif # ifneq ($(KBUILD_OUTPUT),) +endif # ifeq ($(KBUILD_SRC),) + +# We process the rest of the Makefile if this is the final invocation of make +ifeq ($(skip-makefile),) + +# If building an external module we do not care about the all: rule +# but instead _all depend on modules +PHONY += all +ifeq ($(KBUILD_EXTMOD),) +_all: all +else +_all: modules +endif + +srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR)) +TOPDIR := $(srctree) +# FIXME - TOPDIR is obsolete, use srctree/objtree +objtree := $(CURDIR) +src := $(srctree) +obj := $(objtree) + +VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) + +export srctree objtree VPATH TOPDIR + + +# Cross compiling and selecting different set of gcc/bin-utils +# --------------------------------------------------------------------------- +# +# When performing cross compilation for other architectures ARCH shall be set +# to the target architecture. (See arch/* for the possibilities). +# ARCH can be set during invocation of make: +# make ARCH=ia64 +# Another way is to have ARCH set in the environment. +# The default ARCH is the host where make is executed. + +# CROSS_COMPILE specify the prefix used for all executables used +# during compilation. Only gcc and related bin-utils executables +# are prefixed with $(CROSS_COMPILE). +# CROSS_COMPILE can be set on the command line +# make CROSS_COMPILE=ia64-linux- +# Alternatively CROSS_COMPILE can be set in the environment. +# Default value for CROSS_COMPILE is not to prefix executables +# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile + +CROSS_COMPILE ?= +# bbox: we may have CONFIG_CROSS_COMPILER_PREFIX in .config, +# and it has not been included yet... thus using an awkward syntax. +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := $(shell grep ^CONFIG_CROSS_COMPILER_PREFIX .config 2>/dev/null) +CROSS_COMPILE := $(subst CONFIG_CROSS_COMPILER_PREFIX=,,$(CROSS_COMPILE)) +CROSS_COMPILE := $(subst ",,$(CROSS_COMPILE)) +#") +endif + +# SUBARCH tells the usermode build what the underlying arch is. That is set +# first, and if a usermode build is happening, the "ARCH=um" on the command +# line overrides the setting of ARCH below. If a native build is happening, +# then ARCH is assigned, getting whatever value it gets normally, and +# SUBARCH is subsequently ignored. 
+ +ifneq ($(CROSS_COMPILE),) +SUBARCH := $(shell echo $(CROSS_COMPILE) | cut -d- -f1) +else +SUBARCH := $(shell uname -m) +endif +SUBARCH := $(shell echo $(SUBARCH) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ + -e s/arm.*/arm/ -e s/sa110/arm/ \ + -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ ) + +ARCH ?= $(SUBARCH) + +# Architecture as present in compile.h +UTS_MACHINE := $(ARCH) + +# SHELL used by kbuild +CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ + else if [ -x /bin/bash ]; then echo /bin/bash; \ + else echo sh; fi ; fi) + +# Decide whether to build built-in, modular, or both. +# Normally, just do built-in. + +KBUILD_MODULES := +KBUILD_BUILTIN := 1 + +# If we have only "make modules", don't compile built-in objects. +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are uptodate before we record them. + +ifeq ($(MAKECMDGOALS),modules) + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) +endif + +# If we have "make modules", compile modules +# in addition to whatever we do anyway. +# Just "make" or "make all" shall build modules as well + +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) + KBUILD_MODULES := 1 +endif + +ifeq ($(MAKECMDGOALS),) + KBUILD_MODULES := 1 +endif + +export KBUILD_MODULES KBUILD_BUILTIN +export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD + +# Beautify output +# --------------------------------------------------------------------------- +# +# Normally, we echo the whole command before executing it. By making +# that echo $($(quiet)$(cmd)), we now have the possibility to set +# $(quiet) to choose other forms of output instead, e.g. +# +# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@ +# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< +# +# If $(quiet) is empty, the whole command will be printed. +# If it is set to "quiet_", only the short version will be printed. +# If it is set to "silent_", nothing wil be printed at all, since +# the variable $(silent_cmd_cc_o_c) doesn't exist. +# +# A simple variant is to prefix commands with $(Q) - that's useful +# for commands that shall be hidden in non-verbose mode. +# +# $(Q)ln $@ :< +# +# If KBUILD_VERBOSE equals 0 then the above command will be hidden. +# If KBUILD_VERBOSE equals 1 then the above command is displayed. + +ifeq ($(KBUILD_VERBOSE),1) + quiet = + Q = +else + quiet=quiet_ + Q = @ +endif + +# If the user is running make -s (silent mode), suppress echoing of +# commands + +ifneq ($(findstring s,$(MAKEFLAGS)),) + quiet=silent_ +endif + +export quiet Q KBUILD_VERBOSE + + +# Look for make include files relative to root of kernel src +MAKEFLAGS += --include-dir=$(srctree) + +HOSTCC = gcc +HOSTCXX = g++ +HOSTCFLAGS := +HOSTCXXFLAGS := +# We need some generic definitions +include $(srctree)/scripts/Kbuild.include + +HOSTCFLAGS += $(call hostcc-option,-Wall -Wstrict-prototypes -O2 -fomit-frame-pointer,) +HOSTCXXFLAGS += -O2 + +# For maximum performance (+ possibly random breakage, uncomment +# the following) + +MAKEFLAGS += -rR + +# Make variables (CC, etc...) 
+ +AS = $(CROSS_COMPILE)as +CC = $(CROSS_COMPILE)gcc +LD = $(CC) -nostdlib +CPP = $(CC) -E +AR = $(CROSS_COMPILE)ar +NM = $(CROSS_COMPILE)nm +STRIP = $(CROSS_COMPILE)strip +OBJCOPY = $(CROSS_COMPILE)objcopy +OBJDUMP = $(CROSS_COMPILE)objdump +PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config +AWK = awk +GENKSYMS = scripts/genksyms/genksyms +DEPMOD = /sbin/depmod +KALLSYMS = scripts/kallsyms +PERL = perl +CHECK = sparse + +CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF) +MODFLAGS = -DMODULE +CFLAGS_MODULE = $(MODFLAGS) +AFLAGS_MODULE = $(MODFLAGS) +LDFLAGS_MODULE = -r +CFLAGS_KERNEL = +AFLAGS_KERNEL = + + +# Use LINUXINCLUDE when you must reference the include/ directory. +# Needed to be compatible with the O= option +CFLAGS := $(CFLAGS) +# Added only to final link stage of busybox binary +CFLAGS_busybox := $(CFLAGS_busybox) +CPPFLAGS := $(CPPFLAGS) +AFLAGS := $(AFLAGS) +LDFLAGS := $(LDFLAGS) +LDLIBS := + +# Read KERNELRELEASE from .kernelrelease (if it exists) +KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null) +KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) + +export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION \ + ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \ + CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \ + HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS + +export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS +export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE +export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE +export FLTFLAGS + +# When compiling out-of-tree modules, put MODVERDIR in the module +# tree rather than in the kernel tree. The kernel tree might +# even be read-only. +export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_versions + +# Files to ignore in find ... statements + +RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o +export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git + +# =========================================================================== +# Rules shared between *config targets and build targets + +# Basic helpers built in scripts/ +PHONY += scripts_basic +scripts_basic: + $(Q)$(MAKE) $(build)=scripts/basic + +# To avoid any implicit rule to kick in, define an empty command. +scripts/basic/%: scripts_basic ; + +# This target generates Kbuild's and Config.in's from *.c files +PHONY += gen_build_files +gen_build_files: $(wildcard $(srctree)/*/*.c) $(wildcard $(srctree)/*/*/*.c) + $(Q)$(srctree)/scripts/gen_build_files.sh $(srctree) $(objtree) + +# bbox: we have helpers in applets/ +# we depend on scripts_basic, since scripts/basic/fixdep +# must be built before any other host prog +PHONY += applets_dir +applets_dir: scripts_basic gen_build_files + $(Q)$(MAKE) $(build)=applets + +applets/%: applets_dir ; + +PHONY += outputmakefile +# outputmakefile generates a Makefile in the output directory, if using a +# separate output directory. This allows convenient use of make in the +# output directory. 
+outputmakefile: +ifneq ($(KBUILD_SRC),) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \ + $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) +endif + +# To make sure we do not include .config for any of the *config targets +# catch them early, and hand them over to scripts/kconfig/Makefile +# It is allowed to specify more targets when calling make, including +# mixing *config targets and build targets. +# For example 'make oldconfig all'. +# Detect when mixed targets is specified, and make a second invocation +# of make so .config is not included in this case either (for *config). + +no-dot-config-targets := clean mrproper distclean \ + cscope TAGS tags help %docs +#bbox# check% is removed from above + +config-targets := 0 +mixed-targets := 0 +dot-config := 1 + +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) + ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) + dot-config := 0 + endif +endif + +ifeq ($(KBUILD_EXTMOD),) + ifneq ($(filter config %config,$(MAKECMDGOALS)),) + config-targets := 1 + ifneq ($(filter-out config %config,$(MAKECMDGOALS)),) + mixed-targets := 1 + endif + endif +endif + +ifeq ($(mixed-targets),1) +# =========================================================================== +# We're called with mixed targets (*config and build targets). +# Handle them one by one. + +%:: FORCE + $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= $@ + +else +ifeq ($(config-targets),1) +# =========================================================================== +# *config targets only - make sure prerequisites are updated, and descend +# in scripts/kconfig to make the *config target + +# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed. +# KBUILD_DEFCONFIG may point out an alternative default configuration +# used for 'make defconfig' +-include $(srctree)/arch/$(ARCH)/Makefile +export KBUILD_DEFCONFIG + +config: scripts_basic outputmakefile gen_build_files FORCE + $(Q)mkdir -p include + $(Q)$(MAKE) $(build)=scripts/kconfig $@ + $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= .kernelrelease + +%config: scripts_basic outputmakefile gen_build_files FORCE + $(Q)mkdir -p include + $(Q)$(MAKE) $(build)=scripts/kconfig $@ + $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= .kernelrelease + +else +# =========================================================================== +# Build targets only - this includes busybox, arch specific targets, clean +# targets and others. In general all targets except *config targets. 
+ +ifeq ($(KBUILD_EXTMOD),) +# Additional helpers built in scripts/ +# Carefully list dependencies so we do not try to build scripts twice +# in parallel +PHONY += scripts +scripts: gen_build_files scripts_basic include/config/MARKER + $(Q)$(MAKE) $(build)=$(@) + +scripts_basic: include/autoconf.h + +# Objects we will link into busybox / subdirs we need to visit +core-y := \ + applets/ \ + +libs-y := \ + archival/ \ + archival/libarchive/ \ + console-tools/ \ + coreutils/ \ + coreutils/libcoreutils/ \ + debianutils/ \ + e2fsprogs/ \ + editors/ \ + eperd/ \ + findutils/ \ + init/ \ + libbb/ \ + libpwdgrp/ \ + loginutils/ \ + mailutils/ \ + miscutils/ \ + modutils/ \ + networking/ \ + networking/libiproute/ \ + networking/udhcp/ \ + printutils/ \ + procps/ \ + runit/ \ + selinux/ \ + shell/ \ + sysklogd/ \ + util-linux/ \ + util-linux/volume_id/ \ + +libs-y := \ + archival/ \ + archival/libarchive/ \ + coreutils/ \ + eperd/ \ + libbb/ \ + libpwdgrp/ \ + miscutils/ \ + networking/ \ + libevent-2.1.11-stable/.libs/libevent.a \ + libevent-2.1.11-stable/.libs/libevent_openssl.a \ + +endif # KBUILD_EXTMOD + +ifeq ($(dot-config),1) +# In this section, we need .config + +# Read in dependencies to all Kconfig* files, make sure to run +# oldconfig if changes are detected. +-include .kconfig.d + +-include .config + +# If .config needs to be updated, it will be done via the dependency +# that autoconf has on .config. +# To avoid any implicit rule to kick in, define an empty command +.config .kconfig.d: ; + +# Now we can define CFLAGS etc according to .config +include $(srctree)/Makefile.flags + +# If .config is newer than include/autoconf.h, someone tinkered +# with it and forgot to run make oldconfig. +# If kconfig.d is missing then we are probarly in a cleaned tree so +# we execute the config step to be sure to catch updated Kconfig files +include/autoconf.h: .kconfig.d .config $(wildcard $(srctree)/*/*.c) $(wildcard $(srctree)/*/*/*.c) | gen_build_files + $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig + +include/usage.h: gen_build_files + +else +# Dummy target needed, because used as prerequisite +include/autoconf.h: ; +endif + +# The all: target is the default when no target is given on the +# command line. +# This allow a user to issue only 'make' to build a kernel including modules +# Defaults busybox but it is usually overridden in the arch makefile +all: busybox # doc + +-include $(srctree)/arch/$(ARCH)/Makefile + +# arch Makefile may override CC so keep this after arch Makefile is included +#bbox# NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) +CHECKFLAGS += $(NOSTDINC_FLAGS) + +# Default kernel image to build when no specific target is given. +# KBUILD_IMAGE may be overruled on the commandline or +# set in the environment +# Also any assignments in arch/$(ARCH)/Makefile take precedence over +# this default value +export KBUILD_IMAGE ?= busybox + +# +# INSTALL_PATH specifies where to place the updated kernel and system map +# images. Default is /boot, but you can set it to other values +export INSTALL_PATH ?= /boot + +# +# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory +# relocations required by build roots. This is not defined in the +# makefile but the argument can be passed to make if needed. 
+# + +MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) +export MODLIB + + +ifeq ($(KBUILD_EXTMOD),) +busybox-dirs := $(patsubst %/,%,$(filter %/, $(core-y) $(core-m) $(libs-y) $(libs-m))) + +busybox-alldirs := $(sort $(busybox-dirs) $(patsubst %/,%,$(filter %/, \ + $(core-n) $(core-) $(libs-n) $(libs-) \ + ))) + +core-y := $(patsubst %/, %/built-in.o, $(core-y)) +libs-y1 := $(patsubst %/, %/lib.a, $(libs-y)) +libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y)) +libs-y := $(libs-y1) $(libs-y2) + +# Build busybox +# --------------------------------------------------------------------------- +# busybox is build from the objects selected by $(busybox-init) and +# $(busybox-main). Most are built-in.o files from top-level directories +# in the kernel tree, others are specified in arch/$(ARCH)Makefile. +# Ordering when linking is important, and $(busybox-init) must be first. +# +# busybox +# ^ +# | +# +-< $(busybox-init) +# | +--< init/version.o + more +# | +# +--< $(busybox-main) +# | +--< driver/built-in.o mm/built-in.o + more +# | +# +-< kallsyms.o (see description in CONFIG_KALLSYMS section) +# +# busybox version (uname -v) cannot be updated during normal +# descending-into-subdirs phase since we do not yet know if we need to +# update busybox. +# Therefore this step is delayed until just before final link of busybox - +# except in the kallsyms case where it is done just before adding the +# symbols to the kernel. +# +# System.map is generated to document addresses of all kernel symbols + +busybox-all := $(core-y) $(libs-y) + +# Rule to link busybox - also used during CONFIG_KALLSYMS +# May be overridden by arch/$(ARCH)/Makefile +quiet_cmd_busybox__ ?= LINK $@ + cmd_busybox__ ?= $(srctree)/scripts/trylink \ + "$@" \ + "$(CC)" \ + "$(CFLAGS) $(CFLAGS_busybox)" \ + "$(LDFLAGS) $(EXTRA_LDFLAGS)" \ + "$(core-y)" \ + "$(libs-y)" \ + "$(LDLIBS)" \ + && $(srctree)/scripts/generate_BUFSIZ.sh --post include/common_bufsiz.h + +# Generate System.map +quiet_cmd_sysmap = SYSMAP + cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap + +# Link of busybox +# If CONFIG_KALLSYMS is set .version is already updated +# Generate System.map and verify that the content is consistent +# Use + in front of the busybox_version rule to silent warning with make -j2 +# First command is ':' to allow us to use + in front of the rule +define rule_busybox__ + : + $(call cmd,busybox__) + $(Q)echo 'cmd_$@ := $(cmd_busybox__)' > $(@D)/.$(@F).cmd +endef + + +ifdef CONFIG_KALLSYMS +# Generate section listing all symbols and add it into busybox $(kallsyms.o) +# It's a three stage process: +# o .tmp_busybox1 has all symbols and sections, but __kallsyms is +# empty +# Running kallsyms on that gives us .tmp_kallsyms1.o with +# the right size - busybox version (uname -v) is updated during this step +# o .tmp_busybox2 now has a __kallsyms section of the right size, +# but due to the added section, some addresses have shifted. +# From here, we generate a correct .tmp_kallsyms2.o +# o The correct .tmp_kallsyms2.o is linked into the final busybox. +# o Verify that the System.map from busybox matches the map from +# .tmp_busybox2, just in case we did not generate kallsyms correctly. +# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using +# .tmp_busybox3 and .tmp_kallsyms3.o. This is only meant as a +# temporary bypass to allow the kernel to be built while the +# maintainers work out what went wrong with kallsyms. 
+ +ifdef CONFIG_KALLSYMS_EXTRA_PASS +last_kallsyms := 3 +else +last_kallsyms := 2 +endif + +kallsyms.o := .tmp_kallsyms$(last_kallsyms).o + +define verify_kallsyms + $(Q)$(if $($(quiet)cmd_sysmap), \ + echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \ + $(cmd_sysmap) .tmp_busybox$(last_kallsyms) .tmp_System.map + $(Q)cmp -s System.map .tmp_System.map || \ + (echo Inconsistent kallsyms data; \ + echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \ + rm .tmp_kallsyms* ; /bin/false ) +endef + +# Update busybox version before link +# Use + in front of this rule to silent warning about make -j1 +# First command is ':' to allow us to use + in front of this rule +cmd_ksym_ld = $(cmd_busybox__) +define rule_ksym_ld + : + +$(call cmd,busybox_version) + $(call cmd,busybox__) + $(Q)echo 'cmd_$@ := $(cmd_busybox__)' > $(@D)/.$(@F).cmd +endef + +# Generate .S file with all kernel symbols +quiet_cmd_kallsyms = KSYM $@ + cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \ + $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@ + +.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE + $(call if_changed_dep,as_o_S) + +.tmp_kallsyms%.S: .tmp_busybox% $(KALLSYMS) + $(call cmd,kallsyms) + +# .tmp_busybox1 must be complete except kallsyms, so update busybox version +.tmp_busybox1: $(busybox-lds) $(busybox-all) FORCE + $(call if_changed_rule,ksym_ld) + +.tmp_busybox2: $(busybox-lds) $(busybox-all) .tmp_kallsyms1.o FORCE + $(call if_changed,busybox__) + +.tmp_busybox3: $(busybox-lds) $(busybox-all) .tmp_kallsyms2.o FORCE + $(call if_changed,busybox__) + +# Needs to visit scripts/ before $(KALLSYMS) can be used. +$(KALLSYMS): scripts ; + +# Generate some data for debugging strange kallsyms problems +debug_kallsyms: .tmp_map$(last_kallsyms) + +.tmp_map%: .tmp_busybox% FORCE + ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@ + +.tmp_map3: .tmp_map2 + +.tmp_map2: .tmp_map1 + +endif # ifdef CONFIG_KALLSYMS + +# busybox image - including updated kernel symbols +busybox_unstripped: $(busybox-all) FORCE + $(call if_changed_rule,busybox__) + $(Q)rm -f .old_version + +busybox: busybox_unstripped +ifeq ($(SKIP_STRIP),y) + $(Q)cp $< $@ +else + $(Q)$(STRIP) -s --remove-section=.note --remove-section=.comment \ + busybox_unstripped -o $@ +# strip is confused by PIE executable and does not set exec bits + $(Q)chmod a+x $@ +endif + +# The actual objects are generated when descending, +# make sure no implicit rule kicks in +$(sort $(busybox-all)): $(busybox-dirs) ; + +# Handle descending into subdirectories listed in $(busybox-dirs) +# Preset locale variables to speed up the build process. Limit locale +# tweaks to this spot to avoid wrong language settings when running +# make menuconfig etc. +# Error messages still appears in the original language + +PHONY += $(busybox-dirs) +$(busybox-dirs): prepare scripts + $(Q)$(MAKE) $(build)=$@ + +# Build the kernel release string +# The KERNELRELEASE is stored in a file named .kernelrelease +# to be used when executing for example make install or make modules_install +# +# Take the contents of any files called localversion* and the config +# variable CONFIG_LOCALVERSION and append them to KERNELRELEASE. 
+# LOCALVERSION from the command line override all of this + +nullstring := +space := $(nullstring) # end of line + +___localver = $(objtree)/localversion* $(srctree)/localversion* +__localver = $(sort $(wildcard $(___localver))) +# skip backup files (containing '~') +_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f))) + +localver = $(subst $(space),, \ + $(shell cat /dev/null $(_localver)) \ + $(patsubst "%",%,$(CONFIG_LOCALVERSION))) + +# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called +# and if the SCM is know a tag from the SCM is appended. +# The appended tag is determinded by the SCM used. +# +# Currently, only git is supported. +# Other SCMs can edit scripts/setlocalversion and add the appropriate +# checks as needed. +ifdef _BB_DISABLED_CONFIG_LOCALVERSION_AUTO + _localver-auto = $(shell $(CONFIG_SHELL) \ + $(srctree)/scripts/setlocalversion $(srctree)) + localver-auto = $(LOCALVERSION)$(_localver-auto) +endif + +localver-full = $(localver)$(localver-auto) + +# Store (new) KERNELRELASE string in .kernelrelease +kernelrelease = $(KERNELVERSION)$(localver-full) +.kernelrelease: FORCE + $(Q)rm -f $@ + $(Q)echo $(kernelrelease) > $@ + + +# Things we need to do before we recursively start building the kernel +# or the modules are listed in "prepare". +# A multi level approach is used. prepareN is processed before prepareN-1. +# archprepare is used in arch Makefiles and when processed asm symlink, +# version.h and scripts_basic is processed / created. + +# Listed in dependency order +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 + +# prepare-all is deprecated, use prepare as valid replacement +PHONY += prepare-all + +# prepare3 is used to check if we are building in a separate output directory, +# and if so do: +# 1) Check that make has not been executed in the kernel src $(srctree) +# 2) Create the include2 directory, used for the second asm symlink +prepare3: .kernelrelease +ifneq ($(KBUILD_SRC),) + @echo ' Using $(srctree) as source for busybox' + $(Q)if [ -f $(srctree)/.config ]; then \ + echo " $(srctree) is not clean, please run 'make mrproper'";\ + echo " in the '$(srctree)' directory.";\ + /bin/false; \ + fi; + $(Q)if [ ! -d include2 ]; then mkdir -p include2; fi; + $(Q)ln -fsn $(srctree)/include/asm-$(ARCH) include2/asm +endif + +# prepare2 creates a makefile if using a separate output directory +prepare2: prepare3 outputmakefile + +prepare1: prepare2 include/config/MARKER +ifneq ($(KBUILD_MODULES),) + $(Q)mkdir -p $(MODVERDIR) + $(Q)rm -f $(MODVERDIR)/* +endif + +archprepare: prepare1 scripts_basic applets_dir + +prepare0: archprepare FORCE + $(Q)$(MAKE) $(build)=. + +# All the preparing.. +prepare prepare-all: prepare0 + +# Leave this as default for preprocessing busybox.lds.S, which is now +# done in arch/$(ARCH)/kernel/Makefile + +export CPPFLAGS_busybox.lds += -P -C -U$(ARCH) + +# FIXME: The asm symlink changes when $(ARCH) changes. That's +# hard to detect, but I suppose "make mrproper" is a good idea +# before switching between archs anyway. + +#bbox# include/asm: +#bbox# @echo ' SYMLINK $@ -> include/asm-$(ARCH)' +#bbox# $(Q)if [ ! 
-d include ]; then mkdir -p include; fi; +#bbox# @ln -fsn asm-$(ARCH) $@ + +# Split autoconf.h into include/linux/config/* +quiet_cmd_gen_bbconfigopts = GEN include/bbconfigopts.h + cmd_gen_bbconfigopts = $(srctree)/scripts/mkconfigs include/bbconfigopts.h include/bbconfigopts_bz2.h +quiet_cmd_gen_common_bufsiz = GEN include/common_bufsiz.h + cmd_gen_common_bufsiz = $(srctree)/scripts/generate_BUFSIZ.sh include/common_bufsiz.h +quiet_cmd_split_autoconf = SPLIT include/autoconf.h -> include/config/* + cmd_split_autoconf = scripts/basic/split-include include/autoconf.h include/config +#bbox# piggybacked generation of few .h files +include/config/MARKER: scripts/basic/split-include include/autoconf.h + $(call cmd,split_autoconf) + $(call cmd,gen_bbconfigopts) + $(call cmd,gen_common_bufsiz) + @touch $@ + +# Generate some files +# --------------------------------------------------------------------------- + +# KERNELRELEASE can change from a few different places, meaning version.h +# needs to be updated, so this check is forced on all builds + +uts_len := 64 + +define filechk_version.h + if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ + echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ + exit 1; \ + fi; \ + (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"; \ + echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)`; \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'; \ + ) +endef + +# --------------------------------------------------------------------------- + +PHONY += depend dep +depend dep: + @echo '*** Warning: make $@ is unnecessary now.' + +# --------------------------------------------------------------------------- +# Modules + +ifdef _BB_DISABLED_CONFIG_MODULES + +# By default, build modules as well + +all: modules + +# Build modules + +PHONY += modules +modules: $(busybox-dirs) $(if $(KBUILD_BUILTIN),busybox) + @echo ' Building modules, stage 2.'; + $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost + + +# Target to prepare building external modules +PHONY += modules_prepare +modules_prepare: prepare scripts + +# Target to install modules +PHONY += modules_install +modules_install: _modinst_ _modinst_post + +PHONY += _modinst_ +_modinst_: + @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \ + echo "Warning: you may need to install module-init-tools"; \ + echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\ + sleep 1; \ + fi + @rm -rf $(MODLIB)/kernel + @rm -f $(MODLIB)/source + @mkdir -p $(MODLIB)/kernel + @ln -s $(srctree) $(MODLIB)/source + @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \ + rm -f $(MODLIB)/build ; \ + ln -s $(objtree) $(MODLIB)/build ; \ + fi + $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst + +# If System.map exists, run depmod. This deliberately does not have a +# dependency on System.map since that would run the dependency tree on +# busybox. This depmod is only for convenience to give the initial +# boot a modules.dep even before / is mounted read-write. However the +# boot script depmod is the master version. 
+ifeq "$(strip $(INSTALL_MOD_PATH))" "" +depmod_opts := +else +depmod_opts := -b $(INSTALL_MOD_PATH) -r +endif +PHONY += _modinst_post +_modinst_post: _modinst_ + if [ -r System.map -a -x $(DEPMOD) ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi + +else # CONFIG_MODULES + +# Modules not configured +# --------------------------------------------------------------------------- + +modules modules_install: FORCE + @echo + @echo "The present busybox configuration has modules disabled." + @echo "Type 'make config' and enable loadable module support." + @echo "Then build a kernel with module support enabled." + @echo + @exit 1 + +endif # CONFIG_MODULES + +### +# Cleaning is done on three levels. +# make clean Delete most generated files +# Leave enough to build external modules +# make mrproper Delete the current configuration, and all generated files +# make distclean Remove editor backup files, patch leftover files and the like + +# Directories & files removed with 'make clean' +CLEAN_DIRS += $(MODVERDIR) _install 0_lib +CLEAN_FILES += busybox busybox_unstripped* busybox.links \ + System.map .kernelrelease \ + .tmp_kallsyms* .tmp_version .tmp_busybox* .tmp_System.map + +# Directories & files removed with 'make mrproper' +MRPROPER_DIRS += include/config include2 +MRPROPER_FILES += .config.old include/asm .version .old_version \ + include/NUM_APPLETS.h \ + include/common_bufsiz.h \ + include/autoconf.h \ + include/bbconfigopts.h \ + include/bbconfigopts_bz2.h \ + include/usage_compressed.h \ + include/applet_tables.h \ + include/applets.h \ + include/usage.h \ + applets/usage \ + .kernelrelease Module.symvers tags TAGS cscope* \ + busybox_old + +# clean - Delete most, but leave enough to build external modules +# +clean: rm-dirs := $(CLEAN_DIRS) +clean: rm-files := $(CLEAN_FILES) +clean-dirs := $(addprefix _clean_,$(srctree) $(busybox-alldirs)) + +PHONY += $(clean-dirs) clean archclean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + +clean: archclean $(clean-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find . $(RCS_FIND_IGNORE) \ + \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \ + -type f -print | xargs rm -f + +PHONY += doc-clean +doc-clean: rm-files := docs/busybox.pod \ + docs/BusyBox.html docs/busybox.1 docs/BusyBox.txt +doc-clean: + $(call cmd,rmfiles) + +# mrproper - Delete all generated files, including .config +# +mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) +mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) +mrproper-dirs := $(addprefix _mrproper_,scripts) + +PHONY += $(mrproper-dirs) mrproper archmrproper +$(mrproper-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) + +mrproper: clean archmrproper $(mrproper-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find . -name Config.src | sed 's/.src$$/.in/' | xargs -r rm -f + @find . 
-name Kbuild.src | sed 's/.src$$//' | xargs -r rm -f + +# distclean +# +PHONY += distclean + +distclean: mrproper + @find $(srctree) $(RCS_FIND_IGNORE) \ + \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ + -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ + -o -name '.*.rej' -o -name '*.tmp' -o -size 0 \ + -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \ + -type f -print | xargs rm -f + + +# Packaging of the kernel to various formats +# --------------------------------------------------------------------------- +# rpm target kept for backward compatibility +package-dir := $(srctree)/scripts/package + +%pkg: FORCE + $(Q)$(MAKE) $(build)=$(package-dir) $@ +rpm: FORCE + $(Q)$(MAKE) $(build)=$(package-dir) $@ + + +# Brief documentation of the typical targets used +# --------------------------------------------------------------------------- + +boards := $(wildcard $(srctree)/configs/*_defconfig) +boards := $(notdir $(boards)) + +-include $(srctree)/Makefile.help + +# Documentation targets +# --------------------------------------------------------------------------- +%docs: scripts_basic FORCE + $(Q)$(MAKE) $(build)=Documentation/DocBook $@ + +else # KBUILD_EXTMOD + +### +# External module support. +# When building external modules the kernel used as basis is considered +# read-only, and no consistency checks are made and the make +# system is not used on the basis kernel. If updates are required +# in the basis kernel ordinary make commands (without M=...) must +# be used. +# +# The following are the only valid targets when building external +# modules. +# make M=dir clean Delete all automatically generated files +# make M=dir modules Make all modules in specified dir +# make M=dir Same as 'make M=dir modules' +# make M=dir modules_install +# Install the modules build in the module directory +# Assumes install directory is already created + +# We are always building modules +KBUILD_MODULES := 1 +PHONY += crmodverdir +crmodverdir: + $(Q)mkdir -p $(MODVERDIR) + $(Q)rm -f $(MODVERDIR)/* + +PHONY += $(objtree)/Module.symvers +$(objtree)/Module.symvers: + @test -e $(objtree)/Module.symvers || ( \ + echo; \ + echo " WARNING: Symbol version dump $(objtree)/Module.symvers"; \ + echo " is missing; modules will have no dependencies and modversions."; \ + echo ) + +module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) +PHONY += $(module-dirs) modules +$(module-dirs): crmodverdir $(objtree)/Module.symvers + $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) + +modules: $(module-dirs) + @echo ' Building modules, stage 2.'; + $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost + +PHONY += modules_install +modules_install: _emodinst_ _emodinst_post + +install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra) +PHONY += _emodinst_ +_emodinst_: + $(Q)mkdir -p $(MODLIB)/$(install-dir) + $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst + +# Run depmod only is we have System.map and depmod is executable +quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) + cmd_depmod = if [ -r System.map -a -x $(DEPMOD) ]; then \ + $(DEPMOD) -ae -F System.map \ + $(if $(strip $(INSTALL_MOD_PATH)), \ + -b $(INSTALL_MOD_PATH) -r) \ + $(KERNELRELEASE); \ + fi + +PHONY += _emodinst_post +_emodinst_post: _emodinst_ + $(call cmd,depmod) + +clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD)) + +PHONY += $(clean-dirs) clean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + +clean: rm-dirs := $(MODVERDIR) +clean: $(clean-dirs) + $(call cmd,rmdirs) + @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \ + \( -name 
'*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \ + -type f -print | xargs rm -f + +# Dummies... +PHONY += prepare scripts +prepare: ; +scripts: ; +endif # KBUILD_EXTMOD + +# Generate tags for editors +# --------------------------------------------------------------------------- + +#We want __srctree to totally vanish out when KBUILD_OUTPUT is not set +#(which is the most common case IMHO) to avoid unneeded clutter in the big tags file. +#Adding $(srctree) adds about 20M on i386 to the size of the output file! + +ifeq ($(src),$(obj)) +__srctree = +else +__srctree = $(srctree)/ +endif + +ifeq ($(ALLSOURCE_ARCHS),) +ifeq ($(ARCH),um) +ALLINCLUDE_ARCHS := $(ARCH) $(SUBARCH) +else +ALLINCLUDE_ARCHS := $(ARCH) +endif +else +#Allow user to specify only ALLSOURCE_PATHS on the command line, keeping existing behaviour. +ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS) +endif + +ALLSOURCE_ARCHS := $(ARCH) + +define all-sources + ( find -regex '.*\.[ch]$$' ) +endef + +quiet_cmd_cscope-file = FILELST cscope.files + cmd_cscope-file = (echo \-k; echo \-q; $(all-sources)) > cscope.files + +quiet_cmd_cscope = MAKE cscope.out + cmd_cscope = cscope -b + +cscope: FORCE + $(call cmd,cscope-file) + $(call cmd,cscope) + +quiet_cmd_TAGS = MAKE $@ +define cmd_TAGS + rm -f $@; \ + ETAGSF=`etags --version | grep -i exuberant >/dev/null && \ + echo "-I __initdata,__exitdata,__acquires,__releases \ + -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \ + --extra=+f --c-kinds=+px"`; \ + $(all-sources) | xargs etags $$ETAGSF -a +endef + +TAGS: FORCE + $(call cmd,TAGS) + + +quiet_cmd_tags = MAKE $@ +define cmd_tags + rm -f $@; \ + CTAGSF=`ctags --version | grep -i exuberant >/dev/null && \ + echo "-I __initdata,__exitdata,__acquires,__releases \ + -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \ + --extra=+f --c-kinds=+px"`; \ + $(all-sources) | xargs ctags $$CTAGSF -a +endef + +tags: FORCE + $(call cmd,tags) + + +# Scripts to check various things for consistency +# --------------------------------------------------------------------------- + +includecheck: + find * $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w scripts/checkincludes.pl + +versioncheck: + find * $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w scripts/checkversion.pl + +namespacecheck: + $(PERL) $(srctree)/scripts/namespace.pl + +endif #ifeq ($(config-targets),1) +endif #ifeq ($(mixed-targets),1) + +PHONY += checkstack +checkstack: + $(OBJDUMP) -d busybox $$(find . -name '*.ko') | \ + $(PERL) $(src)/scripts/checkstack.pl $(ARCH) + +kernelrelease: + $(if $(wildcard .kernelrelease), $(Q)echo $(KERNELRELEASE), \ + $(error kernelrelease not valid - run 'make *config' to update it)) +kernelversion: + @echo $(KERNELVERSION) + +# Single targets +# --------------------------------------------------------------------------- +# Single targets are compatible with: +# - build whith mixed source and output +# - build with separate output dir 'make O=...' 
+# - external modules +# +# target-dir => where to store outputfile +# build-dir => directory in kernel source tree to use + +ifeq ($(KBUILD_EXTMOD),) + build-dir = $(patsubst %/,%,$(dir $@)) + target-dir = $(dir $@) +else + zap-slash=$(filter-out .,$(patsubst %/,%,$(dir $@))) + build-dir = $(KBUILD_EXTMOD)$(if $(zap-slash),/$(zap-slash)) + target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) +endif + +%.s: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.i: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.o: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.lst: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.s: %.S prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +%.o: %.S prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + +# Modules +%/: prepare scripts FORCE + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +/: prepare scripts FORCE + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) + +%.ko: prepare scripts FORCE + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) $(@:.ko=.o) + $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost + +# FIXME Should go into a make.lib or something +# =========================================================================== + +quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN $(wildcard $(rm-dirs))) + cmd_rmdirs = rm -rf $(rm-dirs) + +quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))) + cmd_rmfiles = rm -f $(rm-files) + + +a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \ + $(NOSTDINC_FLAGS) $(CPPFLAGS) \ + $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o) + +quiet_cmd_as_o_S = AS $@ +cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< + +# read all saved command lines + +targets := $(wildcard $(sort $(targets))) +cmd_files := $(wildcard .*.cmd $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd)) + +ifneq ($(cmd_files),) + $(cmd_files): ; # Do not try to update included dependency files + include $(cmd_files) +endif + +# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir +# Usage: +# $(Q)$(MAKE) $(clean)=dir +clean := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.clean obj + +endif # skip-makefile + +PHONY += FORCE +FORCE: + +-include $(srctree)/Makefile.custom + +# Declare the contents of the .PHONY variable as phony. We keep that +# information in a variable se we can use it in if_changed and friends. 
+.PHONY: $(PHONY) diff --git a/probe-busybox/Makefile.custom b/probe-busybox/Makefile.custom new file mode 100644 index 00000000..891c9ced --- /dev/null +++ b/probe-busybox/Makefile.custom @@ -0,0 +1,195 @@ +# ========================================================================== +# Build system +# ========================================================================== + +busybox.links: $(srctree)/applets/busybox.mkll $(objtree)/include/autoconf.h include/applets.h + $(Q)-$(SHELL) $^ > $@ + +busybox.cfg.suid: $(srctree)/applets/busybox.mksuid $(objtree)/include/autoconf.h include/applets.h + $(Q)-SUID="yes" $(SHELL) $^ > $@ +busybox.cfg.nosuid: $(srctree)/applets/busybox.mksuid $(objtree)/include/autoconf.h include/applets.h + $(Q)-SUID="DROP" $(SHELL) $^ > $@ + +.PHONY: install +ifeq ($(CONFIG_INSTALL_APPLET_SYMLINKS),y) +INSTALL_OPTS:= --symlinks +endif +ifeq ($(CONFIG_INSTALL_APPLET_HARDLINKS),y) +INSTALL_OPTS:= --hardlinks +endif +ifeq ($(CONFIG_INSTALL_APPLET_SCRIPT_WRAPPERS),y) +ifeq ($(CONFIG_INSTALL_SH_APPLET_SYMLINK),y) +INSTALL_OPTS:= --sw-sh-sym +endif +ifeq ($(CONFIG_INSTALL_SH_APPLET_HARDLINK),y) +INSTALL_OPTS:= --sw-sh-hard +endif +ifeq ($(CONFIG_INSTALL_SH_APPLET_SCRIPT_WRAPPER),y) +INSTALL_OPTS:= --scriptwrapper +endif +endif +ifeq ($(CONFIG_FEATURE_INDIVIDUAL),y) +INSTALL_OPTS:= --binaries +LIBBUSYBOX_SONAME:= 0_lib/libbusybox.so.$(BB_VER) +endif +install: $(srctree)/applets/install.sh busybox busybox.links + $(Q)DO_INSTALL_LIBS="$(strip $(LIBBUSYBOX_SONAME) $(DO_INSTALL_LIBS))" \ + $(SHELL) $< $(CONFIG_PREFIX) $(INSTALL_OPTS) +ifeq ($(strip $(CONFIG_FEATURE_SUID)),y) + @echo + @echo + @echo -------------------------------------------------- + @echo You will probably need to make your busybox binary + @echo setuid root to ensure all configured applets will + @echo work properly. + @echo -------------------------------------------------- + @echo +endif + +uninstall: busybox.links + rm -f $(CONFIG_PREFIX)/bin/busybox + for i in `cat busybox.links` ; do rm -f $(CONFIG_PREFIX)$$i; done +ifneq ($(strip $(DO_INSTALL_LIBS)),n) + for i in $(LIBBUSYBOX_SONAME) $(DO_INSTALL_LIBS); do \ + rm -f $(CONFIG_PREFIX)$$i; \ + done +endif + +# Not very elegant: copies testsuite to objdir... 
+# (cp -pPR is POSIX-compliant (cp -dpR or cp -a would not be)) +.PHONY: check +.PHONY: test +ifeq ($(CONFIG_UNIT_TEST),y) +UNIT_CMD = ./busybox unit +endif +check test: busybox busybox.links + $(UNIT_CMD) + test -d $(objtree)/testsuite || cp -pPR $(srctree)/testsuite $(objtree) + bindir=$(objtree) srcdir=$(srctree)/testsuite \ + $(SHELL) -c "cd $(objtree)/testsuite && $(srctree)/testsuite/runtest $(if $(KBUILD_VERBOSE:0=),-v)" + +.PHONY: release +release: distclean + cd ..; \ + rm -r -f busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION); \ + cp -pPR busybox busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) && { \ + find busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)/ -type d \ + -name .svn \ + -print \ + -exec rm -r -f {} \; ; \ + find busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)/ -type d \ + -name .git \ + -print \ + -exec rm -r -f {} \; ; \ + find busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)/ -type f \ + -name .gitignore \ + -print \ + -exec rm -f {} \; ; \ + find busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)/ -type f \ + -name .\#* \ + -print \ + -exec rm -f {} \; ; \ + tar -czf busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION).tar.gz \ + busybox-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)/ ; } + +.PHONY: checkhelp +checkhelp: + $(Q)$(srctree)/scripts/checkhelp.awk \ + $(patsubst %,$(srctree)/%,$(wildcard $(patsubst %,%/Config.in,$(busybox-dirs) ./))) + +.PHONY: sizes +sizes: busybox_unstripped + $(NM) --size-sort $(<) + +.PHONY: bloatcheck +bloatcheck: busybox_old busybox_unstripped + @$(srctree)/scripts/bloat-o-meter busybox_old busybox_unstripped + @$(CROSS_COMPILE)size busybox_old busybox_unstripped + +.PHONY: baseline +baseline: busybox_unstripped + @mv busybox_unstripped busybox_old + +.PHONY: objsizes +objsizes: busybox_unstripped + $(srctree)/scripts/objsizes + +.PHONY: stksizes +stksizes: busybox_unstripped + $(CROSS_COMPILE)objdump -d busybox_unstripped | $(srctree)/scripts/checkstack.pl $(ARCH) | uniq + +.PHONY: bigdata +bigdata: busybox_unstripped + $(CROSS_COMPILE)nm --size-sort busybox_unstripped | grep -vi ' [trw] ' + +# Documentation Targets +.PHONY: doc +doc: docs/busybox.pod docs/BusyBox.txt docs/busybox.1 docs/BusyBox.html + +# FIXME: Doesn't belong here + cmd_doc = + quiet_cmd_doc = $(Q)echo " DOC $(@F)" +silent_cmd_doc = +disp_doc = $($(quiet)cmd_doc) + +# sed adds newlines after "Options:" etc, +# this is needed in order to get good BusyBox.{1,txt,html} +docs/busybox.pod: $(srctree)/docs/busybox_header.pod \ + include/usage.h \ + $(srctree)/docs/busybox_footer.pod \ + applets/usage_pod + $(disp_doc) + $(Q)-mkdir -p docs + $(Q)-( \ + cat $(srctree)/docs/busybox_header.pod; \ + echo; \ + applets/usage_pod | sed 's/^[A-Za-z][A-Za-z ]*[a-z]:$$/&\n/'; \ + cat $(srctree)/docs/busybox_footer.pod; \ + ) > docs/busybox.pod + +docs/BusyBox.txt: docs/busybox.pod + $(disp_doc) + $(Q)-mkdir -p docs + $(Q)-pod2text $< > $@ + +docs/busybox.1: docs/busybox.pod + $(disp_doc) + $(Q)-mkdir -p docs + $(Q)-pod2man --center=busybox --release="version $(KERNELVERSION)" $< > $@ + +docs/BusyBox.html: docs/busybox.net/BusyBox.html + $(disp_doc) + $(Q)-mkdir -p docs + $(Q)-rm -f docs/BusyBox.html + $(Q)-cp docs/busybox.net/BusyBox.html docs/BusyBox.html + +docs/busybox.net/BusyBox.html: docs/busybox.pod + $(Q)-mkdir -p docs/busybox.net + $(Q)-pod2html --noindex $< > $@ + $(Q)-rm -f pod2htm* + +# documentation, cross-reference +# Modern distributions already ship synopsis packages (e.g. 
debian) +# If you have an old distribution go to http://synopsis.fresco.org/ +syn_tgt = $(wildcard $(patsubst %,%/*.c,$(busybox-alldirs))) +syn = $(patsubst %.c, %.syn, $(syn_tgt)) + +comma:= , +brace_open:= ( +brace_close:= ) + +SYN_CPPFLAGS := $(strip $(CPPFLAGS) $(EXTRA_CPPFLAGS)) +SYN_CPPFLAGS := $(subst $(brace_open),\$(brace_open),$(SYN_CPPFLAGS)) +SYN_CPPFLAGS := $(subst $(brace_close),\$(brace_close),$(SYN_CPPFLAGS)) +#SYN_CPPFLAGS := $(subst ",\",$(SYN_CPPFLAGS)) +#") +#SYN_CPPFLAGS := [$(patsubst %,'%'$(comma),$(SYN_CPPFLAGS))''] + +%.syn: %.c + synopsis -p C -l Comments.SSDFilter,Comments.Previous -Wp,preprocess=True,cppflags="'$(SYN_CPPFLAGS)'" -o $@ $< + +.PHONY: html +html: $(syn) + synopsis -f HTML -Wf,title="'BusyBox Documentation'" -o $@ $^ + +-include $(srctree)/Makefile.local diff --git a/probe-busybox/Makefile.flags b/probe-busybox/Makefile.flags new file mode 100644 index 00000000..542da25f --- /dev/null +++ b/probe-busybox/Makefile.flags @@ -0,0 +1,211 @@ +# ========================================================================== +# Build system +# ========================================================================== + +BB_VER = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) +export BB_VER +SKIP_STRIP ?= n + +# -std=gnu99 needed for [U]LLONG_MAX on some systems +CPPFLAGS += $(call cc-option,-std=gnu99,) + +CPPFLAGS += \ + -Iinclude -Ilibbb \ + $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include -I$(srctree)/libbb) \ + -include include/autoconf.h \ + -D_GNU_SOURCE -DNDEBUG \ + $(if $(CONFIG_LFS),-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64) \ + -D"BB_VER=KBUILD_STR($(BB_VER))" -DBB_BT=AUTOCONF_TIMESTAMP + +CFLAGS += $(call cc-option,-Wall,) +CFLAGS += $(call cc-option,-Wshadow,) +CFLAGS += $(call cc-option,-Wwrite-strings,) +CFLAGS += $(call cc-option,-Wundef,) +CFLAGS += $(call cc-option,-Wstrict-prototypes,) +CFLAGS += $(call cc-option,-Wunused -Wunused-parameter,) +CFLAGS += $(call cc-option,-Wunused-function -Wunused-value,) +CFLAGS += $(call cc-option,-Wmissing-prototypes -Wmissing-declarations,) +CFLAGS += $(call cc-option,-Wno-format-security,) +# warn about C99 declaration after statement +CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) +# If you want to add more -Wsomething above, make sure that it is +# still possible to build bbox without warnings. + +ifeq ($(CONFIG_WERROR),y) +CFLAGS += $(call cc-option,-Werror,) +## TODO: +## gcc version 4.4.0 20090506 (Red Hat 4.4.0-4) (GCC) is a PITA: +## const char *ptr; ... off_t v = *(off_t*)ptr; -> BOOM +## and no easy way to convince it to shut the hell up. +## We have a lot of such things all over the place. +## Classic *(off_t*)(void*)ptr does not work, +## and I am unwilling to do crazy gcc specific ({ void *ppp = ...; }) +## stuff in macros. This would obfuscate the code too much. +## Maybe try __attribute__((__may_alias__))? 
+#CFLAGS += $(call cc-ifversion, -eq, 0404, -fno-strict-aliasing) +endif +# gcc 3.x emits bogus "old style proto" warning on find.c:alloc_action() +CFLAGS += $(call cc-ifversion, -ge, 0400, -Wold-style-definition) + +CFLAGS += $(call cc-option,-fno-builtin-strlen -finline-limit=0 -fomit-frame-pointer -ffunction-sections -fdata-sections,) +# -fno-guess-branch-probability: prohibit pseudo-random guessing +# of branch probabilities (hopefully makes bloatcheck more stable): +CFLAGS += $(call cc-option,-fno-guess-branch-probability,) +CFLAGS += $(call cc-option,-funsigned-char -static-libgcc,) +CFLAGS += $(call cc-option,-falign-functions=1 -falign-jumps=1 -falign-labels=1 -falign-loops=1,) +# Defeat .eh_frame bloat (gcc 4.6.3 x86-32 defconfig: 20% smaller busybox binary): +CFLAGS += $(call cc-option,-fno-unwind-tables,) +CFLAGS += $(call cc-option,-fno-asynchronous-unwind-tables,) +# No automatic printf->puts,putchar conversions +# (try disabling this and comparing assembly, it's instructive) +CFLAGS += $(call cc-option,-fno-builtin-printf,) + +# FIXME: These warnings are at least partially to be concerned about and should +# be fixed.. +#CFLAGS += $(call cc-option,-Wconversion,) + +ifneq ($(CONFIG_DEBUG),y) +CFLAGS += $(call cc-option,-Os,$(call cc-option,-O2,)) +else +CFLAGS += $(call cc-option,-g,) +#CFLAGS += "-D_FORTIFY_SOURCE=2" +ifeq ($(CONFIG_DEBUG_PESSIMIZE),y) +CFLAGS += $(call cc-option,-O0,) +else +CFLAGS += $(call cc-option,-Os,$(call cc-option,-O2,)) +endif +endif +ifeq ($(CONFIG_DEBUG_SANITIZE),y) +CFLAGS += $(call cc-option,-fsanitize=address,) +CFLAGS += $(call cc-option,-fsanitize=leak,) +CFLAGS += $(call cc-option,-fsanitize=undefined,) +endif + +# If arch/$(ARCH)/Makefile did not override it (with, say, -fPIC)... +ARCH_FPIC ?= -fpic +ARCH_FPIE ?= -fpie +ARCH_PIE ?= -pie + +# Usage: $(eval $(call pkg_check_modules,VARIABLE-PREFIX,MODULES)) +define pkg_check_modules +$(1)_CFLAGS := $(shell $(PKG_CONFIG) $(PKG_CONFIG_FLAGS) --cflags $(2)) +$(1)_LIBS := $(shell $(PKG_CONFIG) $(PKG_CONFIG_FLAGS) --libs $(2)) +endef + +ifeq ($(CONFIG_BUILD_LIBBUSYBOX),y) +# on i386: 14% smaller libbusybox.so +# (code itself is 9% bigger, we save on relocs/PLT/GOT) +CFLAGS += $(ARCH_FPIC) +# and another 4% reduction of libbusybox.so: +# (external entry points must be marked EXTERNALLY_VISIBLE) +CFLAGS += $(call cc-option,-fvisibility=hidden) +endif + +ifeq ($(CONFIG_STATIC),y) +CFLAGS_busybox += -static +PKG_CONFIG_FLAGS += --static +endif + +ifeq ($(CONFIG_PIE),y) +CFLAGS_busybox += $(ARCH_PIE) +CFLAGS += $(ARCH_FPIE) +endif + +ifneq ($(CONFIG_EXTRA_CFLAGS),) +CFLAGS += $(strip $(subst ",,$(CONFIG_EXTRA_CFLAGS))) +#")) +endif + +# Note: both "" (string consisting of two quote chars) and empty string +# are possible, and should be skipped below. 
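+# Worked illustration (hypothetical values, not taken from any real .config):
+#   CONFIG_SYSROOT=""             -> $(subst "",,...) below yields nothing, block is skipped
+#   CONFIG_SYSROOT=               -> already empty, block is skipped
+#   CONFIG_SYSROOT="/opt/sysroot" -> non-empty, so --sysroot is passed and SYSROOT exported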
+ifneq ($(subst "",,$(CONFIG_SYSROOT)),) +CFLAGS += --sysroot=$(CONFIG_SYSROOT) +export SYSROOT=$(CONFIG_SYSROOT) +endif + +# glibc versions before 2.17 need to link with -rt to use clock_gettime +RT_NEEDED := $(shell echo 'int main(void){struct timespec tp; return clock_gettime(CLOCK_MONOTONIC, &tp);}' >rttest.c; $(CC) $(CFLAGS) -include time.h -o /dev/null rttest.c >/dev/null 2>&1 || echo "y"; rm rttest.c) +ifeq ($(RT_NEEDED),y) +LDLIBS += rt +endif + +# Android has no separate crypt library +# gcc-4.2.1 fails if we try to feed C source on stdin: +# echo 'int main(void){return 0;}' | $(CC) $(CFLAGS) -lcrypt -o /dev/null -xc - +# fall back to using a temp file: +CRYPT_AVAILABLE := $(shell echo 'int main(void){return 0;}' >crypttest.c; $(CC) $(CFLAGS) -lcrypt -o /dev/null crypttest.c >/dev/null 2>&1 && echo "y"; rm crypttest.c) +ifeq ($(CRYPT_AVAILABLE),y) +LDLIBS += m crypt +else +LDLIBS += m +endif + +# libpam may use libpthread, libdl and/or libaudit. +# On some platforms that requires an explicit -lpthread, -ldl, -laudit. +# However, on *other platforms* it fails when some of those flags +# given needlessly. On some systems, crypt needs pthread. +# +# I even had a system where a runtime test for pthread +# (similar to CRYPT_AVAILABLE test above) was not reliable. +# +# Do not propagate this mess by adding libraries to CONFIG_PAM/CRYPT_AVAILABLE blocks. +# Add libraries you need to CONFIG_EXTRA_LDLIBS instead. + +ifeq ($(CONFIG_PAM),y) +LDLIBS += pam pam_misc +endif + +ifeq ($(CONFIG_SELINUX),y) +SELINUX_PC_MODULES = libselinux libsepol +$(eval $(call pkg_check_modules,SELINUX,$(SELINUX_PC_MODULES))) +CPPFLAGS += $(SELINUX_CFLAGS) +LDLIBS += $(if $(SELINUX_LIBS),$(SELINUX_LIBS:-l%=%),$(SELINUX_PC_MODULES:lib%=%)) +endif + +ifeq ($(CONFIG_EFENCE),y) +LDLIBS += efence +endif + +ifeq ($(CONFIG_DMALLOC),y) +LDLIBS += dmalloc +endif + +ifeq ($(CONFIG_FEATURE_EVHTTPGET_HTTPS),y) +LINK_EV_TLS = y +endif + +ifeq ($(CONFIG_EVTLSSCAN),y) +LINK_EV_TLS = y +endif + +ifeq ($(LINK_EV_TLS),y) +LDLIBS += crypto +LDLIBS += ssl +LDLIBS += pthread +endif + +# If a flat binary should be built, CFLAGS_busybox="-elf2flt" +# env var should be set for make invocation. +# Here we check whether CFLAGS_busybox indeed contains that flag. +# (For historical reasons, we also check LDFLAGS, which doesn't +# seem to be entirely correct variable to put "-elf2flt" into). +W_ELF2FLT = -elf2flt +ifneq (,$(findstring $(W_ELF2FLT),$(LDFLAGS) $(CFLAGS_busybox))) +SKIP_STRIP = y +endif + +ifneq ($(CONFIG_EXTRA_LDFLAGS),) +LDFLAGS += $(strip $(subst ",,$(CONFIG_EXTRA_LDFLAGS))) +#")) +endif + +ifneq ($(CONFIG_EXTRA_LDLIBS),) +LDLIBS += $(strip $(subst ",,$(CONFIG_EXTRA_LDLIBS))) +#")) +endif + +# Busybox is a stack-fatty so make sure we increase default size +# TODO: use "make stksizes" to find & fix big stack users +# (we stole scripts/checkstack.pl from the kernel... thanks guys!) +# Reduced from 20k to 16k in 1.9.0. 
+FLTFLAGS += -s 16000 diff --git a/probe-busybox/Makefile.help b/probe-busybox/Makefile.help new file mode 100644 index 00000000..6a23e2a8 --- /dev/null +++ b/probe-busybox/Makefile.help @@ -0,0 +1,44 @@ +# ========================================================================== +# Build system +# ========================================================================== + +help: + @echo 'Cleaning:' + @echo ' clean - delete temporary files created by build' + @echo ' distclean - delete all non-source files (including .config)' + @echo ' doc-clean - delete all generated documentation' + @echo + @echo 'Build:' + @echo ' all - Executable and documentation' + @echo ' busybox - the swiss-army executable' + @echo ' doc - docs/BusyBox.{txt,html,1}' + @echo ' html - create html-based cross-reference' + @echo + @echo 'Configuration:' + @echo ' allnoconfig - disable all symbols in .config' + @echo ' allyesconfig - enable all symbols in .config (see defconfig)' + @echo ' config - text based configurator (of last resort)' + @echo ' defconfig - set .config to largest generic configuration' + @echo ' menuconfig - interactive curses-based configurator' + @echo ' oldconfig - resolve any unresolved symbols in .config' + @$(if $(boards), \ + $(foreach b, $(boards), \ + printf " %-21s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \ + echo '') + @echo + @echo 'Installation:' + @echo ' install - install busybox into CONFIG_PREFIX' + @echo ' uninstall' + @echo + @echo 'Development:' + @echo ' baseline - create busybox_old for bloatcheck.' + @echo ' bloatcheck - show size difference between old and new versions' + @echo ' check - run the test suite for all applets' + @echo ' checkhelp - check for missing help-entries in Config.in' + @echo ' randconfig - generate a random configuration' + @echo ' release - create a distribution tarball' + @echo ' sizes - show size of all enabled busybox symbols' + @echo ' objsizes - show size of each .o object built' + @echo ' bigdata - show data objects, biggest first' + @echo ' stksizes - show stack users, biggest first' + @echo diff --git a/probe-busybox/applets/.gitignore b/probe-busybox/applets/.gitignore new file mode 100644 index 00000000..459938d6 --- /dev/null +++ b/probe-busybox/applets/.gitignore @@ -0,0 +1,3 @@ +/applet_tables +/usage +/usage_pod diff --git a/probe-busybox/applets/Kbuild.src b/probe-busybox/applets/Kbuild.src new file mode 100644 index 00000000..3aedbbff --- /dev/null +++ b/probe-busybox/applets/Kbuild.src @@ -0,0 +1,57 @@ +# Makefile for busybox +# +# Copyright (C) 1999-2005 by Erik Andersen +# +# Licensed under GPLv2, see file LICENSE in this source tree. 
+ +obj-y := +obj-y += applets.o + +hostprogs-y:= +hostprogs-y += usage usage_pod applet_tables + +always:= $(hostprogs-y) + +# Generated files need additional love + +# This trick decreases amount of rebuilds +# if tree is merely renamed/copied +ifeq ($(srctree),$(objtree)) +srctree_slash = +else +srctree_slash = $(srctree)/ +endif + +HOSTCFLAGS_usage.o = -I$(srctree_slash)include -Iinclude +HOSTCFLAGS_usage_pod.o = -I$(srctree_slash)include -Iinclude + +applets/applets.o: include/usage_compressed.h include/applet_tables.h + +applets/applet_tables: .config include/applets.h +applets/usage: .config include/applets.h +applets/usage_pod: .config include/applets.h include/applet_tables.h + +quiet_cmd_gen_usage_compressed = GEN include/usage_compressed.h + cmd_gen_usage_compressed = $(srctree_slash)applets/usage_compressed include/usage_compressed.h applets + +include/usage_compressed.h: applets/usage $(srctree_slash)applets/usage_compressed + $(call cmd,gen_usage_compressed) + +quiet_cmd_gen_applet_tables = GEN include/applet_tables.h include/NUM_APPLETS.h + cmd_gen_applet_tables = applets/applet_tables include/applet_tables.h include/NUM_APPLETS.h + +include/NUM_APPLETS.h: applets/applet_tables + $(call cmd,gen_applet_tables) + +# In fact, include/applet_tables.h depends only on applets/applet_tables, +# and is generated by it. But specifying only it can run +# applets/applet_tables twice, possibly in parallel. +# We say that it also needs NUM_APPLETS.h +# +# Unfortunately, we need to list the same command, +# and it can be executed twice (sequentially). +# The alternative is to not list any command, +# and then if include/applet_tables.h is deleted, it won't be rebuilt. +# +include/applet_tables.h: include/NUM_APPLETS.h applets/applet_tables + $(call cmd,gen_applet_tables) diff --git a/probe-busybox/applets/applet_tables.c b/probe-busybox/applets/applet_tables.c new file mode 100644 index 00000000..ef911a43 --- /dev/null +++ b/probe-busybox/applets/applet_tables.c @@ -0,0 +1,220 @@ +/* vi: set sw=4 ts=4: */ +/* + * Applet table generator. + * Runs on host and produces include/applet_tables.h + * + * Copyright (C) 2007 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) ((unsigned)(sizeof(x) / sizeof((x)[0]))) + +#include "../include/autoconf.h" +#include "../include/applet_metadata.h" + +struct bb_applet { + const char *name; + const char *main; + enum bb_install_loc_t install_loc; + enum bb_suid_t need_suid; + /* true if instead of fork(); exec("applet"); waitpid(); + * one can do fork(); exit(applet_main(argc,argv)); waitpid(); */ + unsigned char noexec; + /* Even nicer */ + /* true if instead of fork(); exec("applet"); waitpid(); + * one can simply call applet_main(argc,argv); */ + unsigned char nofork; +}; + +/* Define struct bb_applet applets[] */ +#include "../include/applets.h" + +enum { NUM_APPLETS = ARRAY_SIZE(applets) }; + +static int cmp_name(const void *a, const void *b) +{ + const struct bb_applet *aa = a; + const struct bb_applet *bb = b; + return strcmp(aa->name, bb->name); +} + +static int str_isalnum_(const char *s) +{ + while (*s) { + if (!isalnum(*s) && *s != '_') + return 0; + s++; + } + return 1; +} + +int main(int argc, char **argv) +{ + int i, j; + + // In find_applet_by_name(), before linear search, narrow it down + // by looking at N "equidistant" names. 
With ~350 applets: + // KNOWN_APPNAME_OFFSETS cycles + // 0 9057 + // 2 4604 + ~100 bytes of code + // 4 2407 + 4 bytes + // 8 1342 + 8 bytes + // 16 908 + 16 bytes + // 32 884 + 32 bytes + // With 8, int16_t applet_nameofs[] table has 7 elements. + int KNOWN_APPNAME_OFFSETS = 8; + // With 128 applets we do two linear searches, with 1..7 strcmp's in the first one + // and 1..16 strcmp's in the second. With 256 apps, second search does 1..32 strcmp's. + if (NUM_APPLETS < 128) + KNOWN_APPNAME_OFFSETS = 4; + if (NUM_APPLETS < 32) + KNOWN_APPNAME_OFFSETS = 0; + + qsort(applets, NUM_APPLETS, sizeof(applets[0]), cmp_name); + + if (!argv[1]) + return 1; + i = open(argv[1], O_WRONLY | O_TRUNC | O_CREAT, 0666); + if (i < 0) + return 1; + dup2(i, 1); + + /* Keep in sync with include/busybox.h! */ + + printf("/* This is a generated file, don't edit */\n\n"); + + printf("#define NUM_APPLETS %u\n", NUM_APPLETS); + if (NUM_APPLETS == 1) { + printf("#define SINGLE_APPLET_STR \"%s\"\n", applets[0].name); + printf("#define SINGLE_APPLET_MAIN %s_main\n", applets[0].main); + } + + printf("#define KNOWN_APPNAME_OFFSETS %u\n\n", KNOWN_APPNAME_OFFSETS); + if (KNOWN_APPNAME_OFFSETS > 0) { + int ofs, offset[KNOWN_APPNAME_OFFSETS], index[KNOWN_APPNAME_OFFSETS]; + for (i = 0; i < KNOWN_APPNAME_OFFSETS; i++) + index[i] = i * NUM_APPLETS / KNOWN_APPNAME_OFFSETS; + ofs = 0; + for (i = 0; i < NUM_APPLETS; i++) { + for (j = 0; j < KNOWN_APPNAME_OFFSETS; j++) + if (i == index[j]) + offset[j] = ofs; + ofs += strlen(applets[i].name) + 1; + } + /* If the list of names is too long refuse to proceed */ + if (ofs > 0xffff) + return 1; + printf("const uint16_t applet_nameofs[] ALIGN2 = {\n"); + for (i = 1; i < KNOWN_APPNAME_OFFSETS; i++) + printf("%d,\n", offset[i]); + printf("};\n\n"); + } + + //printf("#ifndef SKIP_definitions\n"); + printf("const char applet_names[] ALIGN1 = \"\"\n"); + for (i = 0; i < NUM_APPLETS; i++) { + printf("\"%s\" \"\\0\"\n", applets[i].name); +// if (MAX_APPLET_NAME_LEN < strlen(applets[i].name)) +// MAX_APPLET_NAME_LEN = strlen(applets[i].name); + } + printf(";\n\n"); + + for (i = 0; i < NUM_APPLETS; i++) { + if (str_isalnum_(applets[i].name)) + printf("#define APPLET_NO_%s %d\n", applets[i].name, i); + } + printf("\n"); + + printf("#ifndef SKIP_applet_main\n"); + printf("int (*const applet_main[])(int argc, char **argv) = {\n"); + for (i = 0; i < NUM_APPLETS; i++) { + printf("%s_main,\n", applets[i].main); + } + printf("};\n"); + printf("#endif\n\n"); + +#if ENABLE_FEATURE_PREFER_APPLETS \ + || ENABLE_FEATURE_SH_STANDALONE \ + || ENABLE_FEATURE_SH_NOFORK + printf("const uint8_t applet_flags[] ALIGN1 = {\n"); + i = 0; + while (i < NUM_APPLETS) { + int v = applets[i].nofork + (applets[i].noexec << 1); + if (++i < NUM_APPLETS) + v |= (applets[i].nofork + (applets[i].noexec << 1)) << 2; + if (++i < NUM_APPLETS) + v |= (applets[i].nofork + (applets[i].noexec << 1)) << 4; + if (++i < NUM_APPLETS) + v |= (applets[i].nofork + (applets[i].noexec << 1)) << 6; + printf("0x%02x,\n", v); + i++; + } + printf("};\n\n"); +#endif + +#if ENABLE_FEATURE_SUID + printf("const uint8_t applet_suid[] ALIGN1 = {\n"); + i = 0; + while (i < NUM_APPLETS) { + int v = applets[i].need_suid; /* 2 bits */ + if (++i < NUM_APPLETS) + v |= applets[i].need_suid << 2; + if (++i < NUM_APPLETS) + v |= applets[i].need_suid << 4; + if (++i < NUM_APPLETS) + v |= applets[i].need_suid << 6; + printf("0x%02x,\n", v); + i++; + } + printf("};\n\n"); +#endif + +#if ENABLE_FEATURE_INSTALLER + printf("const uint8_t applet_install_loc[] ALIGN1 = 
{\n"); + i = 0; + while (i < NUM_APPLETS) { + int v = applets[i].install_loc; /* 3 bits */ + if (++i < NUM_APPLETS) + v |= applets[i].install_loc << 4; /* 3 bits */ + printf("0x%02x,\n", v); + i++; + } + printf("};\n"); +#endif + //printf("#endif /* SKIP_definitions */\n"); + +// printf("\n"); +// printf("#define MAX_APPLET_NAME_LEN %u\n", MAX_APPLET_NAME_LEN); + + if (argv[2]) { + FILE *fp; + char line_new[80]; +// char line_old[80]; + + sprintf(line_new, "#define NUM_APPLETS %u\n", NUM_APPLETS); +// line_old[0] = 0; +// fp = fopen(argv[2], "r"); +// if (fp) { +// fgets(line_old, sizeof(line_old), fp); +// fclose(fp); +// } +// if (strcmp(line_old, line_new) != 0) { + fp = fopen(argv[2], "w"); + if (!fp) + return 1; + fputs(line_new, fp); +// } + } + + return 0; +} diff --git a/probe-busybox/applets/applets.c b/probe-busybox/applets/applets.c new file mode 100644 index 00000000..98c2b44f --- /dev/null +++ b/probe-busybox/applets/applets.c @@ -0,0 +1,16 @@ +/* vi: set sw=4 ts=4: */ +/* + * Stub for linking busybox binary against libbusybox. + * + * Copyright (C) 2007 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#include "busybox.h" + +#if ENABLE_BUILD_LIBBUSYBOX +int main(int argc UNUSED_PARAM, char **argv) +{ + return lbb_main(argv); +} +#endif diff --git a/probe-busybox/applets/busybox.mkll b/probe-busybox/applets/busybox.mkll new file mode 100755 index 00000000..68dbf216 --- /dev/null +++ b/probe-busybox/applets/busybox.mkll @@ -0,0 +1,24 @@ +#!/bin/sh +# Make busybox links list file. + +# input $1: full path to Config.h +# input $2: full path to applets.h +# output (stdout): list of pathnames that should be linked to busybox + +# Maintainer: Larry Doolittle + +export LC_ALL=POSIX +export LC_CTYPE=POSIX + +CONFIG_H=${1:-include/autoconf.h} +APPLETS_H=${2:-include/applets.h} +$HOSTCC -E -DMAKE_LINKS -include $CONFIG_H $APPLETS_H | + awk '/^[ \t]*LINK/{ + dir=substr($2,7) + gsub("_","/",dir) + if(dir=="/ROOT") dir="" + file=$3 + gsub("\"","",file) + if (file=="busybox") next + print tolower(dir) "/" file + }' diff --git a/probe-busybox/applets/busybox.mksuid b/probe-busybox/applets/busybox.mksuid new file mode 100755 index 00000000..6492c079 --- /dev/null +++ b/probe-busybox/applets/busybox.mksuid @@ -0,0 +1,54 @@ +#!/bin/sh +# Make list of configuration variables regarding suid handling + +# input $1: full path to autoconf.h +# input $2: full path to applets.h +# input $3: full path to .config +# output (stdout): list of CONFIG_ that do or may require suid + +# If the environment variable SUID is not set or set to DROP, +# lists all config options that do not require suid permissions. +# Otherwise, lists all config options for applets that DO or MAY require +# suid permissions. 
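+# Example invocation (illustrative only; mirrors how Makefile.custom calls this
+# script from an in-tree build where HOSTCC and a .config are present):
+#   SUID="yes"  sh applets/busybox.mksuid include/autoconf.h include/applets.h > busybox.cfg.suid
+#   SUID="DROP" sh applets/busybox.mksuid include/autoconf.h include/applets.h > busybox.cfg.nosuid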
+ +# Maintainer: Bernhard Reutner-Fischer + +export LC_ALL=POSIX +export LC_CTYPE=POSIX + +CONFIG_H=${1:-include/autoconf.h} +APPLETS_H=${2:-include/applets.h} +DOT_CONFIG=${3:-.config} + +case ${SUID:-DROP} in +[dD][rR][oO][pP]) USE="DROP" ;; +*) USE="suid" ;; +esac + +$HOSTCC -E -DMAKE_SUID -include $CONFIG_H $APPLETS_H | + awk -v USE=${USE} ' + /^SUID[ \t]/{ + if (USE == "DROP") { + if ($2 != "BB_SUID_DROP") next + } else { + if ($2 == "BB_SUID_DROP") next + } + cfg = $NF + gsub("\"", "", cfg) + cfg = substr(cfg, 8) + s[i++] = "CONFIG_" cfg + s[i++] = "CONFIG_FEATURE_" cfg "_.*" + } + END{ + while (getline < ARGV[2]) { + for (j in s) { + if ($0 ~ "^" s[j] "=y$") { + sub(/=.*/, "") + print + if (s[j] !~ /\*$/) delete s[j] # can drop this applet now + } + } + } + } +' - $DOT_CONFIG + diff --git a/probe-busybox/applets/individual.c b/probe-busybox/applets/individual.c new file mode 100644 index 00000000..4c468df0 --- /dev/null +++ b/probe-busybox/applets/individual.c @@ -0,0 +1,24 @@ +/* Minimal wrapper to build an individual busybox applet. + * + * Copyright 2005 Rob Landley +#include +#include "usage.h" + +int main(int argc, char **argv) +{ + applet_name = argv[0]; + return APPLET_main(argc, argv); +} + +void bb_show_usage(void) +{ + fputs(APPLET_full_usage "\n", stdout); + exit(EXIT_FAILURE); +} diff --git a/probe-busybox/applets/install.sh b/probe-busybox/applets/install.sh new file mode 100755 index 00000000..e6559ff4 --- /dev/null +++ b/probe-busybox/applets/install.sh @@ -0,0 +1,135 @@ +#!/bin/sh + +export LC_ALL=POSIX +export LC_CTYPE=POSIX + +prefix=$1 +if [ -z "$prefix" ]; then + echo "usage: applets/install.sh DESTINATION [--symlinks/--hardlinks/--binaries/--scriptwrapper]" + exit 1 +fi + +# Source the configuration +. ./.config + +h=`sort busybox.links | uniq` + +sharedlib_dir="0_lib" + +linkopts="" +scriptwrapper="n" +binaries="n" +cleanup="0" +noclobber="0" +case "$2" in + --hardlinks) linkopts="-f";; + --symlinks) linkopts="-fs";; + --binaries) binaries="y";; + --scriptwrapper) scriptwrapper="y";swrapall="y";; + --sw-sh-hard) scriptwrapper="y";linkopts="-f";; + --sw-sh-sym) scriptwrapper="y";linkopts="-fs";; + --cleanup) cleanup="1";; + --noclobber) noclobber="1";; + "") h="";; + *) echo "Unknown install option: $2"; exit 1;; +esac + +if [ -n "$DO_INSTALL_LIBS" ] && [ "$DO_INSTALL_LIBS" != "n" ]; then + # get the target dir for the libs + # assume it starts with lib + libdir=$($CC -print-file-name=libc.so | \ + sed -n 's%^.*\(/lib[^\/]*\)/libc.so%\1%p') + if test -z "$libdir"; then + libdir=/lib + fi + + mkdir -p "$prefix/$libdir" || exit 1 + for i in $DO_INSTALL_LIBS; do + rm -f "$prefix/$libdir/$i" || exit 1 + if [ -f "$i" ]; then + echo " Installing $i to the target at $prefix/$libdir/" + cp -pPR "$i" "$prefix/$libdir/" || exit 1 + chmod 0644 "$prefix/$libdir/`basename $i`" || exit 1 + fi + done +fi + +if [ "$cleanup" = "1" ] && [ -e "$prefix/busybox" ]; then + inode=`ls -i "$prefix/busybox" | awk '{print $1}'` + sub_shell_it=` + cd "$prefix" + for d in usr/sbin usr/bin sbin bin; do + pd=$PWD + if [ -d "$d" ]; then + cd "$d" + ls -iL . 
| grep "^ *$inode" | awk '{print $2}' | env -i xargs rm -f + fi + cd "$pd" + done + ` + exit 0 +fi + +rm -f "$prefix/busybox" || exit 1 +mkdir -p "$prefix" || exit 1 +install -m 755 busybox "$prefix/busybox" || exit 1 + +for i in $h; do + appdir=`dirname "$i"` + app=`basename "$i"` + mkdir -p "$prefix/$appdir" || exit 1 + if [ "$scriptwrapper" = "y" ]; then + if [ "$swrapall" != "y" ] && [ "$i" = "/bin/sh" ]; then + ln $linkopts busybox "$prefix/$i" || exit 1 + else + rm -f "$prefix/$i" + echo "#!/bin/busybox" >"$prefix/$i" + chmod +x "$prefix/$i" + fi + echo " $prefix/$i" + elif [ "$binaries" = "y" ]; then + # Copy the binary over rather + if [ -e $sharedlib_dir/$app ]; then + if [ "$noclobber" = "0" ] || [ ! -e "$prefix/$i" ]; then + echo " Copying $sharedlib_dir/$app to $prefix/$i" + cp -pPR $sharedlib_dir/$app $prefix/$i || exit 1 + else + echo " $prefix/$i already exists" + fi + else + echo "Error: Could not find $sharedlib_dir/$app" + exit 1 + fi + else + if [ "$2" = "--hardlinks" ]; then + bb_path="$prefix/busybox" + else + case "$appdir" in + /) + bb_path="busybox" + ;; + /bin) + bb_path="busybox" + ;; + /sbin) + bb_path="../bin/busybox" + ;; + /usr/bin | /usr/sbin) + bb_path="../../bin/busybox" + ;; + *) + echo "Unknown installation directory: $appdir" + exit 1 + ;; + esac + fi + if [ "$noclobber" = "0" ] || [ ! -e "$prefix/$i" ]; then + echo " $prefix/$i -> $bb_path" + ln $linkopts "$bb_path" "$prefix/$i" || exit 1 + else + echo " $prefix/$i already exists" + fi + fi +done + +exit 0 diff --git a/probe-busybox/applets/usage.c b/probe-busybox/applets/usage.c new file mode 100644 index 00000000..94520ff6 --- /dev/null +++ b/probe-busybox/applets/usage.c @@ -0,0 +1,55 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2008 Denys Vlasenko. + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#include +#include +#include + +#include "autoconf.h" + +/* Since we can't use platform.h, have to do this again by hand: */ +#if ENABLE_NOMMU +# define BB_MMU 0 +# define USE_FOR_NOMMU(...) __VA_ARGS__ +# define USE_FOR_MMU(...) +#else +# define BB_MMU 1 +# define USE_FOR_NOMMU(...) +# define USE_FOR_MMU(...) __VA_ARGS__ +#endif + +#include "usage.h" +#define MAKE_USAGE(aname, usage) { aname, usage }, +static struct usage_data { + const char *aname; + const char *usage; +} usage_array[] = { +#include "applets.h" +}; + +static int compare_func(const void *a, const void *b) +{ + const struct usage_data *ua = a; + const struct usage_data *ub = b; + return strcmp(ua->aname, ub->aname); +} + +int main(void) +{ + int i; + int num_messages = sizeof(usage_array) / sizeof(usage_array[0]); + + if (num_messages == 0) + return 0; + + qsort(usage_array, + num_messages, sizeof(usage_array[0]), + compare_func); + for (i = 0; i < num_messages; i++) + write(STDOUT_FILENO, usage_array[i].usage, strlen(usage_array[i].usage) + 1); + + return 0; +} diff --git a/probe-busybox/applets/usage_compressed b/probe-busybox/applets/usage_compressed new file mode 100755 index 00000000..186fcde7 --- /dev/null +++ b/probe-busybox/applets/usage_compressed @@ -0,0 +1,59 @@ +#!/bin/sh + +target="$1" +loc="$2" + +test "$target" || exit 1 +test "$loc" || loc=. +test -x "$loc/usage" || exit 1 +test "$SED" || SED=sed +test "$DD" || DD=dd + +# Some people were bitten by their system lacking a (proper) od +od -v -b /dev/null +if test $? 
!= 0; then + echo 'od tool is not installed or cannot accept "-v -b" options' + exit 1 +fi + +exec >"$target.$$" + +echo '#define UNPACKED_USAGE "" \' +"$loc/usage" | od -v -b \ +| grep -v '^ ' \ +| $SED -e 's/^[^ ]*//' \ + -e 's/ //g' \ + -e '/^$/d' \ + -e 's/\(...\)/\\\1/g' \ + -e 's/^/"/' \ + -e 's/$/" \\/' +echo '' +# "grep -v '^ '" is for toybox's od bug: od -b prints some extra lines: +#0000000 010 000 010 000 133 055 144 146 135 040 133 055 143 040 103 117 +# 000010 000010 026533 063144 020135 026533 020143 047503 +#0000020 116 106 104 111 122 135 040 133 055 154 040 114 117 107 106 111 +# 043116 044504 056522 055440 066055 046040 043517 044506 +#0000040 114 105 135 040 133 055 141 040 101 103 124 111 117 116 106 111 +# 042514 020135 026533 020141 041501 044524 047117 044506 + +echo '#define PACKED_USAGE \' +## Breaks on big-endian systems! +## # Extra effort to avoid using "od -t x1": -t is not available +## # in non-CONFIG_DESKTOPed busybox od +## +## "$loc/usage" | bzip2 -1 | od -v -x \ +## | $SED -e 's/^[^ ]*//' \ +## -e 's/ //g' \ +## -e '/^$/d' \ +## -e 's/\(..\)\(..\)/0x\2,0x\1,/g' +## -e 's/$/ \\/' +"$loc/usage" | bzip2 -1 | $DD bs=2 skip=1 2>/dev/null | od -v -b \ +| grep -v '^ ' \ +| $SED -e 's/^[^ ]*//' \ + -e 's/ //g' \ + -e '/^$/d' \ + -e 's/\(...\)/0\1,/g' \ + -e 's/$/ \\/' +echo '' + +mv -- "$target.$$" "$target" diff --git a/probe-busybox/applets/usage_pod.c b/probe-busybox/applets/usage_pod.c new file mode 100644 index 00000000..ccc166ae --- /dev/null +++ b/probe-busybox/applets/usage_pod.c @@ -0,0 +1,113 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2009 Denys Vlasenko. + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#include +#include +#include +#include +#include + +#include "autoconf.h" + +#define SKIP_applet_main +#define ALIGN1 /* nothing, just to placate applet_tables.h */ +#define ALIGN2 /* nothing, just to placate applet_tables.h */ +#include "applet_tables.h" + +/* Since we can't use platform.h, have to do this again by hand: */ +#if ENABLE_NOMMU +# define BB_MMU 0 +# define USE_FOR_NOMMU(...) __VA_ARGS__ +# define USE_FOR_MMU(...) +#else +# define BB_MMU 1 +# define USE_FOR_NOMMU(...) +# define USE_FOR_MMU(...) 
__VA_ARGS__ +#endif + +#include "usage.h" +#define MAKE_USAGE(aname, usage) { aname, usage }, +static struct usage_data { + const char *aname; + const char *usage; +} usage_array[] = { +#include "applets.h" +}; + +static int compare_func(const void *a, const void *b) +{ + const struct usage_data *ua = a; + const struct usage_data *ub = b; + return strcmp(ua->aname, ub->aname); +} + +int main(void) +{ + int col, len2; + + int i; + int num_messages = sizeof(usage_array) / sizeof(usage_array[0]); + + if (num_messages == 0) + return 0; + + qsort(usage_array, + num_messages, sizeof(usage_array[0]), + compare_func); + + col = 0; + for (i = 0; i < num_messages; i++) { + len2 = strlen(usage_array[i].aname) + 2; + if (col >= 76 - len2) { + printf(",\n"); + col = 0; + } + if (col == 0) { + col = 6; + printf("\t"); + } else { + printf(", "); + } + printf(usage_array[i].aname); + col += len2; + } + printf("\n\n"); + + printf("=head1 COMMAND DESCRIPTIONS\n\n"); + printf("=over 4\n\n"); + + for (i = 0; i < num_messages; i++) { + if (usage_array[i].aname[0] >= 'a' && usage_array[i].aname[0] <= 'z' + && usage_array[i].usage[0] != NOUSAGE_STR[0] + ) { + printf("=item B<%s>\n\n", usage_array[i].aname); + if (usage_array[i].usage[0]) + printf("%s %s\n\n", usage_array[i].aname, usage_array[i].usage); + else + printf("%s\n\n", usage_array[i].aname); + } + } + printf("=back\n\n"); + + return 0; +} + +/* TODO: we used to make options bold with B<> and output an example too: + +=item B + +cat [B<-u>] [FILE]... + +Concatenate FILE(s) and print them to stdout + +Options: + -u Use unbuffered i/o (ignored) + +Example: + $ cat /proc/uptime + 110716.72 17.67 + +*/ diff --git a/probe-busybox/applets_sh/README b/probe-busybox/applets_sh/README new file mode 100644 index 00000000..9dcd38ae --- /dev/null +++ b/probe-busybox/applets_sh/README @@ -0,0 +1,5 @@ +This directory contains examples of applets implemented as shell scripts. + +So far these scripts are not hooked to the build system and are not +installed by "make install". If you want to use them, +you need to install them by hand. 
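+
+A minimal manual install might look like this (the destination directory is
+only an example, not something this tree mandates):
+
+    for f in dos2unix nologin tac unix2dos; do
+        install -m 0755 applets_sh/$f /usr/local/bin/$f
+    done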
diff --git a/probe-busybox/applets_sh/dos2unix b/probe-busybox/applets_sh/dos2unix new file mode 100755 index 00000000..0fd5206f --- /dev/null +++ b/probe-busybox/applets_sh/dos2unix @@ -0,0 +1,5 @@ +#!/bin/sh +# TODO: use getopt to avoid parsing options as filenames, +# and to support -- and --help +[ $# -ne 0 ] && DASH_I=-i +sed $DASH_I -e 's/\r$//' "$@" diff --git a/probe-busybox/applets_sh/nologin b/probe-busybox/applets_sh/nologin new file mode 100755 index 00000000..3768eaaa --- /dev/null +++ b/probe-busybox/applets_sh/nologin @@ -0,0 +1,4 @@ +#!/bin/sh +cat /etc/nologin.txt 2>/dev/null || echo "This account is not available" +sleep 5 +exit 1 diff --git a/probe-busybox/applets_sh/tac b/probe-busybox/applets_sh/tac new file mode 100755 index 00000000..c5a8e39c --- /dev/null +++ b/probe-busybox/applets_sh/tac @@ -0,0 +1,7 @@ +#!/bin/sh +# TODO: use getopt to avoid parsing options as filenames, +# and to support -- and --help +for i in "$@" +do +sed -e '1!G;h;$!d' "$i" +done diff --git a/probe-busybox/applets_sh/unix2dos b/probe-busybox/applets_sh/unix2dos new file mode 100755 index 00000000..70e04290 --- /dev/null +++ b/probe-busybox/applets_sh/unix2dos @@ -0,0 +1,5 @@ +#!/bin/sh +# TODO: use getopt to avoid parsing options as filenames, +# and to support -- and --help +[ $# -ne 0 ] && DASH_I=-i +sed $DASH_I -e 's/$/\r/' "$@" diff --git a/probe-busybox/arch/i386/Makefile b/probe-busybox/arch/i386/Makefile new file mode 100644 index 00000000..e6c99c67 --- /dev/null +++ b/probe-busybox/arch/i386/Makefile @@ -0,0 +1,7 @@ +# ========================================================================== +# Build system +# ========================================================================== + +# -mpreferred-stack-boundary=2 is essential in preventing gcc 4.2.x +# from aligning stack to 16 bytes. (Which is gcc's way of supporting SSE). +CFLAGS += $(call cc-option,-march=i386 -mpreferred-stack-boundary=2,) diff --git a/probe-busybox/archival/Config.src b/probe-busybox/archival/Config.src new file mode 100644 index 00000000..5e7cfc0a --- /dev/null +++ b/probe-busybox/archival/Config.src @@ -0,0 +1,40 @@ +# +# For a description of the syntax of this configuration file, +# see scripts/kbuild/config-language.txt. +# + +menu "Archival Utilities" + +config FEATURE_SEAMLESS_XZ + bool "Make tar, rpm, modprobe etc understand .xz data" + default y + help + Make tar, rpm, modprobe etc understand .xz data. + +config FEATURE_SEAMLESS_LZMA + bool "Make tar, rpm, modprobe etc understand .lzma data" + default y + help + Make tar, rpm, modprobe etc understand .lzma data. + +config FEATURE_SEAMLESS_BZ2 + bool "Make tar, rpm, modprobe etc understand .bz2 data" + default y + help + Make tar, rpm, modprobe etc understand .bz2 data. + +config FEATURE_SEAMLESS_GZ + bool "Make tar, rpm, modprobe etc understand .gz data" + default y + help + Make tar, rpm, modprobe etc understand .gz data. + +config FEATURE_SEAMLESS_Z + bool "Make tar, rpm, modprobe etc understand .Z data" + default n # it is ancient + help + Make tar, rpm, modprobe etc understand .Z data. + +INSERT + +endmenu diff --git a/probe-busybox/archival/Kbuild.src b/probe-busybox/archival/Kbuild.src new file mode 100644 index 00000000..b3a7d538 --- /dev/null +++ b/probe-busybox/archival/Kbuild.src @@ -0,0 +1,11 @@ +# Makefile for busybox +# +# Copyright (C) 1999-2005 by Erik Andersen +# +# Licensed under GPLv2, see file LICENSE in this source tree. 
+ +libs-y += libarchive/ + +lib-y:= + +INSERT diff --git a/probe-busybox/archival/bbunzip.c b/probe-busybox/archival/bbunzip.c new file mode 100644 index 00000000..70345839 --- /dev/null +++ b/probe-busybox/archival/bbunzip.c @@ -0,0 +1,600 @@ +/* vi: set sw=4 ts=4: */ +/* + * Common code for gunzip-like applets + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" +#include "bb_archive.h" + +/* lzop_main() uses bbunpack(), need this: */ +//kbuild:lib-$(CONFIG_LZOP) += bbunzip.o +//kbuild:lib-$(CONFIG_LZOPCAT) += bbunzip.o +//kbuild:lib-$(CONFIG_UNLZOP) += bbunzip.o +/* bzip2_main() too: */ +//kbuild:lib-$(CONFIG_BZIP2) += bbunzip.o +/* gzip_main() too: */ +//kbuild:lib-$(CONFIG_GZIP) += bbunzip.o + +/* Note: must be kept in sync with archival/lzop.c */ +enum { + OPT_STDOUT = 1 << 0, + OPT_FORCE = 1 << 1, + /* only some decompressors: */ + OPT_VERBOSE = 1 << 2, + OPT_QUIET = 1 << 3, + OPT_DECOMPRESS = 1 << 4, + OPT_TEST = 1 << 5, + SEAMLESS_MAGIC = (1 << 31) * SEAMLESS_COMPRESSION, +}; + +static +int open_to_or_warn(int to_fd, const char *filename, int flags, int mode) +{ + int fd = open3_or_warn(filename, flags, mode); + if (fd < 0) { + return 1; + } + xmove_fd(fd, to_fd); + return 0; +} + +char* FAST_FUNC append_ext(char *filename, const char *expected_ext) +{ + return xasprintf("%s.%s", filename, expected_ext); +} + +int FAST_FUNC bbunpack(char **argv, + IF_DESKTOP(long long) int FAST_FUNC (*unpacker)(transformer_state_t *xstate), + char* FAST_FUNC (*make_new_name)(char *filename, const char *expected_ext), + const char *expected_ext +) +{ + struct stat stat_buf; + IF_DESKTOP(long long) int status = 0; + char *filename, *new_name; + smallint exitcode = 0; + transformer_state_t xstate; + + do { + /* NB: new_name is *maybe* malloc'ed! 
*/ + new_name = NULL; + filename = *argv; /* can be NULL - 'streaming' bunzip2 */ + + if (filename && LONE_DASH(filename)) + filename = NULL; + + /* Open src */ + if (filename) { + if (!(option_mask32 & SEAMLESS_MAGIC)) { + if (stat(filename, &stat_buf) != 0) { + err_name: + bb_simple_perror_msg(filename); + err: + exitcode = 1; + goto free_name; + } + if (open_to_or_warn(STDIN_FILENO, filename, O_RDONLY, 0)) + goto err; + } else { + /* "clever zcat" with FILE */ + /* fail_if_not_compressed because zcat refuses uncompressed input */ + int fd = open_zipped(filename, /*fail_if_not_compressed:*/ 1); + if (fd < 0) + goto err_name; + xmove_fd(fd, STDIN_FILENO); + } + } else + if (option_mask32 & SEAMLESS_MAGIC) { + /* "clever zcat" on stdin */ + if (setup_unzip_on_fd(STDIN_FILENO, /*fail_if_not_compressed*/ 1)) + goto err; + } + + /* Special cases: test, stdout */ + if (option_mask32 & (OPT_STDOUT|OPT_TEST)) { + if (option_mask32 & OPT_TEST) + if (open_to_or_warn(STDOUT_FILENO, bb_dev_null, O_WRONLY, 0)) + xfunc_die(); + filename = NULL; + } + + /* Open dst if we are going to unpack to file */ + if (filename) { + new_name = make_new_name(filename, expected_ext); + if (!new_name) { + bb_error_msg("%s: unknown suffix - ignored", filename); + goto err; + } + + /* -f: overwrite existing output files */ + if (option_mask32 & OPT_FORCE) { + unlink(new_name); + } + + /* O_EXCL: "real" bunzip2 doesn't overwrite files */ + /* GNU gunzip does not bail out, but goes to next file */ + if (open_to_or_warn(STDOUT_FILENO, new_name, O_WRONLY | O_CREAT | O_EXCL, + stat_buf.st_mode)) + goto err; + } + + /* Check that the input is sane */ + if (!(option_mask32 & OPT_FORCE) && isatty(STDIN_FILENO)) { + bb_error_msg_and_die("compressed data not read from terminal, " + "use -f to force it"); + } + + if (!(option_mask32 & SEAMLESS_MAGIC)) { + init_transformer_state(&xstate); + xstate.signature_skipped = 0; + /*xstate.src_fd = STDIN_FILENO; - already is */ + xstate.dst_fd = STDOUT_FILENO; + status = unpacker(&xstate); + if (status < 0) + exitcode = 1; + } else { + if (bb_copyfd_eof(STDIN_FILENO, STDOUT_FILENO) < 0) + /* Disk full, tty closed, etc. No point in continuing */ + xfunc_die(); + } + + if (!(option_mask32 & OPT_STDOUT)) + xclose(STDOUT_FILENO); /* with error check! */ + + if (filename) { + char *del = new_name; + + if (status >= 0) { + unsigned new_name_len; + + /* TODO: restore other things? */ + if (xstate.mtime != 0) { + struct timeval times[2]; + + times[1].tv_sec = times[0].tv_sec = xstate.mtime; + times[1].tv_usec = times[0].tv_usec = 0; + /* Note: we closed it first. + * On some systems calling utimes + * then closing resets the mtime + * back to current time. */ + utimes(new_name, times); /* ignoring errors */ + } + + if (ENABLE_DESKTOP) + new_name_len = strlen(new_name); + /* Restore source filename (unless tgz -> tar case) */ + if (new_name == filename) { + new_name_len = strlen(filename); + filename[new_name_len] = '.'; + } + /* Extreme bloat for gunzip compat */ + /* Some users do want this info... */ + if (ENABLE_DESKTOP && (option_mask32 & OPT_VERBOSE)) { + unsigned percent = status + ? 
((uoff_t)stat_buf.st_size * 100u / (unsigned long long)status) + : 0; + fprintf(stderr, "%s: %u%% - replaced with %.*s\n", + filename, + 100u - percent, + new_name_len, new_name + ); + } + /* Delete _source_ file */ + del = filename; + } + xunlink(del); + free_name: + if (new_name != filename) + free(new_name); + } + } while (*argv && *++argv); + + if (option_mask32 & OPT_STDOUT) + xclose(STDOUT_FILENO); /* with error check! */ + + return exitcode; +} + +#if ENABLE_UNCOMPRESS \ + || ENABLE_BUNZIP2 || ENABLE_BZCAT \ + || ENABLE_UNLZMA || ENABLE_LZCAT || ENABLE_LZMA \ + || ENABLE_UNXZ || ENABLE_XZCAT || ENABLE_XZ +static +char* FAST_FUNC make_new_name_generic(char *filename, const char *expected_ext) +{ + char *extension = strrchr(filename, '.'); + if (!extension || strcmp(extension + 1, expected_ext) != 0) { + /* Mimic GNU gunzip - "real" bunzip2 tries to */ + /* unpack file anyway, to file.out */ + return NULL; + } + *extension = '\0'; + return filename; +} +#endif + + +/* + * Uncompress applet for busybox (c) 2002 Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +//usage:#define uncompress_trivial_usage +//usage: "[-cf] [FILE]..." +//usage:#define uncompress_full_usage "\n\n" +//usage: "Decompress .Z file[s]\n" +//usage: "\n -c Write to stdout" +//usage: "\n -f Overwrite" + +//config:config UNCOMPRESS +//config: bool "uncompress" +//config: default n # ancient +//config: help +//config: uncompress is used to decompress archives created by compress. +//config: Not much used anymore, replaced by gzip/gunzip. + +//applet:IF_UNCOMPRESS(APPLET(uncompress, BB_DIR_ROOT, BB_SUID_DROP)) +//kbuild:lib-$(CONFIG_UNCOMPRESS) += bbunzip.o +#if ENABLE_UNCOMPRESS +int uncompress_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int uncompress_main(int argc UNUSED_PARAM, char **argv) +{ + getopt32(argv, "cf"); + argv += optind; + + return bbunpack(argv, unpack_Z_stream, make_new_name_generic, "Z"); +} +#endif + + +/* + * Gzip implementation for busybox + * + * Based on GNU gzip v1.2.4 Copyright (C) 1992-1993 Jean-loup Gailly. + * + * Originally adjusted for busybox by Sven Rudolph + * based on gzip sources + * + * Adjusted further by Erik Andersen to support files as + * well as stdin/stdout, and to generally behave itself wrt command line + * handling. + * + * General cleanup to better adhere to the style guide and make use of standard + * busybox functions by Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + * + * gzip (GNU zip) -- compress files with zip algorithm and 'compress' interface + * Copyright (C) 1992-1993 Jean-loup Gailly + * The unzip code was written and put in the public domain by Mark Adler. + * Portions of the lzw code are derived from the public domain 'compress' + * written by Spencer Thomas, Joe Orost, James Woods, Jim McKie, Steve Davies, + * Ken Turkowski, Dave Mack and Peter Jannesen. + */ +//usage:#define gunzip_trivial_usage +//usage: "[-cft] [FILE]..." 
+//usage:#define gunzip_full_usage "\n\n" +//usage: "Decompress FILEs (or stdin)\n" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: "\n -t Test file integrity" +//usage: +//usage:#define gunzip_example_usage +//usage: "$ ls -la /tmp/BusyBox*\n" +//usage: "-rw-rw-r-- 1 andersen andersen 557009 Apr 11 10:55 /tmp/BusyBox-0.43.tar.gz\n" +//usage: "$ gunzip /tmp/BusyBox-0.43.tar.gz\n" +//usage: "$ ls -la /tmp/BusyBox*\n" +//usage: "-rw-rw-r-- 1 andersen andersen 1761280 Apr 14 17:47 /tmp/BusyBox-0.43.tar\n" +//usage: +//usage:#define zcat_trivial_usage +//usage: "[FILE]..." +//usage:#define zcat_full_usage "\n\n" +//usage: "Decompress to stdout" + +//config:config GUNZIP +//config: bool "gunzip" +//config: default y +//config: help +//config: gunzip is used to decompress archives created by gzip. +//config: You can use the `-t' option to test the integrity of +//config: an archive, without decompressing it. +//config: +//config:config ZCAT +//config: bool "zcat" +//config: default y +//config: help +//config: Alias to "gunzip -c". +//config: +//config:config FEATURE_GUNZIP_LONG_OPTIONS +//config: bool "Enable long options" +//config: default y +//config: depends on (GUNZIP || ZCAT) && LONG_OPTS +//config: help +//config: Enable use of long options. + +//applet:IF_GUNZIP(APPLET(gunzip, BB_DIR_ROOT, BB_SUID_DROP)) +//applet:IF_ZCAT(APPLET_ODDNAME(zcat, gunzip, BB_DIR_ROOT, BB_SUID_DROP, zcat)) +//kbuild:lib-$(CONFIG_GUNZIP) += bbunzip.o +//kbuild:lib-$(CONFIG_ZCAT) += bbunzip.o +#if ENABLE_GUNZIP || ENABLE_ZCAT +static +char* FAST_FUNC make_new_name_gunzip(char *filename, const char *expected_ext UNUSED_PARAM) +{ + char *extension = strrchr(filename, '.'); + + if (!extension) + return NULL; + + extension++; + if (strcmp(extension, "tgz" + 1) == 0 +#if ENABLE_FEATURE_SEAMLESS_Z + || (extension[0] == 'Z' && extension[1] == '\0') +#endif + ) { + extension[-1] = '\0'; + } else if (strcmp(extension, "tgz") == 0) { + filename = xstrdup(filename); + extension = strrchr(filename, '.'); + extension[2] = 'a'; + extension[3] = 'r'; + } else { + return NULL; + } + return filename; +} + +#if ENABLE_FEATURE_GUNZIP_LONG_OPTIONS +static const char gunzip_longopts[] ALIGN1 = + "stdout\0" No_argument "c" + "to-stdout\0" No_argument "c" + "force\0" No_argument "f" + "test\0" No_argument "t" + "no-name\0" No_argument "n" + ; +#endif + +/* + * Linux kernel build uses gzip -d -n. We accept and ignore it. + * Man page says: + * -n --no-name + * gzip: do not save the original file name and time stamp. + * (The original name is always saved if the name had to be truncated.) + * gunzip: do not restore the original file name/time even if present + * (remove only the gzip suffix from the compressed file name). + * This option is the default when decompressing. + * -N --name + * gzip: always save the original file name and time stamp (this is the default) + * gunzip: restore the original file name and time stamp if present. + */ +int gunzip_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int gunzip_main(int argc UNUSED_PARAM, char **argv) +{ +#if ENABLE_FEATURE_GUNZIP_LONG_OPTIONS + applet_long_options = gunzip_longopts; +#endif + getopt32(argv, "cfvqdtn"); + argv += optind; + + /* If called as zcat... + * Normally, "zcat" is just "gunzip -c". + * But if seamless magic is enabled, then we are much more clever. 
+ */ + if (ENABLE_ZCAT && applet_name[1] == 'c') + option_mask32 |= OPT_STDOUT | SEAMLESS_MAGIC; + + return bbunpack(argv, unpack_gz_stream, make_new_name_gunzip, /*unused:*/ NULL); +} +#endif + + +/* + * Modified for busybox by Glenn McGrath + * Added support output to stdout by Thomas Lundquist + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +//usage:#define bunzip2_trivial_usage +//usage: "[-cf] [FILE]..." +//usage:#define bunzip2_full_usage "\n\n" +//usage: "Decompress FILEs (or stdin)\n" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage:#define bzcat_trivial_usage +//usage: "[FILE]..." +//usage:#define bzcat_full_usage "\n\n" +//usage: "Decompress to stdout" + +//config:config BUNZIP2 +//config: bool "bunzip2" +//config: default y +//config: help +//config: bunzip2 is a compression utility using the Burrows-Wheeler block +//config: sorting text compression algorithm, and Huffman coding. Compression +//config: is generally considerably better than that achieved by more +//config: conventional LZ77/LZ78-based compressors, and approaches the +//config: performance of the PPM family of statistical compressors. +//config: +//config: Unless you have a specific application which requires bunzip2, you +//config: should probably say N here. +//config: +//config:config BZCAT +//config: bool "bzcat" +//config: default y +//config: help +//config: Alias to "bunzip2 -c". + +//applet:IF_BUNZIP2(APPLET(bunzip2, BB_DIR_ROOT, BB_SUID_DROP)) +//applet:IF_BZCAT(APPLET_ODDNAME(bzcat, bunzip2, BB_DIR_ROOT, BB_SUID_DROP, bzcat)) +//kbuild:lib-$(CONFIG_BUNZIP2) += bbunzip.o +//kbuild:lib-$(CONFIG_BZCAT) += bbunzip.o +#if ENABLE_BUNZIP2 || ENABLE_BZCAT +int bunzip2_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int bunzip2_main(int argc UNUSED_PARAM, char **argv) +{ + getopt32(argv, "cfvqdt"); + argv += optind; + if (ENABLE_BZCAT && applet_name[2] == 'c') /* bzcat */ + option_mask32 |= OPT_STDOUT; + + return bbunpack(argv, unpack_bz2_stream, make_new_name_generic, "bz2"); +} +#endif + + +/* + * Small lzma deflate implementation. + * Copyright (C) 2006 Aurelien Jacobs + * + * Based on bunzip.c from busybox + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +//usage:#define unlzma_trivial_usage +//usage: "[-cf] [FILE]..." +//usage:#define unlzma_full_usage "\n\n" +//usage: "Decompress FILE (or stdin)\n" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: +//usage:#define lzma_trivial_usage +//usage: "-d [-cf] [FILE]..." +//usage:#define lzma_full_usage "\n\n" +//usage: "Decompress FILE (or stdin)\n" +//usage: "\n -d Decompress" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: +//usage:#define lzcat_trivial_usage +//usage: "[FILE]..." +//usage:#define lzcat_full_usage "\n\n" +//usage: "Decompress to stdout" +//usage: +//usage:#define unxz_trivial_usage +//usage: "[-cf] [FILE]..." +//usage:#define unxz_full_usage "\n\n" +//usage: "Decompress FILE (or stdin)\n" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: +//usage:#define xz_trivial_usage +//usage: "-d [-cf] [FILE]..." +//usage:#define xz_full_usage "\n\n" +//usage: "Decompress FILE (or stdin)\n" +//usage: "\n -d Decompress" +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: +//usage:#define xzcat_trivial_usage +//usage: "[FILE]..." 
+//usage:#define xzcat_full_usage "\n\n" +//usage: "Decompress to stdout" + +//config:config UNLZMA +//config: bool "unlzma" +//config: default y +//config: help +//config: unlzma is a compression utility using the Lempel-Ziv-Markov chain +//config: compression algorithm, and range coding. Compression +//config: is generally considerably better than that achieved by the bzip2 +//config: compressors. +//config: +//config: The BusyBox unlzma applet is limited to decompression only. +//config: On an x86 system, this applet adds about 4K. +//config: +//config:config LZCAT +//config: bool "lzcat" +//config: default y +//config: help +//config: unlzma is a compression utility using the Lempel-Ziv-Markov chain +//config: compression algorithm, and range coding. Compression +//config: is generally considerably better than that achieved by the bzip2 +//config: compressors. +//config: +//config: The BusyBox unlzma applet is limited to decompression only. +//config: On an x86 system, this applet adds about 4K. +//config: +//config:config LZMA +//config: bool "lzma -d" +//config: default y +//config: help +//config: Enable this option if you want commands like "lzma -d" to work. +//config: IOW: you'll get lzma applet, but it will always require -d option. +//config: +//config:config FEATURE_LZMA_FAST +//config: bool "Optimize unlzma for speed" +//config: default n +//config: depends on UNLZMA || LZCAT || LZMA +//config: help +//config: This option reduces decompression time by about 25% at the cost of +//config: a 1K bigger binary. + +//applet:IF_UNLZMA(APPLET(unlzma, BB_DIR_ROOT, BB_SUID_DROP)) +//applet:IF_LZCAT(APPLET_ODDNAME(lzcat, unlzma, BB_DIR_ROOT, BB_SUID_DROP, lzcat)) +//applet:IF_LZMA(APPLET_ODDNAME(lzma, unlzma, BB_DIR_ROOT, BB_SUID_DROP, lzma)) +//kbuild:lib-$(CONFIG_UNLZMA) += bbunzip.o +//kbuild:lib-$(CONFIG_LZCAT) += bbunzip.o +//kbuild:lib-$(CONFIG_LZMA) += bbunzip.o +#if ENABLE_UNLZMA || ENABLE_LZCAT || ENABLE_LZMA +int unlzma_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int unlzma_main(int argc UNUSED_PARAM, char **argv) +{ + IF_LZMA(int opts =) getopt32(argv, "cfvqdt"); +# if ENABLE_LZMA + /* lzma without -d or -t? */ + if (applet_name[2] == 'm' && !(opts & (OPT_DECOMPRESS|OPT_TEST))) + bb_show_usage(); +# endif + /* lzcat? */ + if (ENABLE_LZCAT && applet_name[2] == 'c') + option_mask32 |= OPT_STDOUT; + + argv += optind; + return bbunpack(argv, unpack_lzma_stream, make_new_name_generic, "lzma"); +} +#endif + + +//config:config UNXZ +//config: bool "unxz" +//config: default y +//config: help +//config: unxz is a unlzma successor. +//config: +//config:config XZCAT +//config: bool "xzcat" +//config: default y +//config: help +//config: Alias to "unxz -c". +//config: +//config:config XZ +//config: bool "xz -d" +//config: default y +//config: help +//config: Enable this option if you want commands like "xz -d" to work. +//config: IOW: you'll get xz applet, but it will always require -d option. + +//applet:IF_UNXZ(APPLET(unxz, BB_DIR_ROOT, BB_SUID_DROP)) +//applet:IF_XZCAT(APPLET_ODDNAME(xzcat, unxz, BB_DIR_ROOT, BB_SUID_DROP, xzcat)) +//applet:IF_XZ(APPLET_ODDNAME(xz, unxz, BB_DIR_ROOT, BB_SUID_DROP, xz)) +//kbuild:lib-$(CONFIG_UNXZ) += bbunzip.o +//kbuild:lib-$(CONFIG_XZCAT) += bbunzip.o +//kbuild:lib-$(CONFIG_XZ) += bbunzip.o +#if ENABLE_UNXZ || ENABLE_XZCAT || ENABLE_XZ +int unxz_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int unxz_main(int argc UNUSED_PARAM, char **argv) +{ + IF_XZ(int opts =) getopt32(argv, "cfvqdt"); +# if ENABLE_XZ + /* xz without -d or -t? 
*/ + if (applet_name[2] == '\0' && !(opts & (OPT_DECOMPRESS|OPT_TEST))) + bb_show_usage(); +# endif + /* xzcat? */ + if (ENABLE_XZCAT && applet_name[2] == 'c') + option_mask32 |= OPT_STDOUT; + + argv += optind; + return bbunpack(argv, unpack_xz_stream, make_new_name_generic, "xz"); +} +#endif diff --git a/probe-busybox/archival/gzip.c b/probe-busybox/archival/gzip.c new file mode 100644 index 00000000..e99a2cf2 --- /dev/null +++ b/probe-busybox/archival/gzip.c @@ -0,0 +1,2243 @@ +/* vi: set sw=4 ts=4: */ +/* + * Gzip implementation for busybox + * + * Based on GNU gzip Copyright (C) 1992-1993 Jean-loup Gailly. + * + * Originally adjusted for busybox by Charles P. Wright + * "this is a stripped down version of gzip I put into busybox, it does + * only standard in to standard out with -9 compression. It also requires + * the zcat module for some important functions." + * + * Adjusted further by Erik Andersen to support + * files as well as stdin/stdout, and to generally behave itself wrt + * command line handling. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +/* big objects in bss: + * 00000020 b bl_count + * 00000074 b base_length + * 00000078 b base_dist + * 00000078 b static_dtree + * 0000009c b bl_tree + * 000000f4 b dyn_dtree + * 00000100 b length_code + * 00000200 b dist_code + * 0000023d b depth + * 00000400 b flag_buf + * 0000047a b heap + * 00000480 b static_ltree + * 000008f4 b dyn_ltree + */ +/* TODO: full support for -v for DESKTOP + * "/usr/bin/gzip -v a bogus aa" should say: +a: 85.1% -- replaced with a.gz +gzip: bogus: No such file or directory +aa: 85.1% -- replaced with aa.gz +*/ + +//config:config GZIP +//config: bool "gzip" +//config: default y +//config: help +//config: gzip is used to compress files. +//config: It's probably the most widely used UNIX compression program. +//config: +//config:config FEATURE_GZIP_LONG_OPTIONS +//config: bool "Enable long options" +//config: default y +//config: depends on GZIP && LONG_OPTS +//config: help +//config: Enable use of long options, increases size by about 106 Bytes +//config: +//config:config GZIP_FAST +//config: int "Trade memory for gzip speed (0:small,slow - 2:fast,big)" +//config: default 0 +//config: range 0 2 +//config: depends on GZIP +//config: help +//config: Enable big memory options for gzip. +//config: 0: small buffers, small hash-tables +//config: 1: larger buffers, larger hash-tables +//config: 2: larger buffers, largest hash-tables +//config: Larger models may give slightly better compression +//config: +//config:config FEATURE_GZIP_LEVELS +//config: bool "Enable compression levels" +//config: default n +//config: depends on GZIP +//config: help +//config: Enable support for compression levels 4-9. The default level +//config: is 6. If levels 1-3 are specified, 4 is used. +//config: If this option is not selected, -N options are ignored and -9 +//config: is used. + +//applet:IF_GZIP(APPLET(gzip, BB_DIR_ROOT, BB_SUID_DROP)) +//kbuild:lib-$(CONFIG_GZIP) += gzip.o + +//usage:#define gzip_trivial_usage +//usage: "[-cf" IF_GUNZIP("d") IF_FEATURE_GZIP_LEVELS("123456789") "] [FILE]..." 
+//usage:#define gzip_full_usage "\n\n" +//usage: "Compress FILEs (or stdin)\n" +//usage: IF_FEATURE_GZIP_LEVELS( +//usage: "\n -1..9 Compression level" +//usage: ) +//usage: IF_GUNZIP( +//usage: "\n -d Decompress" +//usage: ) +//usage: "\n -c Write to stdout" +//usage: "\n -f Force" +//usage: +//usage:#define gzip_example_usage +//usage: "$ ls -la /tmp/busybox*\n" +//usage: "-rw-rw-r-- 1 andersen andersen 1761280 Apr 14 17:47 /tmp/busybox.tar\n" +//usage: "$ gzip /tmp/busybox.tar\n" +//usage: "$ ls -la /tmp/busybox*\n" +//usage: "-rw-rw-r-- 1 andersen andersen 554058 Apr 14 17:49 /tmp/busybox.tar.gz\n" + +#include "libbb.h" +#include "bb_archive.h" + + +/* =========================================================================== + */ +//#define DEBUG 1 +/* Diagnostic functions */ +#ifdef DEBUG +# define Assert(cond,msg) { if (!(cond)) bb_error_msg(msg); } +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x; } +# define Tracevv(x) {if (verbose > 1) fprintf x; } +# define Tracec(c,x) {if (verbose && (c)) fprintf x; } +# define Tracecv(c,x) {if (verbose > 1 && (c)) fprintf x; } +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + + +/* =========================================================================== + */ +#if CONFIG_GZIP_FAST == 0 +# define SMALL_MEM +#elif CONFIG_GZIP_FAST == 1 +# define MEDIUM_MEM +#elif CONFIG_GZIP_FAST == 2 +# define BIG_MEM +#else +# error "Invalid CONFIG_GZIP_FAST value" +#endif + +#ifndef INBUFSIZ +# ifdef SMALL_MEM +# define INBUFSIZ 0x2000 /* input buffer size */ +# else +# define INBUFSIZ 0x8000 /* input buffer size */ +# endif +#endif + +#ifndef OUTBUFSIZ +# ifdef SMALL_MEM +# define OUTBUFSIZ 8192 /* output buffer size */ +# else +# define OUTBUFSIZ 16384 /* output buffer size */ +# endif +#endif + +#ifndef DIST_BUFSIZE +# ifdef SMALL_MEM +# define DIST_BUFSIZE 0x2000 /* buffer for distances, see trees.c */ +# else +# define DIST_BUFSIZE 0x8000 /* buffer for distances, see trees.c */ +# endif +#endif + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +/* internal file attribute */ +#define UNKNOWN 0xffff +#define BINARY 0 +#define ASCII 1 + +#ifndef WSIZE +# define WSIZE 0x8000 /* window size--must be a power of two, and */ +#endif /* at least 32K for zip's deflate method */ + +#define MIN_MATCH 3 +#define MAX_MATCH 258 +/* The minimum and maximum match lengths */ + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +#define MAX_DIST (WSIZE-MIN_LOOKAHEAD) +/* In order to simplify the code, particularly on 16 bit machines, match + * distances are limited to MAX_DIST instead of WSIZE. 
+ */ + +#ifndef MAX_PATH_LEN +# define MAX_PATH_LEN 1024 /* max pathname length */ +#endif + +#define seekable() 0 /* force sequential output */ +#define translate_eol 0 /* no option -a yet */ + +#ifndef BITS +# define BITS 16 +#endif +#define INIT_BITS 9 /* Initial number of bits per code */ + +#define BIT_MASK 0x1f /* Mask for 'number of compression bits' */ +/* Mask 0x20 is reserved to mean a fourth header byte, and 0x40 is free. + * It's a pity that old uncompress does not check bit 0x20. That makes + * extension of the format actually undesirable because old compress + * would just crash on the new format instead of giving a meaningful + * error message. It does check the number of bits, but it's more + * helpful to say "unsupported format, get a new version" than + * "can only handle 16 bits". + */ + +#ifdef MAX_EXT_CHARS +# define MAX_SUFFIX MAX_EXT_CHARS +#else +# define MAX_SUFFIX 30 +#endif + + +/* =========================================================================== + * Compile with MEDIUM_MEM to reduce the memory requirements or + * with SMALL_MEM to use as little memory as possible. Use BIG_MEM if the + * entire input file can be held in memory (not possible on 16 bit systems). + * Warning: defining these symbols affects HASH_BITS (see below) and thus + * affects the compression ratio. The compressed output + * is still correct, and might even be smaller in some cases. + */ + +#ifdef SMALL_MEM +# define HASH_BITS 13 /* Number of bits used to hash strings */ +#endif +#ifdef MEDIUM_MEM +# define HASH_BITS 14 +#endif +#ifndef HASH_BITS +# define HASH_BITS 15 + /* For portability to 16 bit machines, do not use values above 15. */ +#endif + +#define HASH_SIZE (unsigned)(1<= 4. + */ + + max_insert_length = max_lazy_match, +/* Insert new strings in the hash table only if the match length + * is not greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. + */ + + good_match = 32, +/* Use a faster search when the previous match is longer than this */ + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ + + nice_match = 258, /* Stop searching when current match exceeds this */ +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different + * meaning. + */ +#endif /* ENABLE_FEATURE_GZIP_LEVELS */ +}; + + +struct globals { + +#ifdef ENABLE_FEATURE_GZIP_LEVELS + unsigned max_chain_length; + unsigned max_lazy_match; + unsigned good_match; + unsigned nice_match; +#define max_chain_length (G1.max_chain_length) +#define max_lazy_match (G1.max_lazy_match) +#define good_match (G1.good_match) +#define nice_match (G1.nice_match) +#endif + + lng block_start; + +/* window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + unsigned ins_h; /* hash index of string to be inserted */ + +#define H_SHIFT ((HASH_BITS+MIN_MATCH-1) / MIN_MATCH) +/* Number of bits by which ins_h and del_h must be shifted at each + * input step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * H_SHIFT * MIN_MATCH >= HASH_BITS + */ + + unsigned prev_length; + +/* Length of the best match at previous step. 
Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + unsigned strstart; /* start of string to insert */ + unsigned match_start; /* start of matching string */ + unsigned lookahead; /* number of valid bytes ahead in window */ + +/* =========================================================================== + */ +#define DECLARE(type, array, size) \ + type * array +#define ALLOC(type, array, size) \ + array = xzalloc((size_t)(((size)+1L)/2) * 2*sizeof(type)) +#define FREE(array) \ + do { free(array); array = NULL; } while (0) + + /* global buffers */ + + /* buffer for literals or lengths */ + /* DECLARE(uch, l_buf, LIT_BUFSIZE); */ + DECLARE(uch, l_buf, INBUFSIZ); + + DECLARE(ush, d_buf, DIST_BUFSIZE); + DECLARE(uch, outbuf, OUTBUFSIZ); + +/* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least WSIZE + * bytes. With this organization, matches are limited to a distance of + * WSIZE-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. Also, it limits + * the window size to 64K, which is quite useful on MSDOS. + * To do: limit the window size to WSIZE+BSZ if SMALL_MEM (the code would + * be less efficient). + */ + DECLARE(uch, window, 2L * WSIZE); + +/* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + /* DECLARE(Pos, prev, WSIZE); */ + DECLARE(ush, prev, 1L << BITS); + +/* Heads of the hash chains or 0. */ + /* DECLARE(Pos, head, 1<>= 8; +#else + *dst = (uch)w; + w >>= 8; + if (outcnt < OUTBUFSIZ-2) { + /* Common case */ + dst[1] = w; + G1.outcnt = outcnt + 2; + return; + } +#endif + + /* Slowpath: we will need to do flush_outbuf() */ + G1.outcnt = ++outcnt; + if (outcnt == OUTBUFSIZ) + flush_outbuf(); + put_8bit(w); +} + +static void put_32bit(ulg n) +{ + put_16bit(n); + put_16bit(n >> 16); +} + +/* =========================================================================== + * Run a set of bytes through the crc shift register. If s is a NULL + * pointer, then initialize the crc shift register contents instead. + * Return the current crc in either case. + */ +static void updcrc(uch * s, unsigned n) +{ + G1.crc = crc32_block_endian0(G1.crc, s, n, global_crc32_table /*G1.crc_32_tab*/); +} + + +/* =========================================================================== + * Read a new buffer from the current input file, perform end-of-line + * translation, and update the crc and input file size. + * IN assertion: size >= 2 (for end-of-line translation) + */ +static unsigned file_read(void *buf, unsigned size) +{ + unsigned len; + + Assert(G1.insize == 0, "l_buf not empty"); + + len = safe_read(ifd, buf, size); + if (len == (unsigned)(-1) || len == 0) + return len; + + updcrc(buf, len); + G1.isize += len; + return len; +} + + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. 
+ */ +static void send_bits(int value, int length) +{ +#ifdef DEBUG + Tracev((stderr, " l %2d v %4x ", length, value)); + Assert(length > 0 && length <= 15, "invalid length"); + G1.bits_sent += length; +#endif + /* If not enough room in bi_buf, use (valid) bits from bi_buf and + * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) + * unused bits in value. + */ + if (G1.bi_valid > (int) BUF_SIZE - length) { + G1.bi_buf |= (value << G1.bi_valid); + put_16bit(G1.bi_buf); + G1.bi_buf = (ush) value >> (BUF_SIZE - G1.bi_valid); + G1.bi_valid += length - BUF_SIZE; + } else { + G1.bi_buf |= value << G1.bi_valid; + G1.bi_valid += length; + } +} + + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +static unsigned bi_reverse(unsigned code, int len) +{ + unsigned res = 0; + + while (1) { + res |= code & 1; + if (--len <= 0) return res; + code >>= 1; + res <<= 1; + } +} + + +/* =========================================================================== + * Write out any remaining bits in an incomplete byte. + */ +static void bi_windup(void) +{ + if (G1.bi_valid > 8) { + put_16bit(G1.bi_buf); + } else if (G1.bi_valid > 0) { + put_8bit(G1.bi_buf); + } + G1.bi_buf = 0; + G1.bi_valid = 0; +#ifdef DEBUG + G1.bits_sent = (G1.bits_sent + 7) & ~7; +#endif +} + + +/* =========================================================================== + * Copy a stored block to the zip file, storing first the length and its + * one's complement if requested. + */ +static void copy_block(char *buf, unsigned len, int header) +{ + bi_windup(); /* align on byte boundary */ + + if (header) { + put_16bit(len); + put_16bit(~len); +#ifdef DEBUG + G1.bits_sent += 2 * 16; +#endif + } +#ifdef DEBUG + G1.bits_sent += (ulg) len << 3; +#endif + while (len--) { + put_8bit(*buf++); + } +} + + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead, and sets eofile if end of input file. + * IN assertion: lookahead < MIN_LOOKAHEAD && strstart + lookahead > 0 + * OUT assertions: at least one byte has been read, or eofile is set; + * file reads are performed for at least two bytes (required for the + * translate_eol option). + */ +static void fill_window(void) +{ + unsigned n, m; + unsigned more = WINDOW_SIZE - G1.lookahead - G1.strstart; + /* Amount of free space at the end of the window. */ + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (more == (unsigned) -1) { + /* Very unlikely, but possible on 16 bit machine if strstart == 0 + * and lookahead == 1 (input done one byte at time) + */ + more--; + } else if (G1.strstart >= WSIZE + MAX_DIST) { + /* By the IN assertion, the window is not empty so we can't confuse + * more == 0 with more == 64K on a 16 bit machine. + */ + Assert(WINDOW_SIZE == 2 * WSIZE, "no sliding with BIG_MEM"); + + memcpy(G1.window, G1.window + WSIZE, WSIZE); + G1.match_start -= WSIZE; + G1.strstart -= WSIZE; /* we now have strstart >= MAX_DIST: */ + + G1.block_start -= WSIZE; + + for (n = 0; n < HASH_SIZE; n++) { + m = head[n]; + head[n] = (Pos) (m >= WSIZE ? m - WSIZE : 0); + } + for (n = 0; n < WSIZE; n++) { + m = G1.prev[n]; + G1.prev[n] = (Pos) (m >= WSIZE ? 
m - WSIZE : 0); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + } + more += WSIZE; + } + /* At this point, more >= 2 */ + if (!G1.eofile) { + n = file_read(G1.window + G1.strstart + G1.lookahead, more); + if (n == 0 || n == (unsigned) -1) { + G1.eofile = 1; + } else { + G1.lookahead += n; + } + } +} + + +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + */ + +/* For MSDOS, OS/2 and 386 Unix, an optimized version is in match.asm or + * match.s. The code is functionally equivalent, so you can use the C version + * if desired. + */ +static int longest_match(IPos cur_match) +{ + unsigned chain_length = max_chain_length; /* max hash chain length */ + uch *scan = G1.window + G1.strstart; /* current string */ + uch *match; /* matched string */ + int len; /* length of current match */ + int best_len = G1.prev_length; /* best match length so far */ + IPos limit = G1.strstart > (IPos) MAX_DIST ? G1.strstart - (IPos) MAX_DIST : 0; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + +/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ +#if HASH_BITS < 8 || MAX_MATCH != 258 +# error Code too clever +#endif + uch *strend = G1.window + G1.strstart + MAX_MATCH; + uch scan_end1 = scan[best_len - 1]; + uch scan_end = scan[best_len]; + + /* Do not waste too much time if we already have a good match: */ + if (G1.prev_length >= good_match) { + chain_length >>= 2; + } + Assert(G1.strstart <= WINDOW_SIZE - MIN_LOOKAHEAD, "insufficient lookahead"); + + do { + Assert(cur_match < G1.strstart, "no future"); + match = G1.window + cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2: + */ + if (match[best_len] != scan_end + || match[best_len - 1] != scan_end1 + || *match != *scan || *++match != scan[1] + ) { + continue; + } + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match++; + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && scan < strend); + + len = MAX_MATCH - (int) (strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) { + G1.match_start = cur_match; + best_len = len; + if (len >= nice_match) + break; + scan_end1 = scan[best_len - 1]; + scan_end = scan[best_len]; + } + } while ((cur_match = G1.prev[cur_match & WMASK]) > limit + && --chain_length != 0); + + return best_len; +} + + +#ifdef DEBUG +/* =========================================================================== + * Check that the match at match_start is indeed a match. + */ +static void check_match(IPos start, IPos match, int length) +{ + /* check that the match is indeed a match */ + if (memcmp(G1.window + match, G1.window + start, length) != 0) { + bb_error_msg(" start %d, match %d, length %d", start, match, length); + bb_error_msg("invalid match"); + } + if (verbose > 1) { + bb_error_msg("\\[%d,%d]", start - match, length); + do { + bb_putchar_stderr(G1.window[start++]); + } while (--length != 0); + } +} +#else +# define check_match(start, match, length) ((void)0) +#endif + + +/* trees.c -- output deflated data using Huffman coding + * Copyright (C) 1992-1993 Jean-loup Gailly + * This is free software; you can redistribute it and/or modify it under the + * terms of the GNU General Public License, see the file COPYING. + */ + +/* PURPOSE + * Encode various sets of source values using variable-length + * binary code trees. + * + * DISCUSSION + * The PKZIP "deflation" process uses several Huffman trees. The more + * common source values are represented by shorter bit sequences. + * + * Each code tree is stored in the ZIP file in a compressed form + * which is itself a Huffman encoding of the lengths of + * all the code strings (in ascending order by source values). + * The actual code strings are reconstructed from the lengths in + * the UNZIP process, as described in the "application note" + * (APPNOTE.TXT) distributed as part of PKWARE's PKZIP program. + * + * REFERENCES + * Lynch, Thomas J. + * Data Compression: Techniques and Applications, pp. 53-55. + * Lifetime Learning Publications, 1985. ISBN 0-534-03418-7. + * + * Storer, James A. + * Data Compression: Methods and Theory, pp. 49-50. + * Computer Science Press, 1988. ISBN 0-7167-8156-5. + * + * Sedgewick, R. + * Algorithms, p290. + * Addison-Wesley, 1983. ISBN 0-201-06672-6. + * + * INTERFACE + * void ct_init() + * Allocate the match buffer, initialize the various tables [and save + * the location of the internal file attribute (ascii/binary) and + * method (DEFLATE/STORE) -- deleted in bbox] + * + * void ct_tally(int dist, int lc); + * Save the match info and tally the frequency counts. + * + * ulg flush_block(char *buf, ulg stored_len, int eof) + * Determine the best encoding for the current block: dynamic trees, + * static trees or store, and output the encoded block to the zip + * file. Returns the total compressed length for the file so far. 
+ */ + +#define MAX_BITS 15 +/* All codes must not exceed MAX_BITS bits */ + +#define MAX_BL_BITS 7 +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +#define LENGTH_CODES 29 +/* number of length codes, not counting the special END_BLOCK code */ + +#define LITERALS 256 +/* number of literal bytes 0..255 */ + +#define END_BLOCK 256 +/* end of block literal code */ + +#define L_CODES (LITERALS+1+LENGTH_CODES) +/* number of Literal or Length codes, including the END_BLOCK code */ + +#define D_CODES 30 +/* number of distance codes */ + +#define BL_CODES 19 +/* number of codes used to transfer the bit lengths */ + +/* extra bits for each length code */ +static const uint8_t extra_lbits[LENGTH_CODES] ALIGN1 = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, + 4, 4, 5, 5, 5, 5, 0 +}; + +/* extra bits for each distance code */ +static const uint8_t extra_dbits[D_CODES] ALIGN1 = { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, + 10, 10, 11, 11, 12, 12, 13, 13 +}; + +/* extra bits for each bit length code */ +static const uint8_t extra_blbits[BL_CODES] ALIGN1 = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7 }; + +/* number of codes at each bit length for an optimal tree */ +static const uint8_t bl_order[BL_CODES] ALIGN1 = { + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + +#define STORED_BLOCK 0 +#define STATIC_TREES 1 +#define DYN_TREES 2 +/* The three kinds of block type */ + +#ifndef LIT_BUFSIZE +# ifdef SMALL_MEM +# define LIT_BUFSIZE 0x2000 +# else +# ifdef MEDIUM_MEM +# define LIT_BUFSIZE 0x4000 +# else +# define LIT_BUFSIZE 0x8000 +# endif +# endif +#endif +#ifndef DIST_BUFSIZE +# define DIST_BUFSIZE LIT_BUFSIZE +#endif +/* Sizes of match buffers for literals/lengths and distances. There are + * 4 reasons for limiting LIT_BUFSIZE to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input data is + * still in the window so we can still emit a stored block even when input + * comes from standard input. (This can also be done for all blocks if + * LIT_BUFSIZE is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting trees + * more frequently. + * - I can't count above 4 + * The current code is general and allows DIST_BUFSIZE < LIT_BUFSIZE (to save + * memory at the expense of compression). Some optimizations would be possible + * if we rely on DIST_BUFSIZE == LIT_BUFSIZE. + */ +#define REP_3_6 16 +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ +#define REPZ_3_10 17 +/* repeat a zero length 3-10 times (3 bits of repeat count) */ +#define REPZ_11_138 18 +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +/* =========================================================================== +*/ +/* Data structure describing a single value and its code string. 
*/ +typedef struct ct_data { + union { + ush freq; /* frequency count */ + ush code; /* bit string */ + } fc; + union { + ush dad; /* father node in Huffman tree */ + ush len; /* length of bit string */ + } dl; +} ct_data; + +#define Freq fc.freq +#define Code fc.code +#define Dad dl.dad +#define Len dl.len + +#define HEAP_SIZE (2*L_CODES + 1) +/* maximum heap size */ + +typedef struct tree_desc { + ct_data *dyn_tree; /* the dynamic tree */ + ct_data *static_tree; /* corresponding static tree or NULL */ + const uint8_t *extra_bits; /* extra bits for each code or NULL */ + int extra_base; /* base index for extra_bits */ + int elems; /* max number of elements in the tree */ + int max_length; /* max bit length for the codes */ + int max_code; /* largest code with non zero frequency */ +} tree_desc; + +struct globals2 { + + ush heap[HEAP_SIZE]; /* heap used to build the Huffman trees */ + int heap_len; /* number of elements in the heap */ + int heap_max; /* element of largest frequency */ + +/* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + ct_data dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + ct_data dyn_dtree[2 * D_CODES + 1]; /* distance tree */ + + ct_data static_ltree[L_CODES + 2]; + +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see ct_init + * below). + */ + + ct_data static_dtree[D_CODES]; + +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ + + ct_data bl_tree[2 * BL_CODES + 1]; + +/* Huffman tree for the bit lengths */ + + tree_desc l_desc; + tree_desc d_desc; + tree_desc bl_desc; + + ush bl_count[MAX_BITS + 1]; + +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + + uch depth[2 * L_CODES + 1]; + +/* Depth of each subtree used as tie breaker for trees of equal frequency */ + + uch length_code[MAX_MATCH - MIN_MATCH + 1]; + +/* length code for each normalized match length (0 == MIN_MATCH) */ + + uch dist_code[512]; + +/* distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. + */ + + int base_length[LENGTH_CODES]; + +/* First normalized length for each code (0 = MIN_MATCH) */ + + int base_dist[D_CODES]; + +/* First normalized distance for each code (0 = distance of 1) */ + + uch flag_buf[LIT_BUFSIZE / 8]; + +/* flag_buf is a bit array distinguishing literals from lengths in + * l_buf, thus indicating the presence or absence of a distance. + */ + + unsigned last_lit; /* running index in l_buf */ + unsigned last_dist; /* running index in d_buf */ + unsigned last_flags; /* running index in flag_buf */ + uch flags; /* current flags not yet saved in flag_buf */ + uch flag_bit; /* current bit used in flags */ + +/* bits are filled in flags starting at bit 0 (least significant). + * Note: these flags are overkill in the current code since we don't + * take advantage of DIST_BUFSIZE == LIT_BUFSIZE. 
+ */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ + + ulg compressed_len; /* total bit length of compressed file */ +}; + +#define G2ptr ((struct globals2*)(ptr_to_globals)) +#define G2 (*G2ptr) + + +/* =========================================================================== + */ +static void gen_codes(ct_data * tree, int max_code); +static void build_tree(tree_desc * desc); +static void scan_tree(ct_data * tree, int max_code); +static void send_tree(ct_data * tree, int max_code); +static int build_bl_tree(void); +static void send_all_trees(int lcodes, int dcodes, int blcodes); +static void compress_block(ct_data * ltree, ct_data * dtree); + + +#ifndef DEBUG +/* Send a code of the given tree. c and tree must not have side effects */ +# define SEND_CODE(c, tree) send_bits(tree[c].Code, tree[c].Len) +#else +# define SEND_CODE(c, tree) \ +{ \ + if (verbose > 1) bb_error_msg("\ncd %3d ", (c)); \ + send_bits(tree[c].Code, tree[c].Len); \ +} +#endif + +#define D_CODE(dist) \ + ((dist) < 256 ? G2.dist_code[dist] : G2.dist_code[256 + ((dist)>>7)]) +/* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. dist_code[256] and dist_code[257] are never + * used. + * The arguments must not have side effects. + */ + + +/* =========================================================================== + * Initialize a new block. + */ +static void init_block(void) +{ + int n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) + G2.dyn_ltree[n].Freq = 0; + for (n = 0; n < D_CODES; n++) + G2.dyn_dtree[n].Freq = 0; + for (n = 0; n < BL_CODES; n++) + G2.bl_tree[n].Freq = 0; + + G2.dyn_ltree[END_BLOCK].Freq = 1; + G2.opt_len = G2.static_len = 0; + G2.last_lit = G2.last_dist = G2.last_flags = 0; + G2.flags = 0; + G2.flag_bit = 1; +} + + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ + +/* Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. */ +#define SMALLER(tree, n, m) \ + (tree[n].Freq < tree[m].Freq \ + || (tree[n].Freq == tree[m].Freq && G2.depth[n] <= G2.depth[m])) + +static void pqdownheap(ct_data * tree, int k) +{ + int v = G2.heap[k]; + int j = k << 1; /* left son of k */ + + while (j <= G2.heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < G2.heap_len && SMALLER(tree, G2.heap[j + 1], G2.heap[j])) + j++; + + /* Exit if v is smaller than both sons */ + if (SMALLER(tree, v, G2.heap[j])) + break; + + /* Exchange v with the smallest son */ + G2.heap[k] = G2.heap[j]; + k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + G2.heap[k] = v; +} + + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. 
+ * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ +static void gen_bitlen(tree_desc * desc) +{ + ct_data *tree = desc->dyn_tree; + const uint8_t *extra = desc->extra_bits; + int base = desc->extra_base; + int max_code = desc->max_code; + int max_length = desc->max_length; + ct_data *stree = desc->static_tree; + int h; /* heap index */ + int n, m; /* iterate over the tree elements */ + int bits; /* bit length */ + int xbits; /* extra bits */ + ush f; /* frequency */ + int overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) + G2.bl_count[bits] = 0; + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[G2.heap[G2.heap_max]].Len = 0; /* root of the heap */ + + for (h = G2.heap_max + 1; h < HEAP_SIZE; h++) { + n = G2.heap[h]; + bits = tree[tree[n].Dad].Len + 1; + if (bits > max_length) { + bits = max_length; + overflow++; + } + tree[n].Len = (ush) bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) + continue; /* not a leaf node */ + + G2.bl_count[bits]++; + xbits = 0; + if (n >= base) + xbits = extra[n - base]; + f = tree[n].Freq; + G2.opt_len += (ulg) f *(bits + xbits); + + if (stree) + G2.static_len += (ulg) f * (stree[n].Len + xbits); + } + if (overflow == 0) + return; + + Trace((stderr, "\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length - 1; + while (G2.bl_count[bits] == 0) + bits--; + G2.bl_count[bits]--; /* move one leaf down the tree */ + G2.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ + G2.bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits != 0; bits--) { + n = G2.bl_count[bits]; + while (n != 0) { + m = G2.heap[--h]; + if (m > max_code) + continue; + if (tree[m].Len != (unsigned) bits) { + Trace((stderr, "code %d bits %d->%d\n", m, tree[m].Len, bits)); + G2.opt_len += ((int32_t) bits - tree[m].Len) * tree[m].Freq; + tree[m].Len = bits; + } + n--; + } + } +} + + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ +static void gen_codes(ct_data * tree, int max_code) +{ + ush next_code[MAX_BITS + 1]; /* next code value for each bit length */ + ush code = 0; /* running code value */ + int bits; /* bit index */ + int n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + G2.bl_count[bits - 1]) << 1; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. 
+ */ + Assert(code + G2.bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1, + "inconsistent bit counts"); + Tracev((stderr, "\ngen_codes: max_code %d ", max_code)); + + for (n = 0; n <= max_code; n++) { + int len = tree[n].Len; + + if (len == 0) + continue; + /* Now reverse the bits */ + tree[n].Code = bi_reverse(next_code[len]++, len); + + Tracec(tree != G2.static_ltree, + (stderr, "\nn %3d %c l %2d c %4x (%x) ", n, + (n > ' ' ? n : ' '), len, tree[n].Code, + next_code[len] - 1)); + } +} + + +/* =========================================================================== + * Construct one Huffman tree and assigns the code bit strings and lengths. + * Update the total bit length for the current block. + * IN assertion: the field freq is set for all tree elements. + * OUT assertions: the fields len and code are set to the optimal bit length + * and corresponding code. The length opt_len is updated; static_len is + * also updated if stree is not null. The field max_code is set. + */ + +/* Remove the smallest element from the heap and recreate the heap with + * one less element. Updates heap and heap_len. */ + +#define SMALLEST 1 +/* Index within the heap array of least frequent node in the Huffman tree */ + +#define PQREMOVE(tree, top) \ +do { \ + top = G2.heap[SMALLEST]; \ + G2.heap[SMALLEST] = G2.heap[G2.heap_len--]; \ + pqdownheap(tree, SMALLEST); \ +} while (0) + +static void build_tree(tree_desc * desc) +{ + ct_data *tree = desc->dyn_tree; + ct_data *stree = desc->static_tree; + int elems = desc->elems; + int n, m; /* iterate over heap elements */ + int max_code = -1; /* largest code with non zero frequency */ + int node = elems; /* next internal node of the tree */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. + */ + G2.heap_len = 0; + G2.heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n].Freq != 0) { + G2.heap[++G2.heap_len] = max_code = n; + G2.depth[n] = 0; + } else { + tree[n].Len = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (G2.heap_len < 2) { + int new = G2.heap[++G2.heap_len] = (max_code < 2 ? ++max_code : 0); + + tree[new].Freq = 1; + G2.depth[new] = 0; + G2.opt_len--; + if (stree) + G2.static_len -= stree[new].Len; + /* new is 0 or 1 so it does not have extra bits */ + } + desc->max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = G2.heap_len / 2; n >= 1; n--) + pqdownheap(tree, n); + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. 
+ */ + do { + PQREMOVE(tree, n); /* n = node of least frequency */ + m = G2.heap[SMALLEST]; /* m = node of next least frequency */ + + G2.heap[--G2.heap_max] = n; /* keep the nodes sorted by frequency */ + G2.heap[--G2.heap_max] = m; + + /* Create a new node father of n and m */ + tree[node].Freq = tree[n].Freq + tree[m].Freq; + G2.depth[node] = MAX(G2.depth[n], G2.depth[m]) + 1; + tree[n].Dad = tree[m].Dad = (ush) node; +#ifdef DUMP_BL_TREE + if (tree == G2.bl_tree) { + bb_error_msg("\nnode %d(%d), sons %d(%d) %d(%d)", + node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); + } +#endif + /* and insert the new node in the heap */ + G2.heap[SMALLEST] = node++; + pqdownheap(tree, SMALLEST); + } while (G2.heap_len >= 2); + + G2.heap[--G2.heap_max] = G2.heap[SMALLEST]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen((tree_desc *) desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes((ct_data *) tree, max_code); +} + + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. Updates opt_len to take into account the repeat + * counts. (The contribution of the bit length codes will be added later + * during the construction of bl_tree.) + */ +static void scan_tree(ct_data * tree, int max_code) +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } + tree[max_code + 1].Len = 0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[n + 1].Len; + if (++count < max_count && curlen == nextlen) + continue; + + if (count < min_count) { + G2.bl_tree[curlen].Freq += count; + } else if (curlen != 0) { + if (curlen != prevlen) + G2.bl_tree[curlen].Freq++; + G2.bl_tree[REP_3_6].Freq++; + } else if (count <= 10) { + G2.bl_tree[REPZ_3_10].Freq++; + } else { + G2.bl_tree[REPZ_11_138].Freq++; + } + count = 0; + prevlen = curlen; + + max_count = 7; + min_count = 4; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else if (curlen == nextlen) { + max_count = 6; + min_count = 3; + } + } +} + + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. 
+ */ +static void send_tree(ct_data * tree, int max_code) +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + +/* tree[max_code+1].Len = -1; *//* guard already set */ + if (nextlen == 0) + max_count = 138, min_count = 3; + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[n + 1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + do { + SEND_CODE(curlen, G2.bl_tree); + } while (--count); + } else if (curlen != 0) { + if (curlen != prevlen) { + SEND_CODE(curlen, G2.bl_tree); + count--; + } + Assert(count >= 3 && count <= 6, " 3_6?"); + SEND_CODE(REP_3_6, G2.bl_tree); + send_bits(count - 3, 2); + } else if (count <= 10) { + SEND_CODE(REPZ_3_10, G2.bl_tree); + send_bits(count - 3, 3); + } else { + SEND_CODE(REPZ_11_138, G2.bl_tree); + send_bits(count - 11, 7); + } + count = 0; + prevlen = curlen; + if (nextlen == 0) { + max_count = 138; + min_count = 3; + } else if (curlen == nextlen) { + max_count = 6; + min_count = 3; + } else { + max_count = 7; + min_count = 4; + } + } +} + + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. + */ +static int build_bl_tree(void) +{ + int max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(G2.dyn_ltree, G2.l_desc.max_code); + scan_tree(G2.dyn_dtree, G2.d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(&G2.bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { + if (G2.bl_tree[bl_order[max_blindex]].Len != 0) + break; + } + /* Update opt_len to include the bit length tree and counts */ + G2.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", G2.opt_len, G2.static_len)); + + return max_blindex; +} + + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
+ */ +static void send_all_trees(int lcodes, int dcodes, int blcodes) +{ + int rank; /* index in bl_order */ + + Assert(lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + Assert(lcodes <= L_CODES && dcodes <= D_CODES + && blcodes <= BL_CODES, "too many codes"); + Tracev((stderr, "\nbl counts: ")); + send_bits(lcodes - 257, 5); /* not +255 as stated in appnote.txt */ + send_bits(dcodes - 1, 5); + send_bits(blcodes - 4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(G2.bl_tree[bl_order[rank]].Len, 3); + } + Tracev((stderr, "\nbl tree: sent %ld", G1.bits_sent)); + + send_tree((ct_data *) G2.dyn_ltree, lcodes - 1); /* send the literal tree */ + Tracev((stderr, "\nlit tree: sent %ld", G1.bits_sent)); + + send_tree((ct_data *) G2.dyn_dtree, dcodes - 1); /* send the distance tree */ + Tracev((stderr, "\ndist tree: sent %ld", G1.bits_sent)); +} + + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +static int ct_tally(int dist, int lc) +{ + G1.l_buf[G2.last_lit++] = lc; + if (dist == 0) { + /* lc is the unmatched char */ + G2.dyn_ltree[lc].Freq++; + } else { + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + Assert((ush) dist < (ush) MAX_DIST + && (ush) lc <= (ush) (MAX_MATCH - MIN_MATCH) + && (ush) D_CODE(dist) < (ush) D_CODES, "ct_tally: bad match" + ); + + G2.dyn_ltree[G2.length_code[lc] + LITERALS + 1].Freq++; + G2.dyn_dtree[D_CODE(dist)].Freq++; + + G1.d_buf[G2.last_dist++] = dist; + G2.flags |= G2.flag_bit; + } + G2.flag_bit <<= 1; + + /* Output the flags if they fill a byte: */ + if ((G2.last_lit & 7) == 0) { + G2.flag_buf[G2.last_flags++] = G2.flags; + G2.flags = 0; + G2.flag_bit = 1; + } + /* Try to guess if it is profitable to stop the current block here */ + if ((G2.last_lit & 0xfff) == 0) { + /* Compute an upper bound for the compressed length */ + ulg out_length = G2.last_lit * 8L; + ulg in_length = (ulg) G1.strstart - G1.block_start; + int dcode; + + for (dcode = 0; dcode < D_CODES; dcode++) { + out_length += G2.dyn_dtree[dcode].Freq * (5L + extra_dbits[dcode]); + } + out_length >>= 3; + Trace((stderr, + "\nlast_lit %u, last_dist %u, in %ld, out ~%ld(%ld%%) ", + G2.last_lit, G2.last_dist, in_length, out_length, + 100L - out_length * 100L / in_length)); + if (G2.last_dist < G2.last_lit / 2 && out_length < in_length / 2) + return 1; + } + return (G2.last_lit == LIT_BUFSIZE - 1 || G2.last_dist == DIST_BUFSIZE); + /* We avoid equality with LIT_BUFSIZE because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. 
+ */ +} + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +static void compress_block(ct_data * ltree, ct_data * dtree) +{ + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ + unsigned lx = 0; /* running index in l_buf */ + unsigned dx = 0; /* running index in d_buf */ + unsigned fx = 0; /* running index in flag_buf */ + uch flag = 0; /* current flags */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + + if (G2.last_lit != 0) do { + if ((lx & 7) == 0) + flag = G2.flag_buf[fx++]; + lc = G1.l_buf[lx++]; + if ((flag & 1) == 0) { + SEND_CODE(lc, ltree); /* send a literal byte */ + Tracecv(lc > ' ', (stderr, " '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = G2.length_code[lc]; + SEND_CODE(code + LITERALS + 1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra != 0) { + lc -= G2.base_length[code]; + send_bits(lc, extra); /* send the extra length bits */ + } + dist = G1.d_buf[dx++]; + /* Here, dist is the match distance - 1 */ + code = D_CODE(dist); + Assert(code < D_CODES, "bad d_code"); + + SEND_CODE(code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra != 0) { + dist -= G2.base_dist[code]; + send_bits(dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ + flag >>= 1; + } while (lx < G2.last_lit); + + SEND_CODE(END_BLOCK, ltree); +} + + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and output the encoded block to the zip file. This function + * returns the total compressed length for the file so far. + */ +static ulg flush_block(char *buf, ulg stored_len, int eof) +{ + ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + int max_blindex; /* index of last bit length code of non zero freq */ + + G2.flag_buf[G2.last_flags] = G2.flags; /* Save the flags for the last 8 items */ + + /* Construct the literal and distance trees */ + build_tree(&G2.l_desc); + Tracev((stderr, "\nlit data: dyn %ld, stat %ld", G2.opt_len, G2.static_len)); + + build_tree(&G2.d_desc); + Tracev((stderr, "\ndist data: dyn %ld, stat %ld", G2.opt_len, G2.static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(); + + /* Determine the best encoding. 
Compute first the block length in bytes */
+	opt_lenb = (G2.opt_len + 3 + 7) >> 3;
+	static_lenb = (G2.static_len + 3 + 7) >> 3;
+
+	Trace((stderr,
+		"\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u dist %u ",
+		opt_lenb, G2.opt_len, static_lenb, G2.static_len, stored_len,
+		G2.last_lit, G2.last_dist));
+
+	if (static_lenb <= opt_lenb)
+		opt_lenb = static_lenb;
+
+	/* If compression failed and this is the first and last block,
+	 * and if the zip file can be seeked (to rewrite the local header),
+	 * the whole file is transformed into a stored file:
+	 */
+	if (stored_len <= opt_lenb && eof && G2.compressed_len == 0L && seekable()) {
+		/* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+		if (buf == NULL)
+			bb_error_msg("block vanished");
+
+		copy_block(buf, (unsigned) stored_len, 0);	/* without header */
+		G2.compressed_len = stored_len << 3;
+	} else if (stored_len + 4 <= opt_lenb && buf != NULL) {
+		/* 4: two words for the lengths */
+		/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+		 * Otherwise we can't have processed more than WSIZE input bytes since
+		 * the last block flush, because compression would have been
+		 * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+		 * transform a block into a stored block.
+		 */
+		send_bits((STORED_BLOCK << 1) + eof, 3);	/* send block type */
+		G2.compressed_len = (G2.compressed_len + 3 + 7) & ~7L;
+		G2.compressed_len += (stored_len + 4) << 3;
+
+		copy_block(buf, (unsigned) stored_len, 1);	/* with header */
+	} else if (static_lenb == opt_lenb) {
+		send_bits((STATIC_TREES << 1) + eof, 3);
+		compress_block((ct_data *) G2.static_ltree, (ct_data *) G2.static_dtree);
+		G2.compressed_len += 3 + G2.static_len;
+	} else {
+		send_bits((DYN_TREES << 1) + eof, 3);
+		send_all_trees(G2.l_desc.max_code + 1, G2.d_desc.max_code + 1,
+				max_blindex + 1);
+		compress_block((ct_data *) G2.dyn_ltree, (ct_data *) G2.dyn_dtree);
+		G2.compressed_len += 3 + G2.opt_len;
+	}
+	Assert(G2.compressed_len == G1.bits_sent, "bad compressed size");
+	init_block();
+
+	if (eof) {
+		bi_windup();
+		G2.compressed_len += 7;	/* align on byte boundary */
+	}
+	Tracev((stderr, "\ncomprlen %lu(%lu) ", G2.compressed_len >> 3,
+			G2.compressed_len - 7 * eof));
+
+	return G2.compressed_len >> 3;
+}
+
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(h, c) (h = (((h)<<H_SHIFT) ^ (c)) & HASH_MASK)
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK(eof) \
+	flush_block( \
+		G1.block_start >= 0L \
+			? (char*)&G1.window[(unsigned)G1.block_start] \
+			: (char*)NULL, \
+		(ulg)G1.strstart - G1.block_start, \
+		(eof) \
+	)
+
+/* Insert string s in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of s are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
*/ +#define INSERT_STRING(s, match_head) \ +do { \ + UPDATE_HASH(G1.ins_h, G1.window[(s) + MIN_MATCH-1]); \ + G1.prev[(s) & WMASK] = match_head = head[G1.ins_h]; \ + head[G1.ins_h] = (s); \ +} while (0) + +static ulg deflate(void) +{ + IPos hash_head; /* head of hash chain */ + IPos prev_match; /* previous match */ + int flush; /* set if current block must be flushed */ + int match_available = 0; /* set if previous match exists */ + unsigned match_length = MIN_MATCH - 1; /* length of best match */ + + /* Process the input block. */ + while (G1.lookahead != 0) { + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + INSERT_STRING(G1.strstart, hash_head); + + /* Find the longest match, discarding those <= prev_length. + */ + G1.prev_length = match_length; + prev_match = G1.match_start; + match_length = MIN_MATCH - 1; + + if (hash_head != 0 && G1.prev_length < max_lazy_match + && G1.strstart - hash_head <= MAX_DIST + ) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + match_length = longest_match(hash_head); + /* longest_match() sets match_start */ + if (match_length > G1.lookahead) + match_length = G1.lookahead; + + /* Ignore a length 3 match if it is too distant: */ + if (match_length == MIN_MATCH && G1.strstart - G1.match_start > TOO_FAR) { + /* If prev_match is also MIN_MATCH, G1.match_start is garbage + * but we will ignore the current match anyway. + */ + match_length--; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (G1.prev_length >= MIN_MATCH && match_length <= G1.prev_length) { + check_match(G1.strstart - 1, prev_match, G1.prev_length); + flush = ct_tally(G1.strstart - 1 - prev_match, G1.prev_length - MIN_MATCH); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. + */ + G1.lookahead -= G1.prev_length - 1; + G1.prev_length -= 2; + do { + G1.strstart++; + INSERT_STRING(G1.strstart, hash_head); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. If lookahead < MIN_MATCH + * these bytes are garbage, but it does not matter since the + * next lookahead bytes will always be emitted as literals. + */ + } while (--G1.prev_length != 0); + match_available = 0; + match_length = MIN_MATCH - 1; + G1.strstart++; + if (flush) { + FLUSH_BLOCK(0); + G1.block_start = G1.strstart; + } + } else if (match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + Tracevv((stderr, "%c", G1.window[G1.strstart - 1])); + if (ct_tally(0, G1.window[G1.strstart - 1])) { + FLUSH_BLOCK(0); + G1.block_start = G1.strstart; + } + G1.strstart++; + G1.lookahead--; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + match_available = 1; + G1.strstart++; + G1.lookahead--; + } + Assert(G1.strstart <= G1.isize && lookahead <= G1.isize, "a bit too far"); + + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. 
+ */ + while (G1.lookahead < MIN_LOOKAHEAD && !G1.eofile) + fill_window(); + } + if (match_available) + ct_tally(0, G1.window[G1.strstart - 1]); + + return FLUSH_BLOCK(1); /* eof */ +} + + +/* =========================================================================== + * Initialize the bit string routines. + */ +static void bi_init(void) +{ + G1.bi_buf = 0; + G1.bi_valid = 0; +#ifdef DEBUG + G1.bits_sent = 0L; +#endif +} + + +/* =========================================================================== + * Initialize the "longest match" routines for a new file + */ +static void lm_init(ush * flagsp) +{ + unsigned j; + + /* Initialize the hash table. */ + memset(head, 0, HASH_SIZE * sizeof(*head)); + /* prev will be initialized on the fly */ + + /* speed options for the general purpose bit flag */ + *flagsp |= 2; /* FAST 4, SLOW 2 */ + /* ??? reduce max_chain_length for binary files */ + + G1.strstart = 0; + G1.block_start = 0L; + + G1.lookahead = file_read(G1.window, + sizeof(int) <= 2 ? (unsigned) WSIZE : 2 * WSIZE); + + if (G1.lookahead == 0 || G1.lookahead == (unsigned) -1) { + G1.eofile = 1; + G1.lookahead = 0; + return; + } + G1.eofile = 0; + /* Make sure that we always have enough lookahead. This is important + * if input comes from a device such as a tty. + */ + while (G1.lookahead < MIN_LOOKAHEAD && !G1.eofile) + fill_window(); + + G1.ins_h = 0; + for (j = 0; j < MIN_MATCH - 1; j++) + UPDATE_HASH(G1.ins_h, G1.window[j]); + /* If lookahead < MIN_MATCH, ins_h is garbage, but this is + * not important since only literal bytes will be emitted. + */ +} + + +/* =========================================================================== + * Allocate the match buffer, initialize the various tables and save the + * location of the internal file attribute (ascii/binary) and method + * (DEFLATE/STORE). 
+ * One callsite in zip() + */ +static void ct_init(void) +{ + int n; /* iterates over tree elements */ + int length; /* length value */ + int code; /* code value */ + int dist; /* distance index */ + + G2.compressed_len = 0L; + +#ifdef NOT_NEEDED + if (G2.static_dtree[0].Len != 0) + return; /* ct_init already called */ +#endif + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES - 1; code++) { + G2.base_length[code] = length; + for (n = 0; n < (1 << extra_lbits[code]); n++) { + G2.length_code[length++] = code; + } + } + Assert(length == 256, "ct_init: length != 256"); + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + G2.length_code[length - 1] = code; + + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + G2.base_dist[code] = dist; + for (n = 0; n < (1 << extra_dbits[code]); n++) { + G2.dist_code[dist++] = code; + } + } + Assert(dist == 256, "ct_init: dist != 256"); + dist >>= 7; /* from now on, all distances are divided by 128 */ + for (; code < D_CODES; code++) { + G2.base_dist[code] = dist << 7; + for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { + G2.dist_code[256 + dist++] = code; + } + } + Assert(dist == 256, "ct_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + /* already zeroed - it's in bss + for (n = 0; n <= MAX_BITS; n++) + G2.bl_count[n] = 0; */ + + n = 0; + while (n <= 143) { + G2.static_ltree[n++].Len = 8; + G2.bl_count[8]++; + } + while (n <= 255) { + G2.static_ltree[n++].Len = 9; + G2.bl_count[9]++; + } + while (n <= 279) { + G2.static_ltree[n++].Len = 7; + G2.bl_count[7]++; + } + while (n <= 287) { + G2.static_ltree[n++].Len = 8; + G2.bl_count[8]++; + } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes((ct_data *) G2.static_ltree, L_CODES + 1); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + G2.static_dtree[n].Len = 5; + G2.static_dtree[n].Code = bi_reverse(n, 5); + } + + /* Initialize the first block of the first file: */ + init_block(); +} + + +/* =========================================================================== + * Deflate in to out. + * IN assertions: the input and output buffers are cleared. + */ + +static void zip(void) +{ + ush deflate_flags = 0; /* pkzip -es, -en or -ex equivalent */ + + G1.outcnt = 0; + + /* Write the header to the gzip file. 
See algorithm.doc for the format */ + /* magic header for gzip files: 1F 8B */ + /* compression method: 8 (DEFLATED) */ + /* general flags: 0 */ + put_32bit(0x00088b1f); + put_32bit(0); /* Unix timestamp */ + + /* Write deflated file to zip file */ + G1.crc = ~0; + + bi_init(); + ct_init(); + lm_init(&deflate_flags); + + put_8bit(deflate_flags); /* extra flags */ + put_8bit(3); /* OS identifier = 3 (Unix) */ + + deflate(); + + /* Write the crc and uncompressed size */ + put_32bit(~G1.crc); + put_32bit(G1.isize); + + flush_outbuf(); +} + + +/* ======================================================================== */ +static +IF_DESKTOP(long long) int FAST_FUNC pack_gzip(transformer_state_t *xstate UNUSED_PARAM) +{ + /* Clear input and output buffers */ + G1.outcnt = 0; +#ifdef DEBUG + G1.insize = 0; +#endif + G1.isize = 0; + + /* Reinit G2.xxx */ + memset(&G2, 0, sizeof(G2)); + G2.l_desc.dyn_tree = G2.dyn_ltree; + G2.l_desc.static_tree = G2.static_ltree; + G2.l_desc.extra_bits = extra_lbits; + G2.l_desc.extra_base = LITERALS + 1; + G2.l_desc.elems = L_CODES; + G2.l_desc.max_length = MAX_BITS; + //G2.l_desc.max_code = 0; + G2.d_desc.dyn_tree = G2.dyn_dtree; + G2.d_desc.static_tree = G2.static_dtree; + G2.d_desc.extra_bits = extra_dbits; + //G2.d_desc.extra_base = 0; + G2.d_desc.elems = D_CODES; + G2.d_desc.max_length = MAX_BITS; + //G2.d_desc.max_code = 0; + G2.bl_desc.dyn_tree = G2.bl_tree; + //G2.bl_desc.static_tree = NULL; + G2.bl_desc.extra_bits = extra_blbits, + //G2.bl_desc.extra_base = 0; + G2.bl_desc.elems = BL_CODES; + G2.bl_desc.max_length = MAX_BL_BITS; + //G2.bl_desc.max_code = 0; + +#if 0 + /* Saving of timestamp is disabled. Why? + * - it is not Y2038-safe. + * - some people want deterministic results + * (normally they'd use -n, but our -n is a nop). + * - it's bloat. + * Per RFC 1952, gzfile.time=0 is "no timestamp". + * If users will demand this to be reinstated, + * implement -n "don't save timestamp". + */ + struct stat s; + s.st_ctime = 0; + fstat(STDIN_FILENO, &s); + zip(s.st_ctime); +#else + zip(); +#endif + return 0; +} + +#if ENABLE_FEATURE_GZIP_LONG_OPTIONS +static const char gzip_longopts[] ALIGN1 = + "stdout\0" No_argument "c" + "to-stdout\0" No_argument "c" + "force\0" No_argument "f" + "verbose\0" No_argument "v" +#if ENABLE_GUNZIP + "decompress\0" No_argument "d" + "uncompress\0" No_argument "d" + "test\0" No_argument "t" +#endif + "quiet\0" No_argument "q" + "fast\0" No_argument "1" + "best\0" No_argument "9" + "no-name\0" No_argument "n" + ; +#endif + +/* + * Linux kernel build uses gzip -d -n. We accept and ignore -n. + * Man page says: + * -n --no-name + * gzip: do not save the original file name and time stamp. + * (The original name is always saved if the name had to be truncated.) + * gunzip: do not restore the original file name/time even if present + * (remove only the gzip suffix from the compressed file name). + * This option is the default when decompressing. + * -N --name + * gzip: always save the original file name and time stamp (this is the default) + * gunzip: restore the original file name and time stamp if present. 
+ */ + +int gzip_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +#if ENABLE_GUNZIP +int gzip_main(int argc, char **argv) +#else +int gzip_main(int argc UNUSED_PARAM, char **argv) +#endif +{ + unsigned opt; +#ifdef ENABLE_FEATURE_GZIP_LEVELS + static const struct { + uint8_t good; + uint8_t chain_shift; + uint8_t lazy2; + uint8_t nice2; + } gzip_level_config[6] = { + {4, 4, 4/2, 16/2}, /* Level 4 */ + {8, 5, 16/2, 32/2}, /* Level 5 */ + {8, 7, 16/2, 128/2}, /* Level 6 */ + {8, 8, 32/2, 128/2}, /* Level 7 */ + {32, 10, 128/2, 258/2}, /* Level 8 */ + {32, 12, 258/2, 258/2}, /* Level 9 */ + }; +#endif + + SET_PTR_TO_GLOBALS((char *)xzalloc(sizeof(struct globals)+sizeof(struct globals2)) + + sizeof(struct globals)); + +#if ENABLE_FEATURE_GZIP_LONG_OPTIONS + applet_long_options = gzip_longopts; +#endif + /* Must match bbunzip's constants OPT_STDOUT, OPT_FORCE! */ + opt = getopt32(argv, "cfv" IF_GUNZIP("dt") "qn123456789"); +#if ENABLE_GUNZIP /* gunzip_main may not be visible... */ + if (opt & 0x18) // -d and/or -t + return gunzip_main(argc, argv); +#endif +#ifdef ENABLE_FEATURE_GZIP_LEVELS + opt >>= ENABLE_GUNZIP ? 7 : 5; /* drop cfv[dt]qn bits */ + if (opt == 0) + opt = 1 << 6; /* default: 6 */ + opt = ffs(opt >> 4); /* Maps -1..-4 to [0], -5 to [1] ... -9 to [5] */ + max_chain_length = 1 << gzip_level_config[opt].chain_shift; + good_match = gzip_level_config[opt].good; + max_lazy_match = gzip_level_config[opt].lazy2 * 2; + nice_match = gzip_level_config[opt].nice2 * 2; +#endif + option_mask32 &= 0x7; /* retain only -cfv */ + + /* Allocate all global buffers (for DYN_ALLOC option) */ + ALLOC(uch, G1.l_buf, INBUFSIZ); + ALLOC(uch, G1.outbuf, OUTBUFSIZ); + ALLOC(ush, G1.d_buf, DIST_BUFSIZE); + ALLOC(uch, G1.window, 2L * WSIZE); + ALLOC(ush, G1.prev, 1L << BITS); + + /* Initialize the CRC32 table */ + global_crc32_table = crc32_filltable(NULL, 0); + + argv += optind; + return bbunpack(argv, pack_gzip, append_ext, "gz"); +} diff --git a/probe-busybox/archival/libarchive/Kbuild.src b/probe-busybox/archival/libarchive/Kbuild.src new file mode 100644 index 00000000..ad5c5c42 --- /dev/null +++ b/probe-busybox/archival/libarchive/Kbuild.src @@ -0,0 +1,92 @@ +# Makefile for busybox +# +# Copyright (C) 1999-2004 by Erik Andersen +# +# Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ +lib-y:= common.o + +COMMON_FILES:= \ +\ + data_skip.o \ + data_extract_all.o \ + data_extract_to_stdout.o \ +\ + filter_accept_all.o \ + filter_accept_list.o \ + filter_accept_reject_list.o \ +\ + header_skip.o \ + header_list.o \ + header_verbose_list.o \ +\ + seek_by_read.o \ + seek_by_jump.o \ +\ + data_align.o \ + find_list_entry.o \ + init_handle.o + +DPKG_FILES:= \ + unpack_ar_archive.o \ + filter_accept_list_reassign.o \ + unsafe_prefix.o \ + get_header_ar.o \ + get_header_tar.o \ + get_header_tar_gz.o \ + get_header_tar_bz2.o \ + get_header_tar_lzma.o \ + get_header_tar_xz.o \ + +INSERT + +lib-$(CONFIG_DPKG) += $(DPKG_FILES) +lib-$(CONFIG_DPKG_DEB) += $(DPKG_FILES) + +lib-$(CONFIG_AR) += get_header_ar.o unpack_ar_archive.o +lib-$(CONFIG_CPIO) += get_header_cpio.o +lib-$(CONFIG_TAR) += get_header_tar.o unsafe_prefix.o +lib-$(CONFIG_FEATURE_TAR_TO_COMMAND) += data_extract_to_command.o +lib-$(CONFIG_LZOP) += lzo1x_1.o lzo1x_1o.o lzo1x_d.o +lib-$(CONFIG_UNLZOP) += lzo1x_1.o lzo1x_1o.o lzo1x_d.o +lib-$(CONFIG_LZOPCAT) += lzo1x_1.o lzo1x_1o.o lzo1x_d.o +lib-$(CONFIG_LZOP_COMPR_HIGH) += lzo1x_9x.o +lib-$(CONFIG_BUNZIP2) += open_transformer.o decompress_bunzip2.o +lib-$(CONFIG_BZCAT) += open_transformer.o decompress_bunzip2.o +lib-$(CONFIG_UNLZMA) += open_transformer.o decompress_unlzma.o +lib-$(CONFIG_LZCAT) += open_transformer.o decompress_unlzma.o +lib-$(CONFIG_LZMA) += open_transformer.o decompress_unlzma.o +lib-$(CONFIG_UNXZ) += open_transformer.o decompress_unxz.o +lib-$(CONFIG_XZCAT) += open_transformer.o decompress_unxz.o +lib-$(CONFIG_XZ) += open_transformer.o decompress_unxz.o +lib-$(CONFIG_GUNZIP) += open_transformer.o decompress_gunzip.o +lib-$(CONFIG_ZCAT) += open_transformer.o decompress_gunzip.o +lib-$(CONFIG_UNCOMPRESS) += open_transformer.o decompress_uncompress.o +lib-$(CONFIG_UNZIP) += open_transformer.o decompress_gunzip.o unsafe_prefix.o +lib-$(CONFIG_RPM2CPIO) += open_transformer.o decompress_gunzip.o get_header_cpio.o +lib-$(CONFIG_RPM) += open_transformer.o decompress_gunzip.o get_header_cpio.o +lib-$(CONFIG_GZIP) += open_transformer.o +lib-$(CONFIG_BZIP2) += open_transformer.o +lib-$(CONFIG_LZOP) += open_transformer.o +lib-$(CONFIG_MAN) += open_transformer.o +lib-$(CONFIG_SETFONT) += open_transformer.o +lib-$(CONFIG_FEATURE_2_4_MODULES) += open_transformer.o +lib-$(CONFIG_MODINFO) += open_transformer.o +lib-$(CONFIG_INSMOD) += open_transformer.o +lib-$(CONFIG_DEPMOD) += open_transformer.o +lib-$(CONFIG_RMMOD) += open_transformer.o +lib-$(CONFIG_LSMOD) += open_transformer.o +lib-$(CONFIG_MODPROBE) += open_transformer.o +lib-$(CONFIG_MODPROBE_SMALL) += open_transformer.o + +lib-$(CONFIG_FEATURE_SEAMLESS_Z) += open_transformer.o decompress_uncompress.o +lib-$(CONFIG_FEATURE_SEAMLESS_GZ) += open_transformer.o decompress_gunzip.o +lib-$(CONFIG_FEATURE_SEAMLESS_BZ2) += open_transformer.o decompress_bunzip2.o +lib-$(CONFIG_FEATURE_SEAMLESS_LZMA) += open_transformer.o decompress_unlzma.o +lib-$(CONFIG_FEATURE_SEAMLESS_XZ) += open_transformer.o decompress_unxz.o +lib-$(CONFIG_FEATURE_COMPRESS_USAGE) += open_transformer.o decompress_bunzip2.o +lib-$(CONFIG_FEATURE_COMPRESS_BBCONFIG) += open_transformer.o decompress_bunzip2.o + +ifneq ($(lib-y),) +lib-y += $(COMMON_FILES) +endif diff --git a/probe-busybox/archival/libarchive/bz/LICENSE b/probe-busybox/archival/libarchive/bz/LICENSE new file mode 100644 index 00000000..da434652 --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/LICENSE @@ -0,0 +1,44 @@ +bzip2 applet in busybox is based on lightly-modified source +of 
bzip2 version 1.0.4. bzip2 source is distributed +under the following conditions (copied verbatim from LICENSE file) +=========================================================== + + +This program, "bzip2", the associated library "libbzip2", and all +documentation, are copyright (C) 1996-2006 Julian R Seward. All +rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + +4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Julian Seward, Cambridge, UK. +jseward@bzip.org +bzip2/libbzip2 version 1.0.4 of 20 December 2006 diff --git a/probe-busybox/archival/libarchive/bz/README b/probe-busybox/archival/libarchive/bz/README new file mode 100644 index 00000000..fffd47b8 --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/README @@ -0,0 +1,90 @@ +This file is an abridged version of README from bzip2 1.0.4 +Build instructions (which are not relevant to busyboxed bzip2) +are removed. +=========================================================== + + +This is the README for bzip2/libzip2. +This version is fully compatible with the previous public releases. + +------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in this file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ + +Please read and be aware of the following: + + +WARNING: + + This program and library (attempts to) compress data by + performing several non-trivial transformations on it. + Unless you are 100% familiar with *all* the algorithms + contained herein, and with the consequences of modifying them, + you should NOT meddle with the compression or decompression + machinery. Incorrect changes can and very likely *will* + lead to disastrous loss of data. 
+ + +DISCLAIMER: + + I TAKE NO RESPONSIBILITY FOR ANY LOSS OF DATA ARISING FROM THE + USE OF THIS PROGRAM/LIBRARY, HOWSOEVER CAUSED. + + Every compression of a file implies an assumption that the + compressed file can be decompressed to reproduce the original. + Great efforts in design, coding and testing have been made to + ensure that this program works correctly. However, the complexity + of the algorithms, and, in particular, the presence of various + special cases in the code which occur with very low but non-zero + probability make it impossible to rule out the possibility of bugs + remaining in the program. DO NOT COMPRESS ANY DATA WITH THIS + PROGRAM UNLESS YOU ARE PREPARED TO ACCEPT THE POSSIBILITY, HOWEVER + SMALL, THAT THE DATA WILL NOT BE RECOVERABLE. + + That is not to say this program is inherently unreliable. + Indeed, I very much hope the opposite is true. bzip2/libbzip2 + has been carefully constructed and extensively tested. + + +PATENTS: + + To the best of my knowledge, bzip2/libbzip2 does not use any + patented algorithms. However, I do not have the resources + to carry out a patent search. Therefore I cannot give any + guarantee of the above statement. + + +I hope you find bzip2 useful. Feel free to contact me at + jseward@bzip.org +if you have any suggestions or queries. Many people mailed me with +comments, suggestions and patches after the releases of bzip-0.15, +bzip-0.21, and bzip2 versions 0.1pl2, 0.9.0, 0.9.5, 1.0.0, 1.0.1, +1.0.2 and 1.0.3, and the changes in bzip2 are largely a result of this +feedback. I thank you for your comments. + +bzip2's "home" is http://www.bzip.org/ + +Julian Seward +jseward@bzip.org +Cambridge, UK. + +18 July 1996 (version 0.15) +25 August 1996 (version 0.21) + 7 August 1997 (bzip2, version 0.1) +29 August 1997 (bzip2, version 0.1pl2) +23 August 1998 (bzip2, version 0.9.0) + 8 June 1999 (bzip2, version 0.9.5) + 4 Sept 1999 (bzip2, version 0.9.5d) + 5 May 2000 (bzip2, version 1.0pre8) +30 December 2001 (bzip2, version 1.0.2pre1) +15 February 2005 (bzip2, version 1.0.3) +20 December 2006 (bzip2, version 1.0.4) diff --git a/probe-busybox/archival/libarchive/bz/blocksort.c b/probe-busybox/archival/libarchive/bz/blocksort.c new file mode 100644 index 00000000..e600cb7a --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/blocksort.c @@ -0,0 +1,1072 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Block sorting machinery ---*/ +/*--- blocksort.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. 
+------------------------------------------------------------------ */ + +/* #include "bzlib_private.h" */ + +#define mswap(zz1, zz2) \ +{ \ + int32_t zztmp = zz1; \ + zz1 = zz2; \ + zz2 = zztmp; \ +} + +static +/* No measurable speed gain with inlining */ +/* ALWAYS_INLINE */ +void mvswap(uint32_t* ptr, int32_t zzp1, int32_t zzp2, int32_t zzn) +{ + while (zzn > 0) { + mswap(ptr[zzp1], ptr[zzp2]); + zzp1++; + zzp2++; + zzn--; + } +} + +static +ALWAYS_INLINE +int32_t mmin(int32_t a, int32_t b) +{ + return (a < b) ? a : b; +} + + +/*---------------------------------------------*/ +/*--- Fallback O(N log(N)^2) sorting ---*/ +/*--- algorithm, for repetitive blocks ---*/ +/*---------------------------------------------*/ + +/*---------------------------------------------*/ +static +inline +void fallbackSimpleSort(uint32_t* fmap, + uint32_t* eclass, + int32_t lo, + int32_t hi) +{ + int32_t i, j, tmp; + uint32_t ec_tmp; + + if (lo == hi) return; + + if (hi - lo > 3) { + for (i = hi-4; i >= lo; i--) { + tmp = fmap[i]; + ec_tmp = eclass[tmp]; + for (j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4) + fmap[j-4] = fmap[j]; + fmap[j-4] = tmp; + } + } + + for (i = hi-1; i >= lo; i--) { + tmp = fmap[i]; + ec_tmp = eclass[tmp]; + for (j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++) + fmap[j-1] = fmap[j]; + fmap[j-1] = tmp; + } +} + + +/*---------------------------------------------*/ +#define fpush(lz,hz) { \ + stackLo[sp] = lz; \ + stackHi[sp] = hz; \ + sp++; \ +} + +#define fpop(lz,hz) { \ + sp--; \ + lz = stackLo[sp]; \ + hz = stackHi[sp]; \ +} + +#define FALLBACK_QSORT_SMALL_THRESH 10 +#define FALLBACK_QSORT_STACK_SIZE 100 + +static +void fallbackQSort3(uint32_t* fmap, + uint32_t* eclass, + int32_t loSt, + int32_t hiSt) +{ + int32_t unLo, unHi, ltLo, gtHi, n, m; + int32_t sp, lo, hi; + uint32_t med, r, r3; + int32_t stackLo[FALLBACK_QSORT_STACK_SIZE]; + int32_t stackHi[FALLBACK_QSORT_STACK_SIZE]; + + r = 0; + + sp = 0; + fpush(loSt, hiSt); + + while (sp > 0) { + AssertH(sp < FALLBACK_QSORT_STACK_SIZE - 1, 1004); + + fpop(lo, hi); + if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) { + fallbackSimpleSort(fmap, eclass, lo, hi); + continue; + } + + /* Random partitioning. Median of 3 sometimes fails to + * avoid bad cases. Median of 9 seems to help but + * looks rather expensive. This too seems to work but + * is cheaper. Guidance for the magic constants + * 7621 and 32768 is taken from Sedgewick's algorithms + * book, chapter 35. 
+ */ + r = ((r * 7621) + 1) % 32768; + r3 = r % 3; + if (r3 == 0) + med = eclass[fmap[lo]]; + else if (r3 == 1) + med = eclass[fmap[(lo+hi)>>1]]; + else + med = eclass[fmap[hi]]; + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (1) { + while (1) { + if (unLo > unHi) break; + n = (int32_t)eclass[fmap[unLo]] - (int32_t)med; + if (n == 0) { + mswap(fmap[unLo], fmap[ltLo]); + ltLo++; + unLo++; + continue; + }; + if (n > 0) break; + unLo++; + } + while (1) { + if (unLo > unHi) break; + n = (int32_t)eclass[fmap[unHi]] - (int32_t)med; + if (n == 0) { + mswap(fmap[unHi], fmap[gtHi]); + gtHi--; unHi--; + continue; + }; + if (n < 0) break; + unHi--; + } + if (unLo > unHi) break; + mswap(fmap[unLo], fmap[unHi]); unLo++; unHi--; + } + + AssertD(unHi == unLo-1, "fallbackQSort3(2)"); + + if (gtHi < ltLo) continue; + + n = mmin(ltLo-lo, unLo-ltLo); mvswap(fmap, lo, unLo-n, n); + m = mmin(hi-gtHi, gtHi-unHi); mvswap(fmap, unLo, hi-m+1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + if (n - lo > hi - m) { + fpush(lo, n); + fpush(m, hi); + } else { + fpush(m, hi); + fpush(lo, n); + } + } +} + +#undef fpush +#undef fpop +#undef FALLBACK_QSORT_SMALL_THRESH +#undef FALLBACK_QSORT_STACK_SIZE + + +/*---------------------------------------------*/ +/* Pre: + * nblock > 0 + * eclass exists for [0 .. nblock-1] + * ((uint8_t*)eclass) [0 .. nblock-1] holds block + * ptr exists for [0 .. nblock-1] + * + * Post: + * ((uint8_t*)eclass) [0 .. nblock-1] holds block + * All other areas of eclass destroyed + * fmap [0 .. nblock-1] holds sorted order + * bhtab[0 .. 2+(nblock/32)] destroyed +*/ + +#define SET_BH(zz) bhtab[(zz) >> 5] |= (1 << ((zz) & 31)) +#define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31)) +#define ISSET_BH(zz) (bhtab[(zz) >> 5] & (1 << ((zz) & 31))) +#define WORD_BH(zz) bhtab[(zz) >> 5] +#define UNALIGNED_BH(zz) ((zz) & 0x01f) + +static +void fallbackSort(uint32_t* fmap, + uint32_t* eclass, + uint32_t* bhtab, + int32_t nblock) +{ + int32_t ftab[257]; + int32_t ftabCopy[256]; + int32_t H, i, j, k, l, r, cc, cc1; + int32_t nNotDone; + int32_t nBhtab; + uint8_t* eclass8 = (uint8_t*)eclass; + + /* + * Initial 1-char radix sort to generate + * initial fmap and initial BH bits. + */ + for (i = 0; i < 257; i++) ftab[i] = 0; + for (i = 0; i < nblock; i++) ftab[eclass8[i]]++; + for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i]; + + j = ftab[0]; /* bbox: optimized */ + for (i = 1; i < 257; i++) { + j += ftab[i]; + ftab[i] = j; + } + + for (i = 0; i < nblock; i++) { + j = eclass8[i]; + k = ftab[j] - 1; + ftab[j] = k; + fmap[k] = i; + } + + nBhtab = 2 + ((uint32_t)nblock / 32); /* bbox: unsigned div is easier */ + for (i = 0; i < nBhtab; i++) bhtab[i] = 0; + for (i = 0; i < 256; i++) SET_BH(ftab[i]); + + /* + * Inductively refine the buckets. Kind-of an + * "exponential radix sort" (!), inspired by the + * Manber-Myers suffix array construction algorithm. 
+ */ + + /*-- set sentinel bits for block-end detection --*/ + for (i = 0; i < 32; i++) { + SET_BH(nblock + 2*i); + CLEAR_BH(nblock + 2*i + 1); + } + + /*-- the log(N) loop --*/ + H = 1; + while (1) { + j = 0; + for (i = 0; i < nblock; i++) { + if (ISSET_BH(i)) + j = i; + k = fmap[i] - H; + if (k < 0) + k += nblock; + eclass[k] = j; + } + + nNotDone = 0; + r = -1; + while (1) { + + /*-- find the next non-singleton bucket --*/ + k = r + 1; + while (ISSET_BH(k) && UNALIGNED_BH(k)) + k++; + if (ISSET_BH(k)) { + while (WORD_BH(k) == 0xffffffff) k += 32; + while (ISSET_BH(k)) k++; + } + l = k - 1; + if (l >= nblock) + break; + while (!ISSET_BH(k) && UNALIGNED_BH(k)) + k++; + if (!ISSET_BH(k)) { + while (WORD_BH(k) == 0x00000000) k += 32; + while (!ISSET_BH(k)) k++; + } + r = k - 1; + if (r >= nblock) + break; + + /*-- now [l, r] bracket current bucket --*/ + if (r > l) { + nNotDone += (r - l + 1); + fallbackQSort3(fmap, eclass, l, r); + + /*-- scan bucket and generate header bits-- */ + cc = -1; + for (i = l; i <= r; i++) { + cc1 = eclass[fmap[i]]; + if (cc != cc1) { + SET_BH(i); + cc = cc1; + }; + } + } + } + + H *= 2; + if (H > nblock || nNotDone == 0) + break; + } + + /* + * Reconstruct the original block in + * eclass8 [0 .. nblock-1], since the + * previous phase destroyed it. + */ + j = 0; + for (i = 0; i < nblock; i++) { + while (ftabCopy[j] == 0) + j++; + ftabCopy[j]--; + eclass8[fmap[i]] = (uint8_t)j; + } + AssertH(j < 256, 1005); +} + +#undef SET_BH +#undef CLEAR_BH +#undef ISSET_BH +#undef WORD_BH +#undef UNALIGNED_BH + + +/*---------------------------------------------*/ +/*--- The main, O(N^2 log(N)) sorting ---*/ +/*--- algorithm. Faster for "normal" ---*/ +/*--- non-repetitive blocks. ---*/ +/*---------------------------------------------*/ + +/*---------------------------------------------*/ +static +NOINLINE +int mainGtU( + uint32_t i1, + uint32_t i2, + uint8_t* block, + uint16_t* quadrant, + uint32_t nblock, + int32_t* budget) +{ + int32_t k; + uint8_t c1, c2; + uint16_t s1, s2; + +/* Loop unrolling here is actually very useful + * (generated code is much simpler), + * code size increase is only 270 bytes (i386) + * but speeds up compression 10% overall + */ + +#if CONFIG_BZIP2_FAST >= 1 + +#define TIMES_8(code) \ + code; code; code; code; \ + code; code; code; code; +#define TIMES_12(code) \ + code; code; code; code; \ + code; code; code; code; \ + code; code; code; code; + +#else + +#define TIMES_8(code) \ +{ \ + int nn = 8; \ + do { \ + code; \ + } while (--nn); \ +} +#define TIMES_12(code) \ +{ \ + int nn = 12; \ + do { \ + code; \ + } while (--nn); \ +} + +#endif + + AssertD(i1 != i2, "mainGtU"); + TIMES_12( + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + i1++; i2++; + ) + + k = nblock + 8; + + do { + TIMES_8( + c1 = block[i1]; c2 = block[i2]; + if (c1 != c2) return (c1 > c2); + s1 = quadrant[i1]; s2 = quadrant[i2]; + if (s1 != s2) return (s1 > s2); + i1++; i2++; + ) + + if (i1 >= nblock) i1 -= nblock; + if (i2 >= nblock) i2 -= nblock; + + (*budget)--; + k -= 8; + } while (k >= 0); + + return False; +} +#undef TIMES_8 +#undef TIMES_12 + +/*---------------------------------------------*/ +/* + * Knuth's increments seem to work better + * than Incerpi-Sedgewick here. Possibly + * because the number of elems to sort is + * usually small, typically <= 20. 
+ */ +static +const int32_t incs[14] = { + 1, 4, 13, 40, 121, 364, 1093, 3280, + 9841, 29524, 88573, 265720, + 797161, 2391484 +}; + +static +void mainSimpleSort(uint32_t* ptr, + uint8_t* block, + uint16_t* quadrant, + int32_t nblock, + int32_t lo, + int32_t hi, + int32_t d, + int32_t* budget) +{ + int32_t i, j, h, bigN, hp; + uint32_t v; + + bigN = hi - lo + 1; + if (bigN < 2) return; + + hp = 0; + while (incs[hp] < bigN) hp++; + hp--; + + for (; hp >= 0; hp--) { + h = incs[hp]; + + i = lo + h; + while (1) { + /*-- copy 1 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; + +/* 1.5% overall speedup, +290 bytes */ +#if CONFIG_BZIP2_FAST >= 3 + /*-- copy 2 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; + + /*-- copy 3 --*/ + if (i > hi) break; + v = ptr[i]; + j = i; + while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) { + ptr[j] = ptr[j-h]; + j = j - h; + if (j <= (lo + h - 1)) break; + } + ptr[j] = v; + i++; +#endif + if (*budget < 0) return; + } + } +} + + +/*---------------------------------------------*/ +/* + * The following is an implementation of + * an elegant 3-way quicksort for strings, + * described in a paper "Fast Algorithms for + * Sorting and Searching Strings", by Robert + * Sedgewick and Jon L. Bentley. + */ + +static +ALWAYS_INLINE +uint8_t mmed3(uint8_t a, uint8_t b, uint8_t c) +{ + uint8_t t; + if (a > b) { + t = a; + a = b; + b = t; + }; + /* here b >= a */ + if (b > c) { + b = c; + if (a > b) + b = a; + } + return b; +} + +#define mpush(lz,hz,dz) \ +{ \ + stackLo[sp] = lz; \ + stackHi[sp] = hz; \ + stackD [sp] = dz; \ + sp++; \ +} + +#define mpop(lz,hz,dz) \ +{ \ + sp--; \ + lz = stackLo[sp]; \ + hz = stackHi[sp]; \ + dz = stackD [sp]; \ +} + +#define mnextsize(az) (nextHi[az] - nextLo[az]) + +#define mnextswap(az,bz) \ +{ \ + int32_t tz; \ + tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \ + tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \ + tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; \ +} + +#define MAIN_QSORT_SMALL_THRESH 20 +#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT) +#define MAIN_QSORT_STACK_SIZE 100 + +static NOINLINE +void mainQSort3(uint32_t* ptr, + uint8_t* block, + uint16_t* quadrant, + int32_t nblock, + int32_t loSt, + int32_t hiSt, + int32_t dSt, + int32_t* budget) +{ + int32_t unLo, unHi, ltLo, gtHi, n, m, med; + int32_t sp, lo, hi, d; + + int32_t stackLo[MAIN_QSORT_STACK_SIZE]; + int32_t stackHi[MAIN_QSORT_STACK_SIZE]; + int32_t stackD [MAIN_QSORT_STACK_SIZE]; + + int32_t nextLo[3]; + int32_t nextHi[3]; + int32_t nextD [3]; + + sp = 0; + mpush(loSt, hiSt, dSt); + + while (sp > 0) { + AssertH(sp < MAIN_QSORT_STACK_SIZE - 2, 1001); + + mpop(lo, hi, d); + if (hi - lo < MAIN_QSORT_SMALL_THRESH + || d > MAIN_QSORT_DEPTH_THRESH + ) { + mainSimpleSort(ptr, block, quadrant, nblock, lo, hi, d, budget); + if (*budget < 0) + return; + continue; + } + med = (int32_t) mmed3(block[ptr[lo ] + d], + block[ptr[hi ] + d], + block[ptr[(lo+hi) >> 1] + d]); + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (1) { + while (1) { + if (unLo > unHi) + break; + n = ((int32_t)block[ptr[unLo]+d]) - med; + if (n == 0) { + mswap(ptr[unLo], ptr[ltLo]); + ltLo++; + unLo++; + continue; + }; + if (n > 0) 
break; + unLo++; + } + while (1) { + if (unLo > unHi) + break; + n = ((int32_t)block[ptr[unHi]+d]) - med; + if (n == 0) { + mswap(ptr[unHi], ptr[gtHi]); + gtHi--; + unHi--; + continue; + }; + if (n < 0) break; + unHi--; + } + if (unLo > unHi) + break; + mswap(ptr[unLo], ptr[unHi]); + unLo++; + unHi--; + } + + AssertD(unHi == unLo-1, "mainQSort3(2)"); + + if (gtHi < ltLo) { + mpush(lo, hi, d + 1); + continue; + } + + n = mmin(ltLo-lo, unLo-ltLo); mvswap(ptr, lo, unLo-n, n); + m = mmin(hi-gtHi, gtHi-unHi); mvswap(ptr, unLo, hi-m+1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + nextLo[0] = lo; nextHi[0] = n; nextD[0] = d; + nextLo[1] = m; nextHi[1] = hi; nextD[1] = d; + nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1; + + if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1); + if (mnextsize(1) < mnextsize(2)) mnextswap(1, 2); + if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1); + + AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)"); + AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)"); + + mpush(nextLo[0], nextHi[0], nextD[0]); + mpush(nextLo[1], nextHi[1], nextD[1]); + mpush(nextLo[2], nextHi[2], nextD[2]); + } +} + +#undef mpush +#undef mpop +#undef mnextsize +#undef mnextswap +#undef MAIN_QSORT_SMALL_THRESH +#undef MAIN_QSORT_DEPTH_THRESH +#undef MAIN_QSORT_STACK_SIZE + + +/*---------------------------------------------*/ +/* Pre: + * nblock > N_OVERSHOOT + * block32 exists for [0 .. nblock-1 +N_OVERSHOOT] + * ((uint8_t*)block32) [0 .. nblock-1] holds block + * ptr exists for [0 .. nblock-1] + * + * Post: + * ((uint8_t*)block32) [0 .. nblock-1] holds block + * All other areas of block32 destroyed + * ftab[0 .. 65536] destroyed + * ptr [0 .. nblock-1] holds sorted order + * if (*budget < 0), sorting was abandoned + */ + +#define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8]) +#define SETMASK (1 << 21) +#define CLEARMASK (~(SETMASK)) + +static NOINLINE +void mainSort(EState* state, + uint32_t* ptr, + uint8_t* block, + uint16_t* quadrant, + uint32_t* ftab, + int32_t nblock, + int32_t* budget) +{ + int32_t i, j, k, ss, sb; + uint8_t c1; + int32_t numQSorted; + uint16_t s; + Bool bigDone[256]; + /* bbox: moved to EState to save stack + int32_t runningOrder[256]; + int32_t copyStart[256]; + int32_t copyEnd [256]; + */ +#define runningOrder (state->mainSort__runningOrder) +#define copyStart (state->mainSort__copyStart) +#define copyEnd (state->mainSort__copyEnd) + + /*-- set up the 2-byte frequency table --*/ + /* was: for (i = 65536; i >= 0; i--) ftab[i] = 0; */ + memset(ftab, 0, 65537 * sizeof(ftab[0])); + + j = block[0] << 8; + i = nblock - 1; +/* 3%, +300 bytes */ +#if CONFIG_BZIP2_FAST >= 2 + for (; i >= 3; i -= 4) { + quadrant[i] = 0; + j = (j >> 8) | (((uint16_t)block[i]) << 8); + ftab[j]++; + quadrant[i-1] = 0; + j = (j >> 8) | (((uint16_t)block[i-1]) << 8); + ftab[j]++; + quadrant[i-2] = 0; + j = (j >> 8) | (((uint16_t)block[i-2]) << 8); + ftab[j]++; + quadrant[i-3] = 0; + j = (j >> 8) | (((uint16_t)block[i-3]) << 8); + ftab[j]++; + } +#endif + for (; i >= 0; i--) { + quadrant[i] = 0; + j = (j >> 8) | (((uint16_t)block[i]) << 8); + ftab[j]++; + } + + /*-- (emphasises close relationship of block & quadrant) --*/ + for (i = 0; i < BZ_N_OVERSHOOT; i++) { + block [nblock+i] = block[i]; + quadrant[nblock+i] = 0; + } + + /*-- Complete the initial radix sort --*/ + j = ftab[0]; /* bbox: optimized */ + for (i = 1; i <= 65536; i++) { + j += ftab[i]; + ftab[i] = j; + } + + s = block[0] << 8; + i = nblock - 1; +#if CONFIG_BZIP2_FAST >= 2 + for (; i >= 3; i -= 4) { + s 
= (s >> 8) | (block[i] << 8); + j = ftab[s] - 1; + ftab[s] = j; + ptr[j] = i; + s = (s >> 8) | (block[i-1] << 8); + j = ftab[s] - 1; + ftab[s] = j; + ptr[j] = i-1; + s = (s >> 8) | (block[i-2] << 8); + j = ftab[s] - 1; + ftab[s] = j; + ptr[j] = i-2; + s = (s >> 8) | (block[i-3] << 8); + j = ftab[s] - 1; + ftab[s] = j; + ptr[j] = i-3; + } +#endif + for (; i >= 0; i--) { + s = (s >> 8) | (block[i] << 8); + j = ftab[s] - 1; + ftab[s] = j; + ptr[j] = i; + } + + /* + * Now ftab contains the first loc of every small bucket. + * Calculate the running order, from smallest to largest + * big bucket. + */ + for (i = 0; i <= 255; i++) { + bigDone [i] = False; + runningOrder[i] = i; + } + + { + int32_t vv; + /* bbox: was: int32_t h = 1; */ + /* do h = 3 * h + 1; while (h <= 256); */ + uint32_t h = 364; + + do { + /*h = h / 3;*/ + h = (h * 171) >> 9; /* bbox: fast h/3 */ + for (i = h; i <= 255; i++) { + vv = runningOrder[i]; + j = i; + while (BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv)) { + runningOrder[j] = runningOrder[j-h]; + j = j - h; + if (j <= (h - 1)) + goto zero; + } + zero: + runningOrder[j] = vv; + } + } while (h != 1); + } + + /* + * The main sorting loop. + */ + + numQSorted = 0; + + for (i = 0; i <= 255; i++) { + + /* + * Process big buckets, starting with the least full. + * Basically this is a 3-step process in which we call + * mainQSort3 to sort the small buckets [ss, j], but + * also make a big effort to avoid the calls if we can. + */ + ss = runningOrder[i]; + + /* + * Step 1: + * Complete the big bucket [ss] by quicksorting + * any unsorted small buckets [ss, j], for j != ss. + * Hopefully previous pointer-scanning phases have already + * completed many of the small buckets [ss, j], so + * we don't have to sort them at all. + */ + for (j = 0; j <= 255; j++) { + if (j != ss) { + sb = (ss << 8) + j; + if (!(ftab[sb] & SETMASK)) { + int32_t lo = ftab[sb] & CLEARMASK; + int32_t hi = (ftab[sb+1] & CLEARMASK) - 1; + if (hi > lo) { + mainQSort3( + ptr, block, quadrant, nblock, + lo, hi, BZ_N_RADIX, budget + ); + if (*budget < 0) return; + numQSorted += (hi - lo + 1); + } + } + ftab[sb] |= SETMASK; + } + } + + AssertH(!bigDone[ss], 1006); + + /* + * Step 2: + * Now scan this big bucket [ss] so as to synthesise the + * sorted order for small buckets [t, ss] for all t, + * including, magically, the bucket [ss,ss] too. + * This will avoid doing Real Work in subsequent Step 1's. + */ + { + for (j = 0; j <= 255; j++) { + copyStart[j] = ftab[(j << 8) + ss] & CLEARMASK; + copyEnd [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1; + } + for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) { + k = ptr[j] - 1; + if (k < 0) + k += nblock; + c1 = block[k]; + if (!bigDone[c1]) + ptr[copyStart[c1]++] = k; + } + for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) { + k = ptr[j]-1; + if (k < 0) + k += nblock; + c1 = block[k]; + if (!bigDone[c1]) + ptr[copyEnd[c1]--] = k; + } + } + + /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1. + * Necessity for this case is demonstrated by compressing + * a sequence of approximately 48.5 million of character + * 251; 1.0.0/1.0.1 will then die here. */ + AssertH((copyStart[ss]-1 == copyEnd[ss]) \ + || (copyStart[ss] == 0 && copyEnd[ss] == nblock-1), 1007); + + for (j = 0; j <= 255; j++) + ftab[(j << 8) + ss] |= SETMASK; + + /* + * Step 3: + * The [ss] big bucket is now done. Record this fact, + * and update the quadrant descriptors. Remember to + * update quadrants in the overshoot area too, if + * necessary. 
The "if (i < 255)" test merely skips + * this updating for the last bucket processed, since + * updating for the last bucket is pointless. + * + * The quadrant array provides a way to incrementally + * cache sort orderings, as they appear, so as to + * make subsequent comparisons in fullGtU() complete + * faster. For repetitive blocks this makes a big + * difference (but not big enough to be able to avoid + * the fallback sorting mechanism, exponential radix sort). + * + * The precise meaning is: at all times: + * + * for 0 <= i < nblock and 0 <= j <= nblock + * + * if block[i] != block[j], + * + * then the relative values of quadrant[i] and + * quadrant[j] are meaningless. + * + * else { + * if quadrant[i] < quadrant[j] + * then the string starting at i lexicographically + * precedes the string starting at j + * + * else if quadrant[i] > quadrant[j] + * then the string starting at j lexicographically + * precedes the string starting at i + * + * else + * the relative ordering of the strings starting + * at i and j has not yet been determined. + * } + */ + bigDone[ss] = True; + + if (i < 255) { + int32_t bbStart = ftab[ss << 8] & CLEARMASK; + int32_t bbSize = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart; + int32_t shifts = 0; + + while ((bbSize >> shifts) > 65534) shifts++; + + for (j = bbSize-1; j >= 0; j--) { + int32_t a2update = ptr[bbStart + j]; + uint16_t qVal = (uint16_t)(j >> shifts); + quadrant[a2update] = qVal; + if (a2update < BZ_N_OVERSHOOT) + quadrant[a2update + nblock] = qVal; + } + AssertH(((bbSize-1) >> shifts) <= 65535, 1002); + } + } +#undef runningOrder +#undef copyStart +#undef copyEnd +} + +#undef BIGFREQ +#undef SETMASK +#undef CLEARMASK + + +/*---------------------------------------------*/ +/* Pre: + * nblock > 0 + * arr2 exists for [0 .. nblock-1 +N_OVERSHOOT] + * ((uint8_t*)arr2)[0 .. nblock-1] holds block + * arr1 exists for [0 .. nblock-1] + * + * Post: + * ((uint8_t*)arr2) [0 .. nblock-1] holds block + * All other areas of block destroyed + * ftab[0 .. 65536] destroyed + * arr1[0 .. nblock-1] holds sorted order + */ +static NOINLINE +void BZ2_blockSort(EState* s) +{ + /* In original bzip2 1.0.4, it's a parameter, but 30 + * (which was the default) should work ok. */ + enum { wfact = 30 }; + + uint32_t* ptr = s->ptr; + uint8_t* block = s->block; + uint32_t* ftab = s->ftab; + int32_t nblock = s->nblock; + uint16_t* quadrant; + int32_t budget; + int32_t i; + + if (nblock < 10000) { + fallbackSort(s->arr1, s->arr2, ftab, nblock); + } else { + /* Calculate the location for quadrant, remembering to get + * the alignment right. Assumes that &(block[0]) is at least + * 2-byte aligned -- this should be ok since block is really + * the first section of arr2. + */ + i = nblock + BZ_N_OVERSHOOT; + if (i & 1) i++; + quadrant = (uint16_t*)(&(block[i])); + + /* (wfact-1) / 3 puts the default-factor-30 + * transition point at very roughly the same place as + * with v0.1 and v0.9.0. + * Not that it particularly matters any more, since the + * resulting compressed stream is now the same regardless + * of whether or not we use the main sort or fallback sort. 
+ */ + budget = nblock * ((wfact-1) / 3); + + mainSort(s, ptr, block, quadrant, ftab, nblock, &budget); + if (budget < 0) { + fallbackSort(s->arr1, s->arr2, ftab, nblock); + } + } + + s->origPtr = -1; + for (i = 0; i < s->nblock; i++) + if (ptr[i] == 0) { + s->origPtr = i; + break; + }; + + AssertH(s->origPtr != -1, 1003); +} + + +/*-------------------------------------------------------------*/ +/*--- end blocksort.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/bz/bzlib.c b/probe-busybox/archival/libarchive/bz/bzlib.c new file mode 100644 index 00000000..5f7db747 --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/bzlib.c @@ -0,0 +1,429 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Library top-level functions. ---*/ +/*--- bzlib.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ */ + +/* CHANGES + * 0.9.0 -- original version. + * 0.9.0a/b -- no changes in this file. + * 0.9.0c -- made zero-length BZ_FLUSH work correctly in bzCompress(). + * fixed bzWrite/bzRead to ignore zero-length requests. + * fixed bzread to correctly handle read requests after EOF. + * wrong parameter order in call to bzDecompressInit in + * bzBuffToBuffDecompress. Fixed. 
+ */ + +/* #include "bzlib_private.h" */ + +/*---------------------------------------------------*/ +/*--- Compression stuff ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +#if BZ_LIGHT_DEBUG +static +void bz_assert_fail(int errcode) +{ + /* if (errcode == 1007) bb_error_msg_and_die("probably bad RAM"); */ + bb_error_msg_and_die("internal error %d", errcode); +} +#endif + +/*---------------------------------------------------*/ +static +void prepare_new_block(EState* s) +{ + int i; + s->nblock = 0; + s->numZ = 0; + s->state_out_pos = 0; + BZ_INITIALISE_CRC(s->blockCRC); + /* inlined memset would be nice to have here */ + for (i = 0; i < 256; i++) + s->inUse[i] = 0; + s->blockNo++; +} + + +/*---------------------------------------------------*/ +static +ALWAYS_INLINE +void init_RL(EState* s) +{ + s->state_in_ch = 256; + s->state_in_len = 0; +} + + +static +int isempty_RL(EState* s) +{ + return (s->state_in_ch >= 256 || s->state_in_len <= 0); +} + + +/*---------------------------------------------------*/ +static +void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k) +{ + int32_t n; + EState* s; + + s = xzalloc(sizeof(EState)); + s->strm = strm; + + n = 100000 * blockSize100k; + s->arr1 = xmalloc(n * sizeof(uint32_t)); + s->mtfv = (uint16_t*)s->arr1; + s->ptr = (uint32_t*)s->arr1; + s->arr2 = xmalloc((n + BZ_N_OVERSHOOT) * sizeof(uint32_t)); + s->block = (uint8_t*)s->arr2; + s->ftab = xmalloc(65537 * sizeof(uint32_t)); + + s->crc32table = crc32_filltable(NULL, 1); + + s->state = BZ_S_INPUT; + s->mode = BZ_M_RUNNING; + s->blockSize100k = blockSize100k; + s->nblockMAX = n - 19; + + strm->state = s; + /*strm->total_in = 0;*/ + strm->total_out = 0; + init_RL(s); + prepare_new_block(s); +} + + +/*---------------------------------------------------*/ +static +void add_pair_to_block(EState* s) +{ + int32_t i; + uint8_t ch = (uint8_t)(s->state_in_ch); + for (i = 0; i < s->state_in_len; i++) { + BZ_UPDATE_CRC(s, s->blockCRC, ch); + } + s->inUse[s->state_in_ch] = 1; + switch (s->state_in_len) { + case 3: + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + /* fall through */ + case 2: + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + /* fall through */ + case 1: + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + break; + default: + s->inUse[s->state_in_len - 4] = 1; + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + s->block[s->nblock] = (uint8_t)ch; s->nblock++; + s->block[s->nblock] = (uint8_t)(s->state_in_len - 4); + s->nblock++; + break; + } +} + + +/*---------------------------------------------------*/ +static +void flush_RL(EState* s) +{ + if (s->state_in_ch < 256) add_pair_to_block(s); + init_RL(s); +} + + +/*---------------------------------------------------*/ +#define ADD_CHAR_TO_BLOCK(zs, zchh0) \ +{ \ + uint32_t zchh = (uint32_t)(zchh0); \ + /*-- fast track the common case --*/ \ + if (zchh != zs->state_in_ch && zs->state_in_len == 1) { \ + uint8_t ch = (uint8_t)(zs->state_in_ch); \ + BZ_UPDATE_CRC(zs, zs->blockCRC, ch); \ + zs->inUse[zs->state_in_ch] = 1; \ + zs->block[zs->nblock] = (uint8_t)ch; \ + zs->nblock++; \ + zs->state_in_ch = zchh; \ + } \ + else \ + /*-- general, uncommon cases --*/ \ + if (zchh != zs->state_in_ch || zs->state_in_len == 255) { \ + if (zs->state_in_ch < 256) \ + add_pair_to_block(zs); \ + zs->state_in_ch = zchh; \ + zs->state_in_len = 1; \ + } else { \ + zs->state_in_len++; \ + } \ 
+} + + +/*---------------------------------------------------*/ +static +void /*Bool*/ copy_input_until_stop(EState* s) +{ + /*Bool progress_in = False;*/ + +#ifdef SAME_CODE_AS_BELOW + if (s->mode == BZ_M_RUNNING) { + /*-- fast track the common case --*/ + while (1) { + /*-- no input? --*/ + if (s->strm->avail_in == 0) break; + /*-- block full? --*/ + if (s->nblock >= s->nblockMAX) break; + /*progress_in = True;*/ + ADD_CHAR_TO_BLOCK(s, (uint32_t)(*(uint8_t*)(s->strm->next_in))); + s->strm->next_in++; + s->strm->avail_in--; + /*s->strm->total_in++;*/ + } + } else +#endif + { + /*-- general, uncommon case --*/ + while (1) { + /*-- no input? --*/ + if (s->strm->avail_in == 0) break; + /*-- block full? --*/ + if (s->nblock >= s->nblockMAX) break; + //# /*-- flush/finish end? --*/ + //# if (s->avail_in_expect == 0) break; + /*progress_in = True;*/ + ADD_CHAR_TO_BLOCK(s, *(uint8_t*)(s->strm->next_in)); + s->strm->next_in++; + s->strm->avail_in--; + /*s->strm->total_in++;*/ + //# s->avail_in_expect--; + } + } + /*return progress_in;*/ +} + + +/*---------------------------------------------------*/ +static +void /*Bool*/ copy_output_until_stop(EState* s) +{ + /*Bool progress_out = False;*/ + + while (1) { + /*-- no output space? --*/ + if (s->strm->avail_out == 0) break; + + /*-- block done? --*/ + if (s->state_out_pos >= s->numZ) break; + + /*progress_out = True;*/ + *(s->strm->next_out) = s->zbits[s->state_out_pos]; + s->state_out_pos++; + s->strm->avail_out--; + s->strm->next_out++; + s->strm->total_out++; + } + /*return progress_out;*/ +} + + +/*---------------------------------------------------*/ +static +void /*Bool*/ handle_compress(bz_stream *strm) +{ + /*Bool progress_in = False;*/ + /*Bool progress_out = False;*/ + EState* s = strm->state; + + while (1) { + if (s->state == BZ_S_OUTPUT) { + /*progress_out |=*/ copy_output_until_stop(s); + if (s->state_out_pos < s->numZ) break; + if (s->mode == BZ_M_FINISHING + //# && s->avail_in_expect == 0 + && s->strm->avail_in == 0 + && isempty_RL(s)) + break; + prepare_new_block(s); + s->state = BZ_S_INPUT; +#ifdef FLUSH_IS_UNUSED + if (s->mode == BZ_M_FLUSHING + && s->avail_in_expect == 0 + && isempty_RL(s)) + break; +#endif + } + + if (s->state == BZ_S_INPUT) { + /*progress_in |=*/ copy_input_until_stop(s); + //#if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) { + if (s->mode != BZ_M_RUNNING && s->strm->avail_in == 0) { + flush_RL(s); + BZ2_compressBlock(s, (s->mode == BZ_M_FINISHING)); + s->state = BZ_S_OUTPUT; + } else + if (s->nblock >= s->nblockMAX) { + BZ2_compressBlock(s, 0); + s->state = BZ_S_OUTPUT; + } else + if (s->strm->avail_in == 0) { + break; + } + } + } + + /*return progress_in || progress_out;*/ +} + + +/*---------------------------------------------------*/ +static +int BZ2_bzCompress(bz_stream *strm, int action) +{ + /*Bool progress;*/ + EState* s; + + s = strm->state; + + switch (s->mode) { + case BZ_M_RUNNING: + if (action == BZ_RUN) { + /*progress =*/ handle_compress(strm); + /*return progress ? 
BZ_RUN_OK : BZ_PARAM_ERROR;*/ + return BZ_RUN_OK; + } +#ifdef FLUSH_IS_UNUSED + else + if (action == BZ_FLUSH) { + //#s->avail_in_expect = strm->avail_in; + s->mode = BZ_M_FLUSHING; + goto case_BZ_M_FLUSHING; + } +#endif + else + /*if (action == BZ_FINISH)*/ { + //#s->avail_in_expect = strm->avail_in; + s->mode = BZ_M_FINISHING; + goto case_BZ_M_FINISHING; + } + +#ifdef FLUSH_IS_UNUSED + case_BZ_M_FLUSHING: + case BZ_M_FLUSHING: + /*if (s->avail_in_expect != s->strm->avail_in) + return BZ_SEQUENCE_ERROR;*/ + /*progress =*/ handle_compress(strm); + if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ) + return BZ_FLUSH_OK; + s->mode = BZ_M_RUNNING; + return BZ_RUN_OK; +#endif + + case_BZ_M_FINISHING: + /*case BZ_M_FINISHING:*/ + default: + /*if (s->avail_in_expect != s->strm->avail_in) + return BZ_SEQUENCE_ERROR;*/ + /*progress =*/ handle_compress(strm); + /*if (!progress) return BZ_SEQUENCE_ERROR;*/ + //#if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ) + //# return BZ_FINISH_OK; + if (s->strm->avail_in > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ) + return BZ_FINISH_OK; + /*s->mode = BZ_M_IDLE;*/ + return BZ_STREAM_END; + } + /* return BZ_OK; --not reached--*/ +} + + +/*---------------------------------------------------*/ +static +void BZ2_bzCompressEnd(bz_stream *strm) +{ + EState* s; + + s = strm->state; + free(s->arr1); + free(s->arr2); + free(s->ftab); + free(s->crc32table); + free(s); +} + + +/*---------------------------------------------------*/ +/*--- Misc convenience stuff ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +#ifdef EXAMPLE_CODE_FOR_MEM_TO_MEM_COMPRESSION +static +int BZ2_bzBuffToBuffCompress(char* dest, + unsigned int* destLen, + char* source, + unsigned int sourceLen, + int blockSize100k) +{ + bz_stream strm; + int ret; + + if (dest == NULL || destLen == NULL + || source == NULL + || blockSize100k < 1 || blockSize100k > 9 + ) { + return BZ_PARAM_ERROR; + } + + BZ2_bzCompressInit(&strm, blockSize100k); + + strm.next_in = source; + strm.next_out = dest; + strm.avail_in = sourceLen; + strm.avail_out = *destLen; + + ret = BZ2_bzCompress(&strm, BZ_FINISH); + if (ret == BZ_FINISH_OK) goto output_overflow; + if (ret != BZ_STREAM_END) goto errhandler; + + /* normal termination */ + *destLen -= strm.avail_out; + BZ2_bzCompressEnd(&strm); + return BZ_OK; + + output_overflow: + BZ2_bzCompressEnd(&strm); + return BZ_OUTBUFF_FULL; + + errhandler: + BZ2_bzCompressEnd(&strm); + return ret; +} +#endif + +/*-------------------------------------------------------------*/ +/*--- end bzlib.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/bz/bzlib.h b/probe-busybox/archival/libarchive/bz/bzlib.h new file mode 100644 index 00000000..1bb811c4 --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/bzlib.h @@ -0,0 +1,65 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Public header file for the library. ---*/ +/*--- bzlib.h ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. 
+ +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ */ + +#define BZ_RUN 0 +#define BZ_FLUSH 1 +#define BZ_FINISH 2 + +#define BZ_OK 0 +#define BZ_RUN_OK 1 +#define BZ_FLUSH_OK 2 +#define BZ_FINISH_OK 3 +#define BZ_STREAM_END 4 +#define BZ_SEQUENCE_ERROR (-1) +#define BZ_PARAM_ERROR (-2) +#define BZ_MEM_ERROR (-3) +#define BZ_DATA_ERROR (-4) +#define BZ_DATA_ERROR_MAGIC (-5) +#define BZ_IO_ERROR (-6) +#define BZ_UNEXPECTED_EOF (-7) +#define BZ_OUTBUFF_FULL (-8) +#define BZ_CONFIG_ERROR (-9) + +typedef struct bz_stream { + void *state; + char *next_in; + char *next_out; + unsigned avail_in; + unsigned avail_out; + /*unsigned long long total_in;*/ + unsigned long long total_out; +} bz_stream; + +/*-- Core (low-level) library functions --*/ + +static void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k); +static int BZ2_bzCompress(bz_stream *strm, int action); +#if ENABLE_FEATURE_CLEAN_UP +static void BZ2_bzCompressEnd(bz_stream *strm); +#endif + +/*-------------------------------------------------------------*/ +/*--- end bzlib.h ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/bz/bzlib_private.h b/probe-busybox/archival/libarchive/bz/bzlib_private.h new file mode 100644 index 00000000..43e674be --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/bzlib_private.h @@ -0,0 +1,219 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Private header file for the library. ---*/ +/*--- bzlib_private.h ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ */ + +/* #include "bzlib.h" */ + +/*-- General stuff. --*/ + +typedef unsigned char Bool; + +#define True ((Bool)1) +#define False ((Bool)0) + +#if BZ_LIGHT_DEBUG +static void bz_assert_fail(int errcode) NORETURN; +#define AssertH(cond, errcode) \ +do { \ + if (!(cond)) \ + bz_assert_fail(errcode); \ +} while (0) +#else +#define AssertH(cond, msg) do { } while (0) +#endif + +#if BZ_DEBUG +#define AssertD(cond, msg) \ +do { \ + if (!(cond)) \ + bb_error_msg_and_die("(debug build): internal error %s", msg); \ +} while (0) +#else +#define AssertD(cond, msg) do { } while (0) +#endif + + +/*-- Header bytes. --*/ + +#define BZ_HDR_B 0x42 /* 'B' */ +#define BZ_HDR_Z 0x5a /* 'Z' */ +#define BZ_HDR_h 0x68 /* 'h' */ +#define BZ_HDR_0 0x30 /* '0' */ + +#define BZ_HDR_BZh0 0x425a6830 + +/*-- Constants for the back end. 
--*/ + +#define BZ_MAX_ALPHA_SIZE 258 +#define BZ_MAX_CODE_LEN 23 + +#define BZ_RUNA 0 +#define BZ_RUNB 1 + +#define BZ_N_GROUPS 6 +#define BZ_G_SIZE 50 +#define BZ_N_ITERS 4 + +#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE)) + + +/*-- Stuff for doing CRCs. --*/ + +#define BZ_INITIALISE_CRC(crcVar) \ +{ \ + crcVar = 0xffffffffL; \ +} + +#define BZ_FINALISE_CRC(crcVar) \ +{ \ + crcVar = ~(crcVar); \ +} + +#define BZ_UPDATE_CRC(s, crcVar, cha) \ +{ \ + crcVar = (crcVar << 8) ^ s->crc32table[(crcVar >> 24) ^ ((uint8_t)cha)]; \ +} + + +/*-- States and modes for compression. --*/ + +#define BZ_M_IDLE 1 +#define BZ_M_RUNNING 2 +#define BZ_M_FLUSHING 3 +#define BZ_M_FINISHING 4 + +#define BZ_S_OUTPUT 1 +#define BZ_S_INPUT 2 + +#define BZ_N_RADIX 2 +#define BZ_N_QSORT 12 +#define BZ_N_SHELL 18 +#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2) + + +/*-- Structure holding all the compression-side stuff. --*/ + +typedef struct EState { + /* pointer back to the struct bz_stream */ + bz_stream *strm; + + /* mode this stream is in, and whether inputting */ + /* or outputting data */ + int32_t mode; + int32_t state; + + /* remembers avail_in when flush/finish requested */ +/* bbox: not needed, strm->avail_in always has the same value */ +/* commented out with '//#' throughout the code */ + /* uint32_t avail_in_expect; */ + + /* for doing the block sorting */ + int32_t origPtr; + uint32_t *arr1; + uint32_t *arr2; + uint32_t *ftab; + + /* aliases for arr1 and arr2 */ + uint32_t *ptr; + uint8_t *block; + uint16_t *mtfv; + uint8_t *zbits; + + /* guess what */ + uint32_t *crc32table; + + /* run-length-encoding of the input */ + uint32_t state_in_ch; + int32_t state_in_len; + + /* input and output limits and current posns */ + int32_t nblock; + int32_t nblockMAX; + int32_t numZ; + int32_t state_out_pos; + + /* the buffer for bit stream creation */ + uint32_t bsBuff; + int32_t bsLive; + + /* block and combined CRCs */ + uint32_t blockCRC; + uint32_t combinedCRC; + + /* misc administratium */ + int32_t blockNo; + int32_t blockSize100k; + + /* stuff for coding the MTF values */ + int32_t nMTF; + + /* map of bytes used in block */ + int32_t nInUse; + Bool inUse[256] ALIGNED(sizeof(long)); + uint8_t unseqToSeq[256]; + + /* stuff for coding the MTF values */ + int32_t mtfFreq [BZ_MAX_ALPHA_SIZE]; + uint8_t selector [BZ_MAX_SELECTORS]; + uint8_t selectorMtf[BZ_MAX_SELECTORS]; + + uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + + /* stack-saving measures: these can be local, but they are too big */ + int32_t sendMTFValues__code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + int32_t sendMTFValues__rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; +#if CONFIG_BZIP2_FAST >= 5 + /* second dimension: only 3 needed; 4 makes index calculations faster */ + uint32_t sendMTFValues__len_pack[BZ_MAX_ALPHA_SIZE][4]; +#endif + int32_t BZ2_hbMakeCodeLengths__heap [BZ_MAX_ALPHA_SIZE + 2]; + int32_t BZ2_hbMakeCodeLengths__weight[BZ_MAX_ALPHA_SIZE * 2]; + int32_t BZ2_hbMakeCodeLengths__parent[BZ_MAX_ALPHA_SIZE * 2]; + + int32_t mainSort__runningOrder[256]; + int32_t mainSort__copyStart[256]; + int32_t mainSort__copyEnd[256]; +} EState; + + +/*-- compression. 
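+ (Forward declarations: per block, BZ2_compressBlock() in compress.c calls
+ BZ2_blockSort(), then generateMTFValues() and sendMTFValues(), which use
+ BZ2_hbMakeCodeLengths() and BZ2_hbAssignCodes() to build the Huffman tables.)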
--*/ + +static void +BZ2_blockSort(EState*); + +static void +BZ2_compressBlock(EState*, int); + +static void +BZ2_bsInitWrite(EState*); + +static void +BZ2_hbAssignCodes(int32_t*, uint8_t*, int32_t, int32_t, int32_t); + +static void +BZ2_hbMakeCodeLengths(EState*, uint8_t*, int32_t*, int32_t, int32_t); + +/*-------------------------------------------------------------*/ +/*--- end bzlib_private.h ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/bz/compress.c b/probe-busybox/archival/libarchive/bz/compress.c new file mode 100644 index 00000000..2d994685 --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/compress.c @@ -0,0 +1,677 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Compression machinery (not incl block sorting) ---*/ +/*--- compress.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ */ + +/* CHANGES + * 0.9.0 -- original version. + * 0.9.0a/b -- no changes in this file. + * 0.9.0c -- changed setting of nGroups in sendMTFValues() + * so as to do a bit better on small files +*/ + +/* #include "bzlib_private.h" */ + +/*---------------------------------------------------*/ +/*--- Bit stream I/O ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +static +void BZ2_bsInitWrite(EState* s) +{ + s->bsLive = 0; + s->bsBuff = 0; +} + + +/*---------------------------------------------------*/ +static NOINLINE +void bsFinishWrite(EState* s) +{ + while (s->bsLive > 0) { + s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24); + s->numZ++; + s->bsBuff <<= 8; + s->bsLive -= 8; + } +} + + +/*---------------------------------------------------*/ +static +/* Helps only on level 5, on other levels hurts. ? 
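+ bsW() below accumulates bits MSB-first in the 32-bit bsBuff; before adding
+ n new bits it flushes whole bytes from the top while 8 or more are pending.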
*/ +#if CONFIG_BZIP2_FAST >= 5 +ALWAYS_INLINE +#endif +void bsW(EState* s, int32_t n, uint32_t v) +{ + while (s->bsLive >= 8) { + s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24); + s->numZ++; + s->bsBuff <<= 8; + s->bsLive -= 8; + } + s->bsBuff |= (v << (32 - s->bsLive - n)); + s->bsLive += n; +} + + +/*---------------------------------------------------*/ +static +void bsPutU32(EState* s, unsigned u) +{ + bsW(s, 8, (u >> 24) & 0xff); + bsW(s, 8, (u >> 16) & 0xff); + bsW(s, 8, (u >> 8) & 0xff); + bsW(s, 8, u & 0xff); +} + + +/*---------------------------------------------------*/ +static +void bsPutU16(EState* s, unsigned u) +{ + bsW(s, 8, (u >> 8) & 0xff); + bsW(s, 8, u & 0xff); +} + + +/*---------------------------------------------------*/ +/*--- The back end proper ---*/ +/*---------------------------------------------------*/ + +/*---------------------------------------------------*/ +static +void makeMaps_e(EState* s) +{ + int i; + s->nInUse = 0; + for (i = 0; i < 256; i++) { + if (s->inUse[i]) { + s->unseqToSeq[i] = s->nInUse; + s->nInUse++; + } + } +} + + +/*---------------------------------------------------*/ +static NOINLINE +void generateMTFValues(EState* s) +{ + uint8_t yy[256]; + int32_t i, j; + int32_t zPend; + int32_t wr; + int32_t EOB; + + /* + * After sorting (eg, here), + * s->arr1[0 .. s->nblock-1] holds sorted order, + * and + * ((uint8_t*)s->arr2)[0 .. s->nblock-1] + * holds the original block data. + * + * The first thing to do is generate the MTF values, + * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1]. + * + * Because there are strictly fewer or equal MTF values + * than block values, ptr values in this area are overwritten + * with MTF values only when they are no longer needed. + * + * The final compressed bitstream is generated into the + * area starting at &((uint8_t*)s->arr2)[s->nblock] + * + * These storage aliases are set up in bzCompressInit(), + * except for the last one, which is arranged in + * compressBlock(). 
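+ *
+ * The MTF stream itself uses symbol j+1 for a byte found at MTF position j,
+ * RUNA/RUNB for runs of zeroes (bijective base-2: a run of 5 zeroes is
+ * RUNA RUNB = 1 + 2*2), and EOB = nInUse+1 as the block terminator.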
+ */ + uint32_t* ptr = s->ptr; + uint8_t* block = s->block; + uint16_t* mtfv = s->mtfv; + + makeMaps_e(s); + EOB = s->nInUse+1; + + for (i = 0; i <= EOB; i++) + s->mtfFreq[i] = 0; + + wr = 0; + zPend = 0; + for (i = 0; i < s->nInUse; i++) + yy[i] = (uint8_t) i; + + for (i = 0; i < s->nblock; i++) { + uint8_t ll_i; + AssertD(wr <= i, "generateMTFValues(1)"); + j = ptr[i] - 1; + if (j < 0) + j += s->nblock; + ll_i = s->unseqToSeq[block[j]]; + AssertD(ll_i < s->nInUse, "generateMTFValues(2a)"); + + if (yy[0] == ll_i) { + zPend++; + } else { + if (zPend > 0) { + zPend--; + while (1) { + if (zPend & 1) { + mtfv[wr] = BZ_RUNB; wr++; + s->mtfFreq[BZ_RUNB]++; + } else { + mtfv[wr] = BZ_RUNA; wr++; + s->mtfFreq[BZ_RUNA]++; + } + if (zPend < 2) break; + zPend = (uint32_t)(zPend - 2) / 2; + /* bbox: unsigned div is easier */ + }; + zPend = 0; + } + { + register uint8_t rtmp; + register uint8_t* ryy_j; + register uint8_t rll_i; + rtmp = yy[1]; + yy[1] = yy[0]; + ryy_j = &(yy[1]); + rll_i = ll_i; + while (rll_i != rtmp) { + register uint8_t rtmp2; + ryy_j++; + rtmp2 = rtmp; + rtmp = *ryy_j; + *ryy_j = rtmp2; + }; + yy[0] = rtmp; + j = ryy_j - &(yy[0]); + mtfv[wr] = j+1; + wr++; + s->mtfFreq[j+1]++; + } + } + } + + if (zPend > 0) { + zPend--; + while (1) { + if (zPend & 1) { + mtfv[wr] = BZ_RUNB; + wr++; + s->mtfFreq[BZ_RUNB]++; + } else { + mtfv[wr] = BZ_RUNA; + wr++; + s->mtfFreq[BZ_RUNA]++; + } + if (zPend < 2) + break; + zPend = (uint32_t)(zPend - 2) / 2; + /* bbox: unsigned div is easier */ + }; + zPend = 0; + } + + mtfv[wr] = EOB; + wr++; + s->mtfFreq[EOB]++; + + s->nMTF = wr; +} + + +/*---------------------------------------------------*/ +#define BZ_LESSER_ICOST 0 +#define BZ_GREATER_ICOST 15 + +static NOINLINE +void sendMTFValues(EState* s) +{ + int32_t v, t, i, j, gs, ge, bt, bc, iter; + int32_t nSelectors, alphaSize, minLen, maxLen, selCtr; + int32_t nGroups; + + /* + * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + * is a global since the decoder also needs it. + * + * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE]; + * are also globals only used in this proc. + * Made global to keep stack frame size small. + */ +#define code sendMTFValues__code +#define rfreq sendMTFValues__rfreq +#define len_pack sendMTFValues__len_pack + + uint16_t cost[BZ_N_GROUPS]; + int32_t fave[BZ_N_GROUPS]; + + uint16_t* mtfv = s->mtfv; + + alphaSize = s->nInUse + 2; + for (t = 0; t < BZ_N_GROUPS; t++) + for (v = 0; v < alphaSize; v++) + s->len[t][v] = BZ_GREATER_ICOST; + + /*--- Decide how many coding tables to use ---*/ + AssertH(s->nMTF > 0, 3001); + if (s->nMTF < 200) nGroups = 2; else + if (s->nMTF < 600) nGroups = 3; else + if (s->nMTF < 1200) nGroups = 4; else + if (s->nMTF < 2400) nGroups = 5; else + nGroups = 6; + + /*--- Generate an initial set of coding tables ---*/ + { + int32_t nPart, remF, tFreq, aFreq; + + nPart = nGroups; + remF = s->nMTF; + gs = 0; + while (nPart > 0) { + tFreq = remF / nPart; + ge = gs - 1; + aFreq = 0; + while (aFreq < tFreq && ge < alphaSize-1) { + ge++; + aFreq += s->mtfFreq[ge]; + } + + if (ge > gs + && nPart != nGroups && nPart != 1 + && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */ + ) { + aFreq -= s->mtfFreq[ge]; + ge--; + } + + for (v = 0; v < alphaSize; v++) + if (v >= gs && v <= ge) + s->len[nPart-1][v] = BZ_LESSER_ICOST; + else + s->len[nPart-1][v] = BZ_GREATER_ICOST; + + nPart--; + gs = ge + 1; + remF -= aFreq; + } + } + + /* + * Iterate up to BZ_N_ITERS times to improve the tables. 
+ */ + for (iter = 0; iter < BZ_N_ITERS; iter++) { + for (t = 0; t < nGroups; t++) + fave[t] = 0; + + for (t = 0; t < nGroups; t++) + for (v = 0; v < alphaSize; v++) + s->rfreq[t][v] = 0; + +#if CONFIG_BZIP2_FAST >= 5 + /* + * Set up an auxiliary length table which is used to fast-track + * the common case (nGroups == 6). + */ + if (nGroups == 6) { + for (v = 0; v < alphaSize; v++) { + s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v]; + s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v]; + s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v]; + } + } +#endif + nSelectors = 0; + gs = 0; + while (1) { + /*--- Set group start & end marks. --*/ + if (gs >= s->nMTF) + break; + ge = gs + BZ_G_SIZE - 1; + if (ge >= s->nMTF) + ge = s->nMTF-1; + + /* + * Calculate the cost of this group as coded + * by each of the coding tables. + */ + for (t = 0; t < nGroups; t++) + cost[t] = 0; +#if CONFIG_BZIP2_FAST >= 5 + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ + register uint32_t cost01, cost23, cost45; + register uint16_t icv; + cost01 = cost23 = cost45 = 0; +#define BZ_ITER(nn) \ + icv = mtfv[gs+(nn)]; \ + cost01 += s->len_pack[icv][0]; \ + cost23 += s->len_pack[icv][1]; \ + cost45 += s->len_pack[icv][2]; + BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4); + BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9); + BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14); + BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19); + BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24); + BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29); + BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34); + BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39); + BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44); + BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49); +#undef BZ_ITER + cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16; + cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16; + cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16; + } else +#endif + { + /*--- slow version which correctly handles all situations ---*/ + for (i = gs; i <= ge; i++) { + uint16_t icv = mtfv[i]; + for (t = 0; t < nGroups; t++) + cost[t] += s->len[t][icv]; + } + } + /* + * Find the coding table which is best for this group, + * and record its identity in the selector table. + */ + /*bc = 999999999;*/ + /*bt = -1;*/ + bc = cost[0]; + bt = 0; + for (t = 1 /*0*/; t < nGroups; t++) { + if (cost[t] < bc) { + bc = cost[t]; + bt = t; + } + } + fave[bt]++; + s->selector[nSelectors] = bt; + nSelectors++; + + /* + * Increment the symbol frequencies for the selected table. + */ +/* 1% faster compress. 
+800 bytes */ +#if CONFIG_BZIP2_FAST >= 4 + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ +#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++ + BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4); + BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9); + BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14); + BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19); + BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24); + BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29); + BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34); + BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39); + BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44); + BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49); +#undef BZ_ITUR + gs = ge + 1; + } else +#endif + { + /*--- slow version which correctly handles all situations ---*/ + while (gs <= ge) { + s->rfreq[bt][mtfv[gs]]++; + gs++; + } + /* already is: gs = ge + 1; */ + } + } + + /* + * Recompute the tables based on the accumulated frequencies. + */ + /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See + * comment in huffman.c for details. */ + for (t = 0; t < nGroups; t++) + BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/); + } + + AssertH(nGroups < 8, 3002); + AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003); + + /*--- Compute MTF values for the selectors. ---*/ + { + uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp; + + for (i = 0; i < nGroups; i++) + pos[i] = i; + for (i = 0; i < nSelectors; i++) { + ll_i = s->selector[i]; + j = 0; + tmp = pos[j]; + while (ll_i != tmp) { + j++; + tmp2 = tmp; + tmp = pos[j]; + pos[j] = tmp2; + }; + pos[0] = tmp; + s->selectorMtf[i] = j; + } + }; + + /*--- Assign actual codes for the tables. --*/ + for (t = 0; t < nGroups; t++) { + minLen = 32; + maxLen = 0; + for (i = 0; i < alphaSize; i++) { + if (s->len[t][i] > maxLen) maxLen = s->len[t][i]; + if (s->len[t][i] < minLen) minLen = s->len[t][i]; + } + AssertH(!(maxLen > 17 /*20*/), 3004); + AssertH(!(minLen < 1), 3005); + BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize); + } + + /*--- Transmit the mapping table. ---*/ + { + /* bbox: optimized a bit more than in bzip2 */ + int inUse16 = 0; + for (i = 0; i < 16; i++) { + if (sizeof(long) <= 4) { + inUse16 = inUse16*2 + + ((*(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 0]) + | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 4]) + | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 8]) + | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 12])) != 0); + } else { /* Our CPU can do better */ + inUse16 = inUse16*2 + + ((*(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 0]) + | *(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 8])) != 0); + } + } + + bsW(s, 16, inUse16); + + inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */ + for (i = 0; i < 16; i++) { + if (inUse16 < 0) { + unsigned v16 = 0; + for (j = 0; j < 16; j++) + v16 = v16*2 + s->inUse[i * 16 + j]; + bsW(s, 16, v16); + } + inUse16 <<= 1; + } + } + + /*--- Now the selectors. ---*/ + bsW(s, 3, nGroups); + bsW(s, 15, nSelectors); + for (i = 0; i < nSelectors; i++) { + for (j = 0; j < s->selectorMtf[i]; j++) + bsW(s, 1, 1); + bsW(s, 1, 0); + } + + /*--- Now the coding tables. 
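+ Each table is sent as a 5-bit starting length followed, per symbol, by
+ delta codes: '10' means increment, '11' means decrement, and a single
+ '0' moves on to the next symbol.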
---*/ + for (t = 0; t < nGroups; t++) { + int32_t curr = s->len[t][0]; + bsW(s, 5, curr); + for (i = 0; i < alphaSize; i++) { + while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }; + while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }; + bsW(s, 1, 0); + } + } + + /*--- And finally, the block data proper ---*/ + selCtr = 0; + gs = 0; + while (1) { + if (gs >= s->nMTF) + break; + ge = gs + BZ_G_SIZE - 1; + if (ge >= s->nMTF) + ge = s->nMTF-1; + AssertH(s->selector[selCtr] < nGroups, 3006); + +/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */ +#if 0 + if (nGroups == 6 && 50 == ge-gs+1) { + /*--- fast track the common case ---*/ + uint16_t mtfv_i; + uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]); + int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]); +#define BZ_ITAH(nn) \ + mtfv_i = mtfv[gs+(nn)]; \ + bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i]) + BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4); + BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9); + BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14); + BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19); + BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24); + BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29); + BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34); + BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39); + BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44); + BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49); +#undef BZ_ITAH + gs = ge+1; + } else +#endif + { + /*--- slow version which correctly handles all situations ---*/ + /* code is bit bigger, but moves multiply out of the loop */ + uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]); + int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]); + while (gs <= ge) { + bsW(s, + s_len_sel_selCtr[mtfv[gs]], + s_code_sel_selCtr[mtfv[gs]] + ); + gs++; + } + /* already is: gs = ge+1; */ + } + selCtr++; + } + AssertH(selCtr == nSelectors, 3007); +#undef code +#undef rfreq +#undef len_pack +} + + +/*---------------------------------------------------*/ +static +void BZ2_compressBlock(EState* s, int is_last_block) +{ + if (s->nblock > 0) { + BZ_FINALISE_CRC(s->blockCRC); + s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31); + s->combinedCRC ^= s->blockCRC; + if (s->blockNo > 1) + s->numZ = 0; + + BZ2_blockSort(s); + } + + s->zbits = &((uint8_t*)s->arr2)[s->nblock]; + + /*-- If this is the first block, create the stream header. --*/ + if (s->blockNo == 1) { + BZ2_bsInitWrite(s); + /*bsPutU8(s, BZ_HDR_B);*/ + /*bsPutU8(s, BZ_HDR_Z);*/ + /*bsPutU8(s, BZ_HDR_h);*/ + /*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/ + bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k); + } + + if (s->nblock > 0) { + /*bsPutU8(s, 0x31);*/ + /*bsPutU8(s, 0x41);*/ + /*bsPutU8(s, 0x59);*/ + /*bsPutU8(s, 0x26);*/ + bsPutU32(s, 0x31415926); + /*bsPutU8(s, 0x53);*/ + /*bsPutU8(s, 0x59);*/ + bsPutU16(s, 0x5359); + + /*-- Now the block's CRC, so it is in a known place. --*/ + bsPutU32(s, s->blockCRC); + + /* + * Now a single bit indicating (non-)randomisation. + * As of version 0.9.5, we use a better sorting algorithm + * which makes randomisation unnecessary. So always set + * the randomised bit to 'no'. Of course, the decoder + * still needs to be able to handle randomised blocks + * so as to maintain backwards compatibility with + * older versions of bzip2. 
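+ *
+ * (Block layout so far: 48-bit magic 0x314159265359, 32-bit block CRC,
+ * this 1-bit flag, then the 24-bit origPtr and the coded MTF data.)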
+ */ + bsW(s, 1, 0); + + bsW(s, 24, s->origPtr); + generateMTFValues(s); + sendMTFValues(s); + } + + /*-- If this is the last block, add the stream trailer. --*/ + if (is_last_block) { + /*bsPutU8(s, 0x17);*/ + /*bsPutU8(s, 0x72);*/ + /*bsPutU8(s, 0x45);*/ + /*bsPutU8(s, 0x38);*/ + bsPutU32(s, 0x17724538); + /*bsPutU8(s, 0x50);*/ + /*bsPutU8(s, 0x90);*/ + bsPutU16(s, 0x5090); + bsPutU32(s, s->combinedCRC); + bsFinishWrite(s); + } +} + + +/*-------------------------------------------------------------*/ +/*--- end compress.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/bz/huffman.c b/probe-busybox/archival/libarchive/bz/huffman.c new file mode 100644 index 00000000..bbec11ad --- /dev/null +++ b/probe-busybox/archival/libarchive/bz/huffman.c @@ -0,0 +1,229 @@ +/* + * bzip2 is written by Julian Seward . + * Adapted for busybox by Denys Vlasenko . + * See README and LICENSE files in this directory for more information. + */ + +/*-------------------------------------------------------------*/ +/*--- Huffman coding low-level stuff ---*/ +/*--- huffman.c ---*/ +/*-------------------------------------------------------------*/ + +/* ------------------------------------------------------------------ +This file is part of bzip2/libbzip2, a program and library for +lossless, block-sorting data compression. + +bzip2/libbzip2 version 1.0.4 of 20 December 2006 +Copyright (C) 1996-2006 Julian Seward + +Please read the WARNING, DISCLAIMER and PATENTS sections in the +README file. + +This program is released under the terms of the license contained +in the file LICENSE. +------------------------------------------------------------------ */ + +/* #include "bzlib_private.h" */ + +/*---------------------------------------------------*/ +#define WEIGHTOF(zz0) ((zz0) & 0xffffff00) +#define DEPTHOF(zz1) ((zz1) & 0x000000ff) +#define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3)) + +#define ADDWEIGHTS(zw1,zw2) \ + (WEIGHTOF(zw1)+WEIGHTOF(zw2)) | \ + (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2))) + +#define UPHEAP(z) \ +{ \ + int32_t zz, tmp; \ + zz = z; \ + tmp = heap[zz]; \ + while (weight[tmp] < weight[heap[zz >> 1]]) { \ + heap[zz] = heap[zz >> 1]; \ + zz >>= 1; \ + } \ + heap[zz] = tmp; \ +} + + +/* 90 bytes, 0.3% of overall compress speed */ +#if CONFIG_BZIP2_FAST >= 1 + +/* macro works better than inline (gcc 4.2.1) */ +#define DOWNHEAP1(heap, weight, Heap) \ +{ \ + int32_t zz, yy, tmp; \ + zz = 1; \ + tmp = heap[zz]; \ + while (1) { \ + yy = zz << 1; \ + if (yy > nHeap) \ + break; \ + if (yy < nHeap \ + && weight[heap[yy+1]] < weight[heap[yy]]) \ + yy++; \ + if (weight[tmp] < weight[heap[yy]]) \ + break; \ + heap[zz] = heap[yy]; \ + zz = yy; \ + } \ + heap[zz] = tmp; \ +} + +#else + +static +void DOWNHEAP1(int32_t *heap, int32_t *weight, int32_t nHeap) +{ + int32_t zz, yy, tmp; + zz = 1; + tmp = heap[zz]; + while (1) { + yy = zz << 1; + if (yy > nHeap) + break; + if (yy < nHeap + && weight[heap[yy + 1]] < weight[heap[yy]]) + yy++; + if (weight[tmp] < weight[heap[yy]]) + break; + heap[zz] = heap[yy]; + zz = yy; + } + heap[zz] = tmp; +} + +#endif + +/*---------------------------------------------------*/ +static +void BZ2_hbMakeCodeLengths(EState *s, + uint8_t *len, + int32_t *freq, + int32_t alphaSize, + int32_t maxLen) +{ + /* + * Nodes and heap entries run from 1. Entry 0 + * for both the heap and nodes is a sentinel. 
+ */ + int32_t nNodes, nHeap, n1, n2, i, j, k; + Bool tooLong; + + /* bbox: moved to EState to save stack + int32_t heap [BZ_MAX_ALPHA_SIZE + 2]; + int32_t weight[BZ_MAX_ALPHA_SIZE * 2]; + int32_t parent[BZ_MAX_ALPHA_SIZE * 2]; + */ +#define heap (s->BZ2_hbMakeCodeLengths__heap) +#define weight (s->BZ2_hbMakeCodeLengths__weight) +#define parent (s->BZ2_hbMakeCodeLengths__parent) + + for (i = 0; i < alphaSize; i++) + weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8; + + while (1) { + nNodes = alphaSize; + nHeap = 0; + + heap[0] = 0; + weight[0] = 0; + parent[0] = -2; + + for (i = 1; i <= alphaSize; i++) { + parent[i] = -1; + nHeap++; + heap[nHeap] = i; + UPHEAP(nHeap); + } + + AssertH(nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001); + + while (nHeap > 1) { + n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap); + n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap); + nNodes++; + parent[n1] = parent[n2] = nNodes; + weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]); + parent[nNodes] = -1; + nHeap++; + heap[nHeap] = nNodes; + UPHEAP(nHeap); + } + + AssertH(nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002); + + tooLong = False; + for (i = 1; i <= alphaSize; i++) { + j = 0; + k = i; + while (parent[k] >= 0) { + k = parent[k]; + j++; + } + len[i-1] = j; + if (j > maxLen) + tooLong = True; + } + + if (!tooLong) + break; + + /* 17 Oct 04: keep-going condition for the following loop used + to be 'i < alphaSize', which missed the last element, + theoretically leading to the possibility of the compressor + looping. However, this count-scaling step is only needed if + one of the generated Huffman code words is longer than + maxLen, which up to and including version 1.0.2 was 20 bits, + which is extremely unlikely. In version 1.0.3 maxLen was + changed to 17 bits, which has minimal effect on compression + ratio, but does mean this scaling step is used from time to + time, enough to verify that it works. + + This means that bzip2-1.0.3 and later will only produce + Huffman codes with a maximum length of 17 bits. However, in + order to preserve backwards compatibility with bitstreams + produced by versions pre-1.0.3, the decompressor must still + handle lengths of up to 20. */ + + for (i = 1; i <= alphaSize; i++) { + j = weight[i] >> 8; + /* bbox: yes, it is a signed division. + * don't replace with shift! */ + j = 1 + (j / 2); + weight[i] = j << 8; + } + } +#undef heap +#undef weight +#undef parent +} + + +/*---------------------------------------------------*/ +static +void BZ2_hbAssignCodes(int32_t *code, + uint8_t *length, + int32_t minLen, + int32_t maxLen, + int32_t alphaSize) +{ + int32_t n, vec, i; + + vec = 0; + for (n = minLen; n <= maxLen; n++) { + for (i = 0; i < alphaSize; i++) { + if (length[i] == n) { + code[i] = vec; + vec++; + }; + } + vec <<= 1; + } +} + + +/*-------------------------------------------------------------*/ +/*--- end huffman.c ---*/ +/*-------------------------------------------------------------*/ diff --git a/probe-busybox/archival/libarchive/common.c b/probe-busybox/archival/libarchive/common.c new file mode 100644 index 00000000..389cb785 --- /dev/null +++ b/probe-busybox/archival/libarchive/common.c @@ -0,0 +1,9 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +const char cpio_TRAILER[] ALIGN1 = "TRAILER!!!"; diff --git a/probe-busybox/archival/libarchive/data_align.c b/probe-busybox/archival/libarchive/data_align.c new file mode 100644 index 00000000..a6b84a44 --- /dev/null +++ b/probe-busybox/archival/libarchive/data_align.c @@ -0,0 +1,15 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC data_align(archive_handle_t *archive_handle, unsigned boundary) +{ + unsigned skip_amount = (boundary - (archive_handle->offset % boundary)) % boundary; + + archive_handle->seek(archive_handle->src_fd, skip_amount); + archive_handle->offset += skip_amount; +} diff --git a/probe-busybox/archival/libarchive/data_extract_all.c b/probe-busybox/archival/libarchive/data_extract_all.c new file mode 100644 index 00000000..06a9c5c7 --- /dev/null +++ b/probe-busybox/archival/libarchive/data_extract_all.c @@ -0,0 +1,250 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC data_extract_all(archive_handle_t *archive_handle) +{ + file_header_t *file_header = archive_handle->file_header; + int dst_fd; + int res; + char *hard_link; +#if ENABLE_FEATURE_TAR_LONG_OPTIONS + char *dst_name; +#else +# define dst_name (file_header->name) +#endif + +#ifndef ENABLE_FEATURE_TAR_SELINUX +#define ENABLE_FEATURE_TAR_SELINUX 0 +#endif +#if ENABLE_FEATURE_TAR_SELINUX + char *sctx = archive_handle->tar__sctx[PAX_NEXT_FILE]; + if (!sctx) + sctx = archive_handle->tar__sctx[PAX_GLOBAL]; + if (sctx) { /* setfscreatecon is 4 syscalls, avoid if possible */ + setfscreatecon(sctx); + free(archive_handle->tar__sctx[PAX_NEXT_FILE]); + archive_handle->tar__sctx[PAX_NEXT_FILE] = NULL; + } +#endif + + /* Hard links are encoded as regular files of size 0 + * with a nonempty link field */ + hard_link = NULL; + if (S_ISREG(file_header->mode) && file_header->size == 0) + hard_link = file_header->link_target; + +#if ENABLE_FEATURE_TAR_LONG_OPTIONS + dst_name = file_header->name; + if (archive_handle->tar__strip_components) { + unsigned n = archive_handle->tar__strip_components; + do { + dst_name = strchr(dst_name, '/'); + if (!dst_name || dst_name[1] == '\0') { + data_skip(archive_handle); + goto ret; + } + dst_name++; + /* + * Link target is shortened only for hardlinks: + * softlinks restored unchanged. + */ + if (hard_link) { +// GNU tar 1.26 does not check that we reached end of link name: +// if "dir/hardlink" is hardlinked to "file", +// tar xvf a.tar --strip-components=1 says: +// tar: hardlink: Cannot hard link to '': No such file or directory +// and continues processing. We silently skip such entries. 
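+// (Example: --strip-components=1 maps "pkg/bin/app" to "bin/app"; an entry
+// whose whole name is consumed by the stripping is skipped, and so is a
+// hardlink whose target name is.)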
+ hard_link = strchr(hard_link, '/'); + if (!hard_link || hard_link[1] == '\0') { + data_skip(archive_handle); + goto ret; + } + hard_link++; + } + } while (--n != 0); + } +#endif + + if (archive_handle->ah_flags & ARCHIVE_CREATE_LEADING_DIRS) { + char *slash = strrchr(dst_name, '/'); + if (slash) { + *slash = '\0'; + bb_make_directory(dst_name, -1, FILEUTILS_RECUR); + *slash = '/'; + } + } + + if (archive_handle->ah_flags & ARCHIVE_UNLINK_OLD) { + /* Remove the entry if it exists */ + if (!S_ISDIR(file_header->mode)) { + if (hard_link) { + /* Ugly special case: + * tar cf t.tar hardlink1 hardlink2 hardlink1 + * results in this tarball structure: + * hardlink1 + * hardlink2 -> hardlink1 + * hardlink1 -> hardlink1 <== !!! + */ + if (strcmp(hard_link, dst_name) == 0) + goto ret; + } + /* Proceed with deleting */ + if (unlink(dst_name) == -1 + && errno != ENOENT + ) { + bb_perror_msg_and_die("can't remove old file %s", + dst_name); + } + } + } + else if (archive_handle->ah_flags & ARCHIVE_EXTRACT_NEWER) { + /* Remove the existing entry if its older than the extracted entry */ + struct stat existing_sb; + if (lstat(dst_name, &existing_sb) == -1) { + if (errno != ENOENT) { + bb_perror_msg_and_die("can't stat old file"); + } + } + else if (existing_sb.st_mtime >= file_header->mtime) { + if (!(archive_handle->ah_flags & ARCHIVE_EXTRACT_QUIET) + && !S_ISDIR(file_header->mode) + ) { + bb_error_msg("%s not created: newer or " + "same age file exists", dst_name); + } + data_skip(archive_handle); + goto ret; + } + else if ((unlink(dst_name) == -1) && (errno != EISDIR)) { + bb_perror_msg_and_die("can't remove old file %s", + dst_name); + } + } + + /* Handle hard links separately */ + if (hard_link) { + res = link(hard_link, dst_name); + if (res != 0 && !(archive_handle->ah_flags & ARCHIVE_EXTRACT_QUIET)) { + bb_perror_msg("can't create %slink " + "from %s to %s", "hard", + dst_name, + hard_link); + } + /* Hardlinks have no separate mode/ownership, skip chown/chmod */ + goto ret; + } + + /* Create the filesystem entry */ + switch (file_header->mode & S_IFMT) { + case S_IFREG: { + /* Regular file */ + char *dst_nameN; + int flags = O_WRONLY | O_CREAT | O_EXCL; + if (archive_handle->ah_flags & ARCHIVE_O_TRUNC) + flags = O_WRONLY | O_CREAT | O_TRUNC; + dst_nameN = dst_name; +#ifdef ARCHIVE_REPLACE_VIA_RENAME + if (archive_handle->ah_flags & ARCHIVE_REPLACE_VIA_RENAME) + /* rpm-style temp file name */ + dst_nameN = xasprintf("%s;%x", dst_name, (int)getpid()); +#endif + dst_fd = xopen3(dst_nameN, + flags, + file_header->mode + ); + bb_copyfd_exact_size(archive_handle->src_fd, dst_fd, file_header->size); + close(dst_fd); +#ifdef ARCHIVE_REPLACE_VIA_RENAME + if (archive_handle->ah_flags & ARCHIVE_REPLACE_VIA_RENAME) { + xrename(dst_nameN, dst_name); + free(dst_nameN); + } +#endif + break; + } + case S_IFDIR: + res = mkdir(dst_name, file_header->mode); + if ((res == -1) + && (errno != EISDIR) /* btw, Linux doesn't return this */ + && (errno != EEXIST) + && !(archive_handle->ah_flags & ARCHIVE_EXTRACT_QUIET) + ) { + bb_perror_msg("can't make dir %s", dst_name); + } + break; + case S_IFLNK: + /* Symlink */ +//TODO: what if file_header->link_target == NULL (say, corrupted tarball?) 
+ res = symlink(file_header->link_target, dst_name); + if (res != 0 + && !(archive_handle->ah_flags & ARCHIVE_EXTRACT_QUIET) + ) { + bb_perror_msg("can't create %slink " + "from %s to %s", "sym", + dst_name, + file_header->link_target); + } + break; + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + res = mknod(dst_name, file_header->mode, file_header->device); + if ((res == -1) + && !(archive_handle->ah_flags & ARCHIVE_EXTRACT_QUIET) + ) { + bb_perror_msg("can't create node %s", dst_name); + } + break; + default: + bb_error_msg_and_die("unrecognized file type"); + } + + if (!S_ISLNK(file_header->mode)) { + if (!(archive_handle->ah_flags & ARCHIVE_DONT_RESTORE_OWNER)) { + uid_t uid = file_header->uid; + gid_t gid = file_header->gid; +#if ENABLE_FEATURE_TAR_UNAME_GNAME + if (!(archive_handle->ah_flags & ARCHIVE_NUMERIC_OWNER)) { + if (file_header->tar__uname) { +//TODO: cache last name/id pair? + struct passwd *pwd = getpwnam(file_header->tar__uname); + if (pwd) uid = pwd->pw_uid; + } + if (file_header->tar__gname) { + struct group *grp = getgrnam(file_header->tar__gname); + if (grp) gid = grp->gr_gid; + } + } +#endif + /* GNU tar 1.15.1 uses chown, not lchown */ + chown(dst_name, uid, gid); + } + /* uclibc has no lchmod, glibc is even stranger - + * it has lchmod which seems to do nothing! + * so we use chmod... */ + if (!(archive_handle->ah_flags & ARCHIVE_DONT_RESTORE_PERM)) { + chmod(dst_name, file_header->mode); + } + if (archive_handle->ah_flags & ARCHIVE_RESTORE_DATE) { + struct timeval t[2]; + + t[1].tv_sec = t[0].tv_sec = file_header->mtime; + t[1].tv_usec = t[0].tv_usec = 0; + utimes(dst_name, t); + } + } + + ret: ; +#if ENABLE_FEATURE_TAR_SELINUX + if (sctx) { + /* reset the context after creating an entry */ + setfscreatecon(NULL); + } +#endif +} diff --git a/probe-busybox/archival/libarchive/data_extract_to_command.c b/probe-busybox/archival/libarchive/data_extract_to_command.c new file mode 100644 index 00000000..5d876938 --- /dev/null +++ b/probe-busybox/archival/libarchive/data_extract_to_command.c @@ -0,0 +1,137 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +enum { + //TAR_FILETYPE, + TAR_MODE, + TAR_FILENAME, + TAR_REALNAME, +#if ENABLE_FEATURE_TAR_UNAME_GNAME + TAR_UNAME, + TAR_GNAME, +#endif + TAR_SIZE, + TAR_UID, + TAR_GID, + TAR_MAX, +}; + +static const char *const tar_var[] = { + // "FILETYPE", + "MODE", + "FILENAME", + "REALNAME", +#if ENABLE_FEATURE_TAR_UNAME_GNAME + "UNAME", + "GNAME", +#endif + "SIZE", + "UID", + "GID", +}; + +static void xputenv(char *str) +{ + if (putenv(str)) + bb_error_msg_and_die(bb_msg_memory_exhausted); +} + +static void str2env(char *env[], int idx, const char *str) +{ + env[idx] = xasprintf("TAR_%s=%s", tar_var[idx], str); + xputenv(env[idx]); +} + +static void dec2env(char *env[], int idx, unsigned long long val) +{ + env[idx] = xasprintf("TAR_%s=%llu", tar_var[idx], val); + xputenv(env[idx]); +} + +static void oct2env(char *env[], int idx, unsigned long val) +{ + env[idx] = xasprintf("TAR_%s=%lo", tar_var[idx], val); + xputenv(env[idx]); +} + +void FAST_FUNC data_extract_to_command(archive_handle_t *archive_handle) +{ + file_header_t *file_header = archive_handle->file_header; + +#if 0 /* do we need this? 
ENABLE_FEATURE_TAR_SELINUX */ + char *sctx = archive_handle->tar__sctx[PAX_NEXT_FILE]; + if (!sctx) + sctx = archive_handle->tar__sctx[PAX_GLOBAL]; + if (sctx) { /* setfscreatecon is 4 syscalls, avoid if possible */ + setfscreatecon(sctx); + free(archive_handle->tar__sctx[PAX_NEXT_FILE]); + archive_handle->tar__sctx[PAX_NEXT_FILE] = NULL; + } +#endif + + if ((file_header->mode & S_IFMT) == S_IFREG) { + pid_t pid; + int p[2], status; + char *tar_env[TAR_MAX]; + + memset(tar_env, 0, sizeof(tar_env)); + + xpipe(p); + pid = BB_MMU ? xfork() : xvfork(); + if (pid == 0) { + /* Child */ + /* str2env(tar_env, TAR_FILETYPE, "f"); - parent should do it once */ + oct2env(tar_env, TAR_MODE, file_header->mode); + str2env(tar_env, TAR_FILENAME, file_header->name); + str2env(tar_env, TAR_REALNAME, file_header->name); +#if ENABLE_FEATURE_TAR_UNAME_GNAME + str2env(tar_env, TAR_UNAME, file_header->tar__uname); + str2env(tar_env, TAR_GNAME, file_header->tar__gname); +#endif + dec2env(tar_env, TAR_SIZE, file_header->size); + dec2env(tar_env, TAR_UID, file_header->uid); + dec2env(tar_env, TAR_GID, file_header->gid); + close(p[1]); + xdup2(p[0], STDIN_FILENO); + signal(SIGPIPE, SIG_DFL); + execl(archive_handle->tar__to_command_shell, + archive_handle->tar__to_command_shell, + "-c", + archive_handle->tar__to_command, + (char *)0); + bb_perror_msg_and_die("can't execute '%s'", archive_handle->tar__to_command_shell); + } + close(p[0]); + /* Our caller is expected to do signal(SIGPIPE, SIG_IGN) + * so that we don't die if child don't read all the input: */ + bb_copyfd_exact_size(archive_handle->src_fd, p[1], -file_header->size); + close(p[1]); + + status = wait_for_exitstatus(pid); + if (WIFEXITED(status) && WEXITSTATUS(status)) + bb_error_msg_and_die("'%s' returned status %d", + archive_handle->tar__to_command, WEXITSTATUS(status)); + if (WIFSIGNALED(status)) + bb_error_msg_and_die("'%s' terminated by signal %d", + archive_handle->tar__to_command, WTERMSIG(status)); + + if (!BB_MMU) { + int i; + for (i = 0; i < TAR_MAX; i++) { + if (tar_env[i]) + bb_unsetenv_and_free(tar_env[i]); + } + } + } + +#if 0 /* ENABLE_FEATURE_TAR_SELINUX */ + if (sctx) + /* reset the context after creating an entry */ + setfscreatecon(NULL); +#endif +} diff --git a/probe-busybox/archival/libarchive/data_extract_to_stdout.c b/probe-busybox/archival/libarchive/data_extract_to_stdout.c new file mode 100644 index 00000000..f849f3b4 --- /dev/null +++ b/probe-busybox/archival/libarchive/data_extract_to_stdout.c @@ -0,0 +1,14 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC data_extract_to_stdout(archive_handle_t *archive_handle) +{ + bb_copyfd_exact_size(archive_handle->src_fd, + STDOUT_FILENO, + archive_handle->file_header->size); +} diff --git a/probe-busybox/archival/libarchive/data_skip.c b/probe-busybox/archival/libarchive/data_skip.c new file mode 100644 index 00000000..588167f0 --- /dev/null +++ b/probe-busybox/archival/libarchive/data_skip.c @@ -0,0 +1,12 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC data_skip(archive_handle_t *archive_handle) +{ + archive_handle->seek(archive_handle->src_fd, archive_handle->file_header->size); +} diff --git a/probe-busybox/archival/libarchive/decompress_bunzip2.c b/probe-busybox/archival/libarchive/decompress_bunzip2.c new file mode 100644 index 00000000..fe5953da --- /dev/null +++ b/probe-busybox/archival/libarchive/decompress_bunzip2.c @@ -0,0 +1,829 @@ +/* vi: set sw=4 ts=4: */ +/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). + + Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), + which also acknowledges contributions by Mike Burrows, David Wheeler, + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, + Robert Sedgewick, and Jon L. Bentley. + + Licensed under GPLv2 or later, see file LICENSE in this source tree. +*/ + +/* + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). + + More efficient reading of Huffman codes, a streamlined read_bunzip() + function, and various other tweaks. In (limited) tests, approximately + 20% faster than bzcat on x86 and about 10% faster on arm. + + Note that about 2/3 of the time is spent in read_bunzip() reversing + the Burrows-Wheeler transformation. Much of that time is delay + resulting from cache misses. + + (2010 update by vda: profiled "bzcat <84mbyte.bz2 >/dev/null" + on x86-64 CPU with L2 > 1M: get_next_block is hotter than read_bunzip: + %time seconds calls function + 71.01 12.69 444 get_next_block + 28.65 5.12 93065 read_bunzip + 00.22 0.04 7736490 get_bits + 00.11 0.02 47 dealloc_bunzip + 00.00 0.00 93018 full_write + ...) + + + I would ask that anyone benefiting from this work, especially those + using it in commercial products, consider making a donation to my local + non-profit hospice organization (www.hospiceacadiana.com) in the name of + the woman I loved, Toni W. Hagan, who passed away Feb. 12, 2003. + + Manuel + */ + +#include "libbb.h" +#include "bb_archive.h" + +#if 0 +# define dbg(...) bb_error_msg(__VA_ARGS__) +#else +# define dbg(...) ((void)0) +#endif + +/* Constants for Huffman coding */ +#define MAX_GROUPS 6 +#define GROUP_SIZE 50 /* 64 would have been more efficient */ +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */ +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ +#define SYMBOL_RUNA 0 +#define SYMBOL_RUNB 1 + +/* Status return values */ +#define RETVAL_OK 0 +#define RETVAL_LAST_BLOCK (dbg("%d", __LINE__), -1) +#define RETVAL_NOT_BZIP_DATA (dbg("%d", __LINE__), -2) +#define RETVAL_UNEXPECTED_INPUT_EOF (dbg("%d", __LINE__), -3) +#define RETVAL_SHORT_WRITE (dbg("%d", __LINE__), -4) +#define RETVAL_DATA_ERROR (dbg("%d", __LINE__), -5) +#define RETVAL_OUT_OF_MEMORY (dbg("%d", __LINE__), -6) +#define RETVAL_OBSOLETE_INPUT (dbg("%d", __LINE__), -7) + +/* Other housekeeping constants */ +#define IOBUF_SIZE 4096 + +/* This is what we know about each Huffman coding group */ +struct group_data { + /* We have an extra slot at the end of limit[] for a sentinel value. */ + int limit[MAX_HUFCODE_BITS+1], base[MAX_HUFCODE_BITS], permute[MAX_SYMBOLS]; + int minLen, maxLen; +}; + +/* Structure holding all the housekeeping data, including IO buffers and + * memory that persists between calls to bunzip + * Found the most used member: + * cat this_file.c | sed -e 's/"/ /g' -e "s/'/ /g" | xargs -n1 \ + * | grep 'bd->' | sed 's/^.*bd->/bd->/' | sort | $PAGER + * and moved it (inbufBitCount) to offset 0. 
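+ * dbuf[] (the big intermediate buffer) holds one uint32_t per decoded byte
+ * of the current block: the literal byte in the low 8 bits and, once the
+ * counting sort in get_next_block() has run, the index of the next byte in
+ * un-BWT order in the upper 24 bits.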
+ */ +struct bunzip_data { + /* I/O tracking data (file handles, buffers, positions, etc.) */ + unsigned inbufBitCount, inbufBits; + int in_fd, out_fd, inbufCount, inbufPos /*, outbufPos*/; + uint8_t *inbuf /*,*outbuf*/; + + /* State for interrupting output loop */ + int writeCopies, writePos, writeRunCountdown, writeCount; + int writeCurrent; /* actually a uint8_t */ + + /* The CRC values stored in the block header and calculated from the data */ + uint32_t headerCRC, totalCRC, writeCRC; + + /* Intermediate buffer and its size (in bytes) */ + uint32_t *dbuf; + unsigned dbufSize; + + /* For I/O error handling */ + jmp_buf jmpbuf; + + /* Big things go last (register-relative addressing can be larger for big offsets) */ + uint32_t crc32Table[256]; + uint8_t selectors[32768]; /* nSelectors=15 bits */ + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ +}; +/* typedef struct bunzip_data bunzip_data; -- done in .h file */ + + +/* Return the next nnn bits of input. All reads from the compressed input + are done through this function. All reads are big endian */ +static unsigned get_bits(bunzip_data *bd, int bits_wanted) +{ + unsigned bits = 0; + /* Cache bd->inbufBitCount in a CPU register (hopefully): */ + int bit_count = bd->inbufBitCount; + + /* If we need to get more data from the byte buffer, do so. (Loop getting + one byte at a time to enforce endianness and avoid unaligned access.) */ + while (bit_count < bits_wanted) { + + /* If we need to read more data from file into byte buffer, do so */ + if (bd->inbufPos == bd->inbufCount) { + /* if "no input fd" case: in_fd == -1, read fails, we jump */ + bd->inbufCount = read(bd->in_fd, bd->inbuf, IOBUF_SIZE); + if (bd->inbufCount <= 0) + longjmp(bd->jmpbuf, RETVAL_UNEXPECTED_INPUT_EOF); + bd->inbufPos = 0; + } + + /* Avoid 32-bit overflow (dump bit buffer to top of output) */ + if (bit_count >= 24) { + bits = bd->inbufBits & ((1 << bit_count) - 1); + bits_wanted -= bit_count; + bits <<= bits_wanted; + bit_count = 0; + } + + /* Grab next 8 bits of input from buffer. */ + bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++]; + bit_count += 8; + } + + /* Calculate result */ + bit_count -= bits_wanted; + bd->inbufBitCount = bit_count; + bits |= (bd->inbufBits >> bit_count) & ((1 << bits_wanted) - 1); + + return bits; +} + +/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */ +static int get_next_block(bunzip_data *bd) +{ + struct group_data *hufGroup; + int dbufCount, dbufSize, groupCount, *base, *limit, selector, + i, j, t, runPos, symCount, symTotal, nSelectors, byteCount[256]; + int runCnt = runCnt; /* for compiler */ + uint8_t uc, symToByte[256], mtfSymbol[256], *selectors; + uint32_t *dbuf; + unsigned origPtr; + + dbuf = bd->dbuf; + dbufSize = bd->dbufSize; + selectors = bd->selectors; + +/* In bbox, we are ok with aborting through setjmp which is set up in start_bunzip */ +#if 0 + /* Reset longjmp I/O error handling */ + i = setjmp(bd->jmpbuf); + if (i) return i; +#endif + + /* Read in header signature and CRC, then validate signature. + (last block signature means CRC is for whole file, return now) */ + i = get_bits(bd, 24); + j = get_bits(bd, 24); + bd->headerCRC = get_bits(bd, 32); + if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK; + if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA; + + /* We can add support for blockRandomised if anybody complains. 
There was + some code for this in busybox 1.0.0-pre3, but nobody ever noticed that + it didn't actually work. */ + if (get_bits(bd, 1)) return RETVAL_OBSOLETE_INPUT; + origPtr = get_bits(bd, 24); + if ((int)origPtr > dbufSize) return RETVAL_DATA_ERROR; + + /* mapping table: if some byte values are never used (encoding things + like ascii text), the compression code removes the gaps to have fewer + symbols to deal with, and writes a sparse bitfield indicating which + values were present. We make a translation table to convert the symbols + back to the corresponding bytes. */ + symTotal = 0; + i = 0; + t = get_bits(bd, 16); + do { + if (t & (1 << 15)) { + unsigned inner_map = get_bits(bd, 16); + do { + if (inner_map & (1 << 15)) + symToByte[symTotal++] = i; + inner_map <<= 1; + i++; + } while (i & 15); + i -= 16; + } + t <<= 1; + i += 16; + } while (i < 256); + + /* How many different Huffman coding groups does this block use? */ + groupCount = get_bits(bd, 3); + if (groupCount < 2 || groupCount > MAX_GROUPS) + return RETVAL_DATA_ERROR; + + /* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding + group. Read in the group selector list, which is stored as MTF encoded + bit runs. (MTF=Move To Front, as each value is used it's moved to the + start of the list.) */ + for (i = 0; i < groupCount; i++) + mtfSymbol[i] = i; + nSelectors = get_bits(bd, 15); + if (!nSelectors) + return RETVAL_DATA_ERROR; + for (i = 0; i < nSelectors; i++) { + uint8_t tmp_byte; + /* Get next value */ + int n = 0; + while (get_bits(bd, 1)) { + if (n >= groupCount) return RETVAL_DATA_ERROR; + n++; + } + /* Decode MTF to get the next selector */ + tmp_byte = mtfSymbol[n]; + while (--n >= 0) + mtfSymbol[n + 1] = mtfSymbol[n]; + mtfSymbol[0] = selectors[i] = tmp_byte; + } + + /* Read the Huffman coding tables for each group, which code for symTotal + literal symbols, plus two run symbols (RUNA, RUNB) */ + symCount = symTotal + 2; + for (j = 0; j < groupCount; j++) { + uint8_t length[MAX_SYMBOLS]; + /* 8 bits is ALMOST enough for temp[], see below */ + unsigned temp[MAX_HUFCODE_BITS+1]; + int minLen, maxLen, pp, len_m1; + + /* Read Huffman code lengths for each symbol. They're stored in + a way similar to mtf; record a starting value for the first symbol, + and an offset from the previous value for every symbol after that. + (Subtracting 1 before the loop and then adding it back at the end is + an optimization that makes the test inside the loop simpler: symbol + length 0 becomes negative, so an unsigned inequality catches it.) */ + len_m1 = get_bits(bd, 5) - 1; + for (i = 0; i < symCount; i++) { + for (;;) { + int two_bits; + if ((unsigned)len_m1 > (MAX_HUFCODE_BITS-1)) + return RETVAL_DATA_ERROR; + + /* If first bit is 0, stop. Else second bit indicates whether + to increment or decrement the value. Optimization: grab 2 + bits and unget the second if the first was 0. */ + two_bits = get_bits(bd, 2); + if (two_bits < 2) { + bd->inbufBitCount++; + break; + } + + /* Add one if second bit 1, else subtract 1. Avoids if/else */ + len_m1 += (((two_bits+1) & 2) - 1); + } + + /* Correct for the initial -1, to get the final symbol length */ + length[i] = len_m1 + 1; + } + + /* Find largest and smallest lengths in this group */ + minLen = maxLen = length[0]; + for (i = 1; i < symCount; i++) { + if (length[i] > maxLen) maxLen = length[i]; + else if (length[i] < minLen) minLen = length[i]; + } + + /* Calculate permute[], base[], and limit[] tables from length[]. 
+ * + * permute[] is the lookup table for converting Huffman coded symbols + * into decoded symbols. base[] is the amount to subtract from the + * value of a Huffman symbol of a given length when using permute[]. + * + * limit[] indicates the largest numerical value a symbol with a given + * number of bits can have. This is how the Huffman codes can vary in + * length: each code with a value>limit[length] needs another bit. + */ + hufGroup = bd->groups + j; + hufGroup->minLen = minLen; + hufGroup->maxLen = maxLen; + + /* Note that minLen can't be smaller than 1, so we adjust the base + and limit array pointers so we're not always wasting the first + entry. We do this again when using them (during symbol decoding). */ + base = hufGroup->base - 1; + limit = hufGroup->limit - 1; + + /* Calculate permute[]. Concurently, initialize temp[] and limit[]. */ + pp = 0; + for (i = minLen; i <= maxLen; i++) { + int k; + temp[i] = limit[i] = 0; + for (k = 0; k < symCount; k++) + if (length[k] == i) + hufGroup->permute[pp++] = k; + } + + /* Count symbols coded for at each bit length */ + /* NB: in pathological cases, temp[8] can end ip being 256. + * That's why uint8_t is too small for temp[]. */ + for (i = 0; i < symCount; i++) temp[length[i]]++; + + /* Calculate limit[] (the largest symbol-coding value at each bit + * length, which is (previous limit<<1)+symbols at this level), and + * base[] (number of symbols to ignore at each bit length, which is + * limit minus the cumulative count of symbols coded for already). */ + pp = t = 0; + for (i = minLen; i < maxLen;) { + unsigned temp_i = temp[i]; + + pp += temp_i; + + /* We read the largest possible symbol size and then unget bits + after determining how many we need, and those extra bits could + be set to anything. (They're noise from future symbols.) At + each level we're really only interested in the first few bits, + so here we set all the trailing to-be-ignored bits to 1 so they + don't affect the value>limit[length] comparison. */ + limit[i] = (pp << (maxLen - i)) - 1; + pp <<= 1; + t += temp_i; + base[++i] = pp - t; + } + limit[maxLen] = pp + temp[maxLen] - 1; + limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */ + base[minLen] = 0; + } + + /* We've finished reading and digesting the block header. Now read this + block's Huffman coded symbols from the file and undo the Huffman coding + and run length encoding, saving the result into dbuf[dbufCount++] = uc */ + + /* Initialize symbol occurrence counters and symbol Move To Front table */ + /*memset(byteCount, 0, sizeof(byteCount)); - smaller, but slower */ + for (i = 0; i < 256; i++) { + byteCount[i] = 0; + mtfSymbol[i] = (uint8_t)i; + } + + /* Loop through compressed symbols. */ + + runPos = dbufCount = selector = 0; + for (;;) { + int nextSym; + + /* Fetch next Huffman coding group from list. */ + symCount = GROUP_SIZE - 1; + if (selector >= nSelectors) return RETVAL_DATA_ERROR; + hufGroup = bd->groups + selectors[selector++]; + base = hufGroup->base - 1; + limit = hufGroup->limit - 1; + + continue_this_group: + /* Read next Huffman-coded symbol. */ + + /* Note: It is far cheaper to read maxLen bits and back up than it is + to read minLen bits and then add additional bit at a time, testing + as we go. Because there is a trailing last block (with file CRC), + there is no danger of the overread causing an unexpected EOF for a + valid compressed file. + */ + if (1) { + /* As a further optimization, we do the read inline + (falling back to a call to get_bits if the buffer runs dry). 
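+ Decoding a symbol then works like this: take the next maxLen bits as a
+ window, find the smallest code length i with window <= limit[i], push the
+ surplus maxLen-i bits back, and the symbol is
+ permute[(window >> (maxLen-i)) - base[i]].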
+ */ + int new_cnt; + while ((new_cnt = bd->inbufBitCount - hufGroup->maxLen) < 0) { + /* bd->inbufBitCount < hufGroup->maxLen */ + if (bd->inbufPos == bd->inbufCount) { + nextSym = get_bits(bd, hufGroup->maxLen); + goto got_huff_bits; + } + bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount += 8; + }; + bd->inbufBitCount = new_cnt; /* "bd->inbufBitCount -= hufGroup->maxLen;" */ + nextSym = (bd->inbufBits >> new_cnt) & ((1 << hufGroup->maxLen) - 1); + got_huff_bits: ; + } else { /* unoptimized equivalent */ + nextSym = get_bits(bd, hufGroup->maxLen); + } + /* Figure how many bits are in next symbol and unget extras */ + i = hufGroup->minLen; + while (nextSym > limit[i]) ++i; + j = hufGroup->maxLen - i; + if (j < 0) + return RETVAL_DATA_ERROR; + bd->inbufBitCount += j; + + /* Huffman decode value to get nextSym (with bounds checking) */ + nextSym = (nextSym >> j) - base[i]; + if ((unsigned)nextSym >= MAX_SYMBOLS) + return RETVAL_DATA_ERROR; + nextSym = hufGroup->permute[nextSym]; + + /* We have now decoded the symbol, which indicates either a new literal + byte, or a repeated run of the most recent literal byte. First, + check if nextSym indicates a repeated run, and if so loop collecting + how many times to repeat the last literal. */ + if ((unsigned)nextSym <= SYMBOL_RUNB) { /* RUNA or RUNB */ + + /* If this is the start of a new run, zero out counter */ + if (runPos == 0) { + runPos = 1; + runCnt = 0; + } + + /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at + each bit position, add 1 or 2 instead. For example, + 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2. + You can make any bit pattern that way using 1 less symbol than + the basic or 0/1 method (except all bits 0, which would use no + symbols, but a run of length 0 doesn't mean anything in this + context). Thus space is saved. */ + runCnt += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */ + if (runPos < dbufSize) runPos <<= 1; + goto end_of_huffman_loop; + } + + /* When we hit the first non-run symbol after a run, we now know + how many times to repeat the last literal, so append that many + copies to our buffer of decoded symbols (dbuf) now. (The last + literal used is the one at the head of the mtfSymbol array.) */ + if (runPos != 0) { + uint8_t tmp_byte; + if (dbufCount + runCnt > dbufSize) { + dbg("dbufCount:%d+runCnt:%d %d > dbufSize:%d RETVAL_DATA_ERROR", + dbufCount, runCnt, dbufCount + runCnt, dbufSize); + return RETVAL_DATA_ERROR; + } + tmp_byte = symToByte[mtfSymbol[0]]; + byteCount[tmp_byte] += runCnt; + while (--runCnt >= 0) dbuf[dbufCount++] = (uint32_t)tmp_byte; + runPos = 0; + } + + /* Is this the terminating symbol? */ + if (nextSym > symTotal) break; + + /* At this point, nextSym indicates a new literal character. Subtract + one to get the position in the MTF array at which this literal is + currently to be found. (Note that the result can't be -1 or 0, + because 0 and 1 are RUNA and RUNB. But another instance of the + first symbol in the mtf array, position 0, would have been handled + as part of a run above. Therefore 1 unused mtf position minus + 2 non-literal nextSym values equals -1.) */ + if (dbufCount >= dbufSize) return RETVAL_DATA_ERROR; + i = nextSym - 1; + uc = mtfSymbol[i]; + + /* Adjust the MTF array. Since we typically expect to move only a + * small number of symbols, and are bound by 256 in any case, using + * memmove here would typically be bigger and slower due to function + * call overhead and other assorted setup costs. 
*/ + do { + mtfSymbol[i] = mtfSymbol[i-1]; + } while (--i); + mtfSymbol[0] = uc; + uc = symToByte[uc]; + + /* We have our literal byte. Save it into dbuf. */ + byteCount[uc]++; + dbuf[dbufCount++] = (uint32_t)uc; + + /* Skip group initialization if we're not done with this group. Done + * this way to avoid compiler warning. */ + end_of_huffman_loop: + if (--symCount >= 0) goto continue_this_group; + } + + /* At this point, we've read all the Huffman-coded symbols (and repeated + runs) for this block from the input stream, and decoded them into the + intermediate buffer. There are dbufCount many decoded bytes in dbuf[]. + Now undo the Burrows-Wheeler transform on dbuf. + See http://dogma.net/markn/articles/bwt/bwt.htm + */ + + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ + j = 0; + for (i = 0; i < 256; i++) { + int tmp_count = j + byteCount[i]; + byteCount[i] = j; + j = tmp_count; + } + + /* Figure out what order dbuf would be in if we sorted it. */ + for (i = 0; i < dbufCount; i++) { + uint8_t tmp_byte = (uint8_t)dbuf[i]; + int tmp_count = byteCount[tmp_byte]; + dbuf[tmp_count] |= (i << 8); + byteCount[tmp_byte] = tmp_count + 1; + } + + /* Decode first byte by hand to initialize "previous" byte. Note that it + doesn't get output, and if the first three characters are identical + it doesn't qualify as a run (hence writeRunCountdown=5). */ + if (dbufCount) { + uint32_t tmp; + if ((int)origPtr >= dbufCount) return RETVAL_DATA_ERROR; + tmp = dbuf[origPtr]; + bd->writeCurrent = (uint8_t)tmp; + bd->writePos = (tmp >> 8); + bd->writeRunCountdown = 5; + } + bd->writeCount = dbufCount; + + return RETVAL_OK; +} + +/* Undo Burrows-Wheeler transform on intermediate buffer to produce output. + If start_bunzip was initialized with out_fd=-1, then up to len bytes of + data are written to outbuf. Return value is number of bytes written or + error (all errors are negative numbers). If out_fd!=-1, outbuf and len + are ignored, data is written to out_fd and return is RETVAL_OK or error. + + NB: read_bunzip returns < 0 on error, or the number of *unfilled* bytes + in outbuf. IOW: on EOF returns len ("all bytes are not filled"), not 0. + (Why? This allows to get rid of one local variable) +*/ +int FAST_FUNC read_bunzip(bunzip_data *bd, char *outbuf, int len) +{ + const uint32_t *dbuf; + int pos, current, previous; + uint32_t CRC; + + /* If we already have error/end indicator, return it */ + if (bd->writeCount < 0) + return bd->writeCount; + + dbuf = bd->dbuf; + + /* Register-cached state (hopefully): */ + pos = bd->writePos; + current = bd->writeCurrent; + CRC = bd->writeCRC; /* small loss on x86-32 (not enough regs), win on x86-64 */ + + /* We will always have pending decoded data to write into the output + buffer unless this is the very first call (in which case we haven't + Huffman-decoded a block into the intermediate buffer yet). */ + if (bd->writeCopies) { + + dec_writeCopies: + /* Inside the loop, writeCopies means extra copies (beyond 1) */ + --bd->writeCopies; + + /* Loop outputting bytes */ + for (;;) { + + /* If the output buffer is full, save cached state and return */ + if (--len < 0) { + /* Unlikely branch. + * Use of "goto" instead of keeping code here + * helps compiler to realize this. 
*/ + goto outbuf_full; + } + + /* Write next byte into output buffer, updating CRC */ + *outbuf++ = current; + CRC = (CRC << 8) ^ bd->crc32Table[(CRC >> 24) ^ current]; + + /* Loop now if we're outputting multiple copies of this byte */ + if (bd->writeCopies) { + /* Unlikely branch */ + /*--bd->writeCopies;*/ + /*continue;*/ + /* Same, but (ab)using other existing --writeCopies operation + * (and this if() compiles into just test+branch pair): */ + goto dec_writeCopies; + } + decode_next_byte: + if (--bd->writeCount < 0) + break; /* input block is fully consumed, need next one */ + + /* Follow sequence vector to undo Burrows-Wheeler transform */ + previous = current; + pos = dbuf[pos]; + current = (uint8_t)pos; + pos >>= 8; + + /* After 3 consecutive copies of the same byte, the 4th + * is a repeat count. We count down from 4 instead + * of counting up because testing for non-zero is faster */ + if (--bd->writeRunCountdown != 0) { + if (current != previous) + bd->writeRunCountdown = 4; + } else { + /* Unlikely branch */ + /* We have a repeated run, this byte indicates the count */ + bd->writeCopies = current; + current = previous; + bd->writeRunCountdown = 5; + + /* Sometimes there are just 3 bytes (run length 0) */ + if (!bd->writeCopies) goto decode_next_byte; + + /* Subtract the 1 copy we'd output anyway to get extras */ + --bd->writeCopies; + } + } /* for(;;) */ + + /* Decompression of this input block completed successfully */ + bd->writeCRC = CRC = ~CRC; + bd->totalCRC = ((bd->totalCRC << 1) | (bd->totalCRC >> 31)) ^ CRC; + + /* If this block had a CRC error, force file level CRC error */ + if (CRC != bd->headerCRC) { + bd->totalCRC = bd->headerCRC + 1; + return RETVAL_LAST_BLOCK; + } + } + + /* Refill the intermediate buffer by Huffman-decoding next block of input */ + { + int r = get_next_block(bd); + if (r) { /* error/end */ + bd->writeCount = r; + return (r != RETVAL_LAST_BLOCK) ? r : len; + } + } + + CRC = ~0; + pos = bd->writePos; + current = bd->writeCurrent; + goto decode_next_byte; + + outbuf_full: + /* Output buffer is full, save cached state and return */ + bd->writePos = pos; + bd->writeCurrent = current; + bd->writeCRC = CRC; + + bd->writeCopies++; + + return 0; +} + +/* Allocate the structure, read file header. If in_fd==-1, inbuf must contain + a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are + ignored, and data is read from file handle into temporary buffer. */ + +/* Because bunzip2 is used for help text unpacking, and because bb_show_usage() + should work for NOFORK applets too, we must be extremely careful to not leak + any allocations! */ +int FAST_FUNC start_bunzip(bunzip_data **bdp, int in_fd, + const void *inbuf, int len) +{ + bunzip_data *bd; + unsigned i; + enum { + BZh0 = ('B' << 24) + ('Z' << 16) + ('h' << 8) + '0', + h0 = ('h' << 8) + '0', + }; + + /* Figure out how much data to allocate */ + i = sizeof(bunzip_data); + if (in_fd != -1) i += IOBUF_SIZE; + + /* Allocate bunzip_data. Most fields initialize to zero. 
*/ + bd = *bdp = xzalloc(i); + + /* Setup input buffer */ + bd->in_fd = in_fd; + if (-1 == in_fd) { + /* in this case, bd->inbuf is read-only */ + bd->inbuf = (void*)inbuf; /* cast away const-ness */ + } else { + bd->inbuf = (uint8_t*)(bd + 1); + memcpy(bd->inbuf, inbuf, len); + } + bd->inbufCount = len; + + /* Init the CRC32 table (big endian) */ + crc32_filltable(bd->crc32Table, 1); + + /* Setup for I/O error handling via longjmp */ + i = setjmp(bd->jmpbuf); + if (i) return i; + + /* Ensure that file starts with "BZh['1'-'9']." */ + /* Update: now caller verifies 1st two bytes, makes .gz/.bz2 + * integration easier */ + /* was: */ + /* i = get_bits(bd, 32); */ + /* if ((unsigned)(i - BZh0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; */ + i = get_bits(bd, 16); + if ((unsigned)(i - h0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; + + /* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of + uncompressed data. Allocate intermediate buffer for block. */ + /* bd->dbufSize = 100000 * (i - BZh0); */ + bd->dbufSize = 100000 * (i - h0); + + /* Cannot use xmalloc - may leak bd in NOFORK case! */ + bd->dbuf = malloc_or_warn(bd->dbufSize * sizeof(bd->dbuf[0])); + if (!bd->dbuf) { + free(bd); + xfunc_die(); + } + return RETVAL_OK; +} + +void FAST_FUNC dealloc_bunzip(bunzip_data *bd) +{ + free(bd->dbuf); + free(bd); +} + + +/* Decompress src_fd to dst_fd. Stops at end of bzip data, not end of file. */ +IF_DESKTOP(long long) int FAST_FUNC +unpack_bz2_stream(transformer_state_t *xstate) +{ + IF_DESKTOP(long long total_written = 0;) + bunzip_data *bd; + char *outbuf; + int i; + unsigned len; + + if (check_signature16(xstate, BZIP2_MAGIC)) + return -1; + + outbuf = xmalloc(IOBUF_SIZE); + len = 0; + while (1) { /* "Process one BZ... stream" loop */ + + i = start_bunzip(&bd, xstate->src_fd, outbuf + 2, len); + + if (i == 0) { + while (1) { /* "Produce some output bytes" loop */ + i = read_bunzip(bd, outbuf, IOBUF_SIZE); + if (i < 0) /* error? */ + break; + i = IOBUF_SIZE - i; /* number of bytes produced */ + if (i == 0) /* EOF? */ + break; + if (i != transformer_write(xstate, outbuf, i)) { + i = RETVAL_SHORT_WRITE; + goto release_mem; + } + IF_DESKTOP(total_written += i;) + } + } + + if (i != RETVAL_LAST_BLOCK + /* Observed case when i == RETVAL_OK: + * "bzcat z.bz2", where "z.bz2" is a bzipped zero-length file + * (to be exact, z.bz2 is exactly these 14 bytes: + * 42 5a 68 39 17 72 45 38 50 90 00 00 00 00). + */ + && i != RETVAL_OK + ) { + bb_error_msg("bunzip error %d", i); + break; + } + if (bd->headerCRC != bd->totalCRC) { + bb_error_msg("CRC error"); + break; + } + + /* Successfully unpacked one BZ stream */ + i = RETVAL_OK; + + /* Do we have "BZ..." after last processed byte? + * pbzip2 (parallelized bzip2) produces such files. + */ + len = bd->inbufCount - bd->inbufPos; + memcpy(outbuf, &bd->inbuf[bd->inbufPos], len); + if (len < 2) { + if (safe_read(xstate->src_fd, outbuf + len, 2 - len) != 2 - len) + break; + len = 2; + } + if (*(uint16_t*)outbuf != BZIP2_MAGIC) /* "BZ"? */ + break; + dealloc_bunzip(bd); + len -= 2; + } + + release_mem: + dealloc_bunzip(bd); + free(outbuf); + + return i ? 
i : IF_DESKTOP(total_written) + 0; +} + +#ifdef TESTING + +static char *const bunzip_errors[] = { + NULL, "Bad file checksum", "Not bzip data", + "Unexpected input EOF", "Unexpected output EOF", "Data error", + "Out of memory", "Obsolete (pre 0.9.5) bzip format not supported" +}; + +/* Dumb little test thing, decompress stdin to stdout */ +int main(int argc, char **argv) +{ + char c; + + int i = unpack_bz2_stream(0, 1); + if (i < 0) + fprintf(stderr, "%s\n", bunzip_errors[-i]); + else if (read(STDIN_FILENO, &c, 1)) + fprintf(stderr, "Trailing garbage ignored\n"); + return -i; +} +#endif diff --git a/probe-busybox/archival/libarchive/decompress_gunzip.c b/probe-busybox/archival/libarchive/decompress_gunzip.c new file mode 100644 index 00000000..c7fa5b52 --- /dev/null +++ b/probe-busybox/archival/libarchive/decompress_gunzip.c @@ -0,0 +1,1280 @@ +/* vi: set sw=4 ts=4: */ +/* + * gunzip implementation for busybox + * + * Based on GNU gzip v1.2.4 Copyright (C) 1992-1993 Jean-loup Gailly. + * + * Originally adjusted for busybox by Sven Rudolph + * based on gzip sources + * + * Adjusted further by Erik Andersen to support + * files as well as stdin/stdout, and to generally behave itself wrt + * command line handling. + * + * General cleanup to better adhere to the style guide and make use of standard + * busybox functions by Glenn McGrath + * + * read_gz interface + associated hacking by Laurence Anderson + * + * Fixed huft_build() so decoding end-of-block code does not grab more bits + * than necessary (this is required by unzip applet), added inflate_cleanup() + * to free leaked bytebuffer memory (used in unzip.c), and some minor style + * guide cleanups by Ed Clark + * + * gzip (GNU zip) -- compress files with zip algorithm and 'compress' interface + * Copyright (C) 1992-1993 Jean-loup Gailly + * The unzip code was written and put in the public domain by Mark Adler. + * Portions of the lzw code are derived from the public domain 'compress' + * written by Spencer Thomas, Joe Orost, James Woods, Jim McKie, Steve Davies, + * Ken Turkowski, Dave Mack and Peter Jannesen. + * + * See the file algorithm.doc for the compression algorithms and file formats. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include +#include "libbb.h" +#include "bb_archive.h" + +typedef struct huft_t { + unsigned char e; /* number of extra bits or operation */ + unsigned char b; /* number of bits in this code or subcode */ + union { + unsigned short n; /* literal, length base, or distance base */ + struct huft_t *t; /* pointer to next level of table */ + } v; +} huft_t; + +enum { + /* gunzip_window size--must be a power of two, and + * at least 32K for zip's deflate method */ + GUNZIP_WSIZE = 0x8000, + /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ + BMAX = 16, /* maximum bit length of any code (16 for explode) */ + N_MAX = 288, /* maximum number of codes in any set */ +}; + + +/* This is somewhat complex-looking arrangement, but it allows + * to place decompressor state either in bss or in + * malloc'ed space simply by changing #defines below. 
+ * Sizes on i386: + * text data bss dec hex + * 5256 0 108 5364 14f4 - bss + * 4915 0 0 4915 1333 - malloc + */ +#define STATE_IN_BSS 0 +#define STATE_IN_MALLOC 1 + + +typedef struct state_t { + off_t gunzip_bytes_out; /* number of output bytes */ + uint32_t gunzip_crc; + + int gunzip_src_fd; + unsigned gunzip_outbuf_count; /* bytes in output buffer */ + + unsigned char *gunzip_window; + + uint32_t *gunzip_crc_table; + + /* bitbuffer */ + unsigned gunzip_bb; /* bit buffer */ + unsigned char gunzip_bk; /* bits in bit buffer */ + + /* input (compressed) data */ + unsigned char *bytebuffer; /* buffer itself */ + off_t to_read; /* compressed bytes to read (unzip only, -1 for gunzip) */ +// unsigned bytebuffer_max; /* buffer size */ + unsigned bytebuffer_offset; /* buffer position */ + unsigned bytebuffer_size; /* how much data is there (size <= max) */ + + /* private data of inflate_codes() */ + unsigned inflate_codes_ml; /* masks for bl and bd bits */ + unsigned inflate_codes_md; /* masks for bl and bd bits */ + unsigned inflate_codes_bb; /* bit buffer */ + unsigned inflate_codes_k; /* number of bits in bit buffer */ + unsigned inflate_codes_w; /* current gunzip_window position */ + huft_t *inflate_codes_tl; + huft_t *inflate_codes_td; + unsigned inflate_codes_bl; + unsigned inflate_codes_bd; + unsigned inflate_codes_nn; /* length and index for copy */ + unsigned inflate_codes_dd; + + smallint resume_copy; + + /* private data of inflate_get_next_window() */ + smallint method; /* method == -1 for stored, -2 for codes */ + smallint need_another_block; + smallint end_reached; + + /* private data of inflate_stored() */ + unsigned inflate_stored_n; + unsigned inflate_stored_b; + unsigned inflate_stored_k; + unsigned inflate_stored_w; + + const char *error_msg; + jmp_buf error_jmp; +} state_t; +#define gunzip_bytes_out (S()gunzip_bytes_out ) +#define gunzip_crc (S()gunzip_crc ) +#define gunzip_src_fd (S()gunzip_src_fd ) +#define gunzip_outbuf_count (S()gunzip_outbuf_count) +#define gunzip_window (S()gunzip_window ) +#define gunzip_crc_table (S()gunzip_crc_table ) +#define gunzip_bb (S()gunzip_bb ) +#define gunzip_bk (S()gunzip_bk ) +#define to_read (S()to_read ) +// #define bytebuffer_max (S()bytebuffer_max ) +// Both gunzip and unzip can use constant buffer size now (16k): +#define bytebuffer_max 0x4000 +#define bytebuffer (S()bytebuffer ) +#define bytebuffer_offset (S()bytebuffer_offset ) +#define bytebuffer_size (S()bytebuffer_size ) +#define inflate_codes_ml (S()inflate_codes_ml ) +#define inflate_codes_md (S()inflate_codes_md ) +#define inflate_codes_bb (S()inflate_codes_bb ) +#define inflate_codes_k (S()inflate_codes_k ) +#define inflate_codes_w (S()inflate_codes_w ) +#define inflate_codes_tl (S()inflate_codes_tl ) +#define inflate_codes_td (S()inflate_codes_td ) +#define inflate_codes_bl (S()inflate_codes_bl ) +#define inflate_codes_bd (S()inflate_codes_bd ) +#define inflate_codes_nn (S()inflate_codes_nn ) +#define inflate_codes_dd (S()inflate_codes_dd ) +#define resume_copy (S()resume_copy ) +#define method (S()method ) +#define need_another_block (S()need_another_block ) +#define end_reached (S()end_reached ) +#define inflate_stored_n (S()inflate_stored_n ) +#define inflate_stored_b (S()inflate_stored_b ) +#define inflate_stored_k (S()inflate_stored_k ) +#define inflate_stored_w (S()inflate_stored_w ) +#define error_msg (S()error_msg ) +#define error_jmp (S()error_jmp ) + +/* This is a generic part */ +#if STATE_IN_BSS /* Use global data segment */ +#define DECLARE_STATE /*nothing*/ 
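+/* Example of how these cooperate: with STATE_IN_BSS a call written as
+ * fill_bitbuffer(PASS_STATE bb, &k, bl) compiles to fill_bitbuffer(bb, &k, bl)
+ * and the gunzip_bb accessor above expands to state.gunzip_bb; with
+ * STATE_IN_MALLOC (the variant enabled above) the same call becomes
+ * fill_bitbuffer(state, bb, &k, bl) and gunzip_bb becomes state->gunzip_bb. */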
+#define ALLOC_STATE /*nothing*/ +#define DEALLOC_STATE ((void)0) +#define S() state. +#define PASS_STATE /*nothing*/ +#define PASS_STATE_ONLY /*nothing*/ +#define STATE_PARAM /*nothing*/ +#define STATE_PARAM_ONLY void +static state_t state; +#endif + +#if STATE_IN_MALLOC /* Use malloc space */ +#define DECLARE_STATE state_t *state +#define ALLOC_STATE (state = xzalloc(sizeof(*state))) +#define DEALLOC_STATE free(state) +#define S() state-> +#define PASS_STATE state, +#define PASS_STATE_ONLY state +#define STATE_PARAM state_t *state, +#define STATE_PARAM_ONLY state_t *state +#endif + + +static const uint16_t mask_bits[] ALIGN2 = { + 0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +/* Copy lengths for literal codes 257..285 */ +static const uint16_t cplens[] ALIGN2 = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, + 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 +}; + +/* note: see note #13 above about the 258 in this list. */ +/* Extra bits for literal codes 257..285 */ +static const uint8_t cplext[] ALIGN1 = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, + 5, 5, 5, 0, 99, 99 +}; /* 99 == invalid */ + +/* Copy offsets for distance codes 0..29 */ +static const uint16_t cpdist[] ALIGN2 = { + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, + 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577 +}; + +/* Extra bits for distance codes */ +static const uint8_t cpdext[] ALIGN1 = { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, + 11, 11, 12, 12, 13, 13 +}; + +/* Tables for deflate from PKZIP's appnote.txt. */ +/* Order of the bit length code lengths */ +static const uint8_t border[] ALIGN1 = { + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 +}; + + +/* + * Free the malloc'ed tables built by huft_build(), which makes a linked + * list of the tables it made, with the links in a dummy first entry of + * each table. + * t: table to free + */ +static void huft_free(huft_t *p) +{ + huft_t *q; + + /* Go through linked list, freeing from the malloced (t[-1]) address. 
*/ + while (p) { + q = (--p)->v.t; + free(p); + p = q; + } +} + +static void huft_free_all(STATE_PARAM_ONLY) +{ + huft_free(inflate_codes_tl); + huft_free(inflate_codes_td); + inflate_codes_tl = NULL; + inflate_codes_td = NULL; +} + +static void abort_unzip(STATE_PARAM_ONLY) NORETURN; +static void abort_unzip(STATE_PARAM_ONLY) +{ + huft_free_all(PASS_STATE_ONLY); + longjmp(error_jmp, 1); +} + +static unsigned fill_bitbuffer(STATE_PARAM unsigned bitbuffer, unsigned *current, const unsigned required) +{ + while (*current < required) { + if (bytebuffer_offset >= bytebuffer_size) { + unsigned sz = bytebuffer_max - 4; + if (to_read >= 0 && to_read < sz) /* unzip only */ + sz = to_read; + /* Leave the first 4 bytes empty so we can always unwind the bitbuffer + * to the front of the bytebuffer */ + bytebuffer_size = safe_read(gunzip_src_fd, &bytebuffer[4], sz); + if ((int)bytebuffer_size < 1) { + error_msg = "unexpected end of file"; + abort_unzip(PASS_STATE_ONLY); + } + if (to_read >= 0) /* unzip only */ + to_read -= bytebuffer_size; + bytebuffer_size += 4; + bytebuffer_offset = 4; + } + bitbuffer |= ((unsigned) bytebuffer[bytebuffer_offset]) << *current; + bytebuffer_offset++; + *current += 8; + } + return bitbuffer; +} + + +/* Given a list of code lengths and a maximum table size, make a set of + * tables to decode that set of codes. Return zero on success, one if + * the given code set is incomplete (the tables are still built in this + * case), two if the input is invalid (all zero length codes or an + * oversubscribed set of lengths) - in this case stores NULL in *t. + * + * b: code lengths in bits (all assumed <= BMAX) + * n: number of codes (assumed <= N_MAX) + * s: number of simple-valued codes (0..s-1) + * d: list of base values for non-simple codes + * e: list of extra bits for non-simple codes + * t: result: starting table + * m: maximum lookup bits, returns actual + */ +static int huft_build(const unsigned *b, const unsigned n, + const unsigned s, const unsigned short *d, + const unsigned char *e, huft_t **t, unsigned *m) +{ + unsigned a; /* counter for codes of length k */ + unsigned c[BMAX + 1]; /* bit length count table */ + unsigned eob_len; /* length of end-of-block code (value 256) */ + unsigned f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int htl; /* table level */ + unsigned i; /* counter, current code */ + unsigned j; /* counter */ + int k; /* number of bits in current code */ + const unsigned *p; /* pointer into c[], b[], or v[] */ + huft_t *q; /* points to current table */ + huft_t r; /* table entry for structure assignment */ + huft_t *u[BMAX]; /* table stack */ + unsigned v[N_MAX + 1]; /* values in order of bit length. last v[] is never used */ + int ws[BMAX + 1]; /* bits decoded stack */ + int w; /* bits decoded */ + unsigned x[BMAX + 1]; /* bit offsets, then code stack */ + unsigned *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + unsigned z; /* number of entries in current table */ + + /* Length of EOB code, if any */ + eob_len = n > 256 ? 
b[256] : BMAX; + + *t = NULL; + + /* Generate counts for each bit length */ + memset(c, 0, sizeof(c)); + p = b; + i = n; + do { + c[*p]++; /* assume all entries <= BMAX */ + p++; /* can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) { /* null input - all zero length codes */ + *m = 0; + return 2; + } + + /* Find minimum and maximum length, bound *m by those */ + for (j = 1; (j <= BMAX) && (c[j] == 0); j++) + continue; + k = j; /* minimum code length */ + for (i = BMAX; (c[i] == 0) && i; i--) + continue; + g = i; /* maximum code length */ + *m = (*m < j) ? j : ((*m > i) ? i : *m); + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) { + y -= c[j]; + if (y < 0) + return 2; /* bad input: more codes than bits */ + } + y -= c[i]; + if (y < 0) + return 2; + c[i] += y; + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p = c + 1; + xp = x + 2; + while (--i) { /* note that i == g from above */ + j += *p++; + *xp++ = j; + } + + /* Make a table of values in order of bit lengths. + * To detect bad input, unused v[i]'s are set to invalid value UINT_MAX. + * In particular, last v[i] is never filled and must not be accessed. + */ + memset(v, 0xff, sizeof(v)); + p = b; + i = 0; + do { + j = *p++; + if (j != 0) { + v[x[j]++] = i; + } + } while (++i < n); + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + htl = -1; /* no tables yet--level -1 */ + w = ws[0] = 0; /* bits decoded */ + u[0] = NULL; /* just to keep compilers happy */ + q = NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) { + a = c[k]; + while (a--) { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > ws[htl + 1]) { + w = ws[++htl]; + + /* compute minimum size table less than or equal to *m bits */ + z = g - w; + z = z > *m ? *m : z; /* upper limit on table size */ + j = k - w; + f = 1 << j; + if (f > a + 1) { /* try a k-w bit table */ + /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) { /* try smaller tables up to z bits */ + f <<= 1; + if (f <= *++xp) { + break; /* enough codes to use up j bits */ + } + f -= *xp; /* else deduct codes from patterns */ + } + } + j = (w + j > eob_len && w < eob_len) ? eob_len - w : j; /* make EOB code end at table */ + z = 1 << j; /* table entries for j-bit table */ + ws[htl+1] = w + j; /* set bits decoded in stack */ + + /* allocate and link in new table */ + q = xzalloc((z + 1) * sizeof(huft_t)); + *t = q + 1; /* link to list for huft_free() */ + t = &(q->v.t); + u[htl] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (htl) { + x[htl] = i; /* save pattern for backing up */ + r.b = (unsigned char) (w - ws[htl - 1]); /* bits to dump before this table */ + r.e = (unsigned char) (16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = (i & ((1 << w) - 1)) >> ws[htl - 1]; + u[htl - 1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.b = (unsigned char) (k - w); + if (/*p >= v + n || -- redundant, caught by the second check: */ + *p == UINT_MAX /* do we access uninited v[i]? 
(see memset(v))*/ + ) { + r.e = 99; /* out of values--invalid code */ + } else if (*p < s) { + r.e = (unsigned char) (*p < 256 ? 16 : 15); /* 256 is EOB code */ + r.v.n = (unsigned short) (*p++); /* simple code is just the value */ + } else { + r.e = (unsigned char) e[*p - s]; /* non-simple--look up in lists */ + r.v.n = d[*p++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) { + q[j] = r; + } + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) { + i ^= j; + } + i ^= j; + + /* backup over finished tables */ + while ((i & ((1 << w) - 1)) != x[htl]) { + w = ws[--htl]; + } + } + } + + /* return actual size of base table */ + *m = ws[1]; + + /* Return 1 if we were given an incomplete table */ + return y != 0 && g != 1; +} + + +/* + * inflate (decompress) the codes in a deflated (compressed) block. + * Return an error code or zero if it all goes ok. + * + * tl, td: literal/length and distance decoder tables + * bl, bd: number of bits decoded by tl[] and td[] + */ +/* called once from inflate_block */ + +/* map formerly local static variables to globals */ +#define ml inflate_codes_ml +#define md inflate_codes_md +#define bb inflate_codes_bb +#define k inflate_codes_k +#define w inflate_codes_w +#define tl inflate_codes_tl +#define td inflate_codes_td +#define bl inflate_codes_bl +#define bd inflate_codes_bd +#define nn inflate_codes_nn +#define dd inflate_codes_dd +static void inflate_codes_setup(STATE_PARAM unsigned my_bl, unsigned my_bd) +{ + bl = my_bl; + bd = my_bd; + /* make local copies of globals */ + bb = gunzip_bb; /* initialize bit buffer */ + k = gunzip_bk; + w = gunzip_outbuf_count; /* initialize gunzip_window position */ + /* inflate the coded data */ + ml = mask_bits[bl]; /* precompute masks for speed */ + md = mask_bits[bd]; +} +/* called once from inflate_get_next_window */ +static NOINLINE int inflate_codes(STATE_PARAM_ONLY) +{ + unsigned e; /* table entry flag/number of extra bits */ + huft_t *t; /* pointer to table entry */ + + if (resume_copy) + goto do_copy; + + while (1) { /* do until end of block */ + bb = fill_bitbuffer(PASS_STATE bb, &k, bl); + t = tl + ((unsigned) bb & ml); + e = t->e; + if (e > 16) + do { + if (e == 99) { + abort_unzip(PASS_STATE_ONLY); + } + bb >>= t->b; + k -= t->b; + e -= 16; + bb = fill_bitbuffer(PASS_STATE bb, &k, e); + t = t->v.t + ((unsigned) bb & mask_bits[e]); + e = t->e; + } while (e > 16); + bb >>= t->b; + k -= t->b; + if (e == 16) { /* then it's a literal */ + gunzip_window[w++] = (unsigned char) t->v.n; + if (w == GUNZIP_WSIZE) { + gunzip_outbuf_count = w; + //flush_gunzip_window(); + w = 0; + return 1; // We have a block to read + } + } else { /* it's an EOB or a length */ + /* exit if end of block */ + if (e == 15) { + break; + } + + /* get length of block to copy */ + bb = fill_bitbuffer(PASS_STATE bb, &k, e); + nn = t->v.n + ((unsigned) bb & mask_bits[e]); + bb >>= e; + k -= e; + + /* decode distance of block to copy */ + bb = fill_bitbuffer(PASS_STATE bb, &k, bd); + t = td + ((unsigned) bb & md); + e = t->e; + if (e > 16) + do { + if (e == 99) { + abort_unzip(PASS_STATE_ONLY); + } + bb >>= t->b; + k -= t->b; + e -= 16; + bb = fill_bitbuffer(PASS_STATE bb, &k, e); + t = t->v.t + ((unsigned) bb & mask_bits[e]); + e = t->e; + } while (e > 16); + bb >>= t->b; + k -= t->b; + bb = fill_bitbuffer(PASS_STATE bb, &k, e); + dd = w - t->v.n - ((unsigned) bb & mask_bits[e]); + bb >>= e; + k -= e; + + /* do the copy */ + do_copy: + do { + /* Was: nn -= (e = 
(e = GUNZIP_WSIZE - ((dd &= GUNZIP_WSIZE - 1) > w ? dd : w)) > nn ? nn : e); */ + /* Who wrote THAT?? rewritten as: */ + unsigned delta; + + dd &= GUNZIP_WSIZE - 1; + e = GUNZIP_WSIZE - (dd > w ? dd : w); + delta = w > dd ? w - dd : dd - w; + if (e > nn) e = nn; + nn -= e; + + /* copy to new buffer to prevent possible overwrite */ + if (delta >= e) { + memcpy(gunzip_window + w, gunzip_window + dd, e); + w += e; + dd += e; + } else { + /* do it slow to avoid memcpy() overlap */ + /* !NOMEMCPY */ + do { + gunzip_window[w++] = gunzip_window[dd++]; + } while (--e); + } + if (w == GUNZIP_WSIZE) { + gunzip_outbuf_count = w; + resume_copy = (nn != 0); + //flush_gunzip_window(); + w = 0; + return 1; + } + } while (nn); + resume_copy = 0; + } + } + + /* restore the globals from the locals */ + gunzip_outbuf_count = w; /* restore global gunzip_window pointer */ + gunzip_bb = bb; /* restore global bit buffer */ + gunzip_bk = k; + + /* normally just after call to inflate_codes, but save code by putting it here */ + /* free the decoding tables (tl and td), return */ + huft_free_all(PASS_STATE_ONLY); + + /* done */ + return 0; +} +#undef ml +#undef md +#undef bb +#undef k +#undef w +#undef tl +#undef td +#undef bl +#undef bd +#undef nn +#undef dd + + +/* called once from inflate_block */ +static void inflate_stored_setup(STATE_PARAM int my_n, int my_b, int my_k) +{ + inflate_stored_n = my_n; + inflate_stored_b = my_b; + inflate_stored_k = my_k; + /* initialize gunzip_window position */ + inflate_stored_w = gunzip_outbuf_count; +} +/* called once from inflate_get_next_window */ +static int inflate_stored(STATE_PARAM_ONLY) +{ + /* read and output the compressed data */ + while (inflate_stored_n--) { + inflate_stored_b = fill_bitbuffer(PASS_STATE inflate_stored_b, &inflate_stored_k, 8); + gunzip_window[inflate_stored_w++] = (unsigned char) inflate_stored_b; + if (inflate_stored_w == GUNZIP_WSIZE) { + gunzip_outbuf_count = inflate_stored_w; + //flush_gunzip_window(); + inflate_stored_w = 0; + inflate_stored_b >>= 8; + inflate_stored_k -= 8; + return 1; /* We have a block */ + } + inflate_stored_b >>= 8; + inflate_stored_k -= 8; + } + + /* restore the globals from the locals */ + gunzip_outbuf_count = inflate_stored_w; /* restore global gunzip_window pointer */ + gunzip_bb = inflate_stored_b; /* restore global bit buffer */ + gunzip_bk = inflate_stored_k; + return 0; /* Finished */ +} + + +/* + * decompress an inflated block + * e: last block flag + * + * GLOBAL VARIABLES: bb, kk, + */ +/* Return values: -1 = inflate_stored, -2 = inflate_codes */ +/* One callsite in inflate_get_next_window */ +static int inflate_block(STATE_PARAM smallint *e) +{ + unsigned ll[286 + 30]; /* literal/length and distance code lengths */ + unsigned t; /* block type */ + unsigned b; /* bit buffer */ + unsigned k; /* number of bits in bit buffer */ + + /* make local bit buffer */ + + b = gunzip_bb; + k = gunzip_bk; + + /* read in last block bit */ + b = fill_bitbuffer(PASS_STATE b, &k, 1); + *e = b & 1; + b >>= 1; + k -= 1; + + /* read in block type */ + b = fill_bitbuffer(PASS_STATE b, &k, 2); + t = (unsigned) b & 3; + b >>= 2; + k -= 2; + + /* restore the global bit buffer */ + gunzip_bb = b; + gunzip_bk = k; + + /* Do we see block type 1 often? Yes! 
+ * TODO: fix performance problem (see below) */ + //bb_error_msg("blktype %d", t); + + /* inflate that block type */ + switch (t) { + case 0: /* Inflate stored */ + { + unsigned n; /* number of bytes in block */ + unsigned b_stored; /* bit buffer */ + unsigned k_stored; /* number of bits in bit buffer */ + + /* make local copies of globals */ + b_stored = gunzip_bb; /* initialize bit buffer */ + k_stored = gunzip_bk; + + /* go to byte boundary */ + n = k_stored & 7; + b_stored >>= n; + k_stored -= n; + + /* get the length and its complement */ + b_stored = fill_bitbuffer(PASS_STATE b_stored, &k_stored, 16); + n = ((unsigned) b_stored & 0xffff); + b_stored >>= 16; + k_stored -= 16; + + b_stored = fill_bitbuffer(PASS_STATE b_stored, &k_stored, 16); + if (n != (unsigned) ((~b_stored) & 0xffff)) { + abort_unzip(PASS_STATE_ONLY); /* error in compressed data */ + } + b_stored >>= 16; + k_stored -= 16; + + inflate_stored_setup(PASS_STATE n, b_stored, k_stored); + + return -1; + } + case 1: + /* Inflate fixed + * decompress an inflated type 1 (fixed Huffman codes) block. We should + * either replace this with a custom decoder, or at least precompute the + * Huffman tables. TODO */ + { + int i; /* temporary variable */ + unsigned bl; /* lookup bits for tl */ + unsigned bd; /* lookup bits for td */ + /* gcc 4.2.1 is too dumb to reuse stackspace. Moved up... */ + //unsigned ll[288]; /* length list for huft_build */ + + /* set up literal table */ + for (i = 0; i < 144; i++) + ll[i] = 8; + for (; i < 256; i++) + ll[i] = 9; + for (; i < 280; i++) + ll[i] = 7; + for (; i < 288; i++) /* make a complete, but wrong code set */ + ll[i] = 8; + bl = 7; + huft_build(ll, 288, 257, cplens, cplext, &inflate_codes_tl, &bl); + /* huft_build() never return nonzero - we use known data */ + + /* set up distance table */ + for (i = 0; i < 30; i++) /* make an incomplete code set */ + ll[i] = 5; + bd = 5; + huft_build(ll, 30, 0, cpdist, cpdext, &inflate_codes_td, &bd); + + /* set up data for inflate_codes() */ + inflate_codes_setup(PASS_STATE bl, bd); + + /* huft_free code moved into inflate_codes */ + + return -2; + } + case 2: /* Inflate dynamic */ + { + enum { dbits = 6 }; /* bits in base distance lookup table */ + enum { lbits = 9 }; /* bits in base literal/length lookup table */ + + huft_t *td; /* distance code table */ + unsigned i; /* temporary variables */ + unsigned j; + unsigned l; /* last length */ + unsigned m; /* mask for bit lengths table */ + unsigned n; /* number of lengths to get */ + unsigned bl; /* lookup bits for tl */ + unsigned bd; /* lookup bits for td */ + unsigned nb; /* number of bit length codes */ + unsigned nl; /* number of literal/length codes */ + unsigned nd; /* number of distance codes */ + + //unsigned ll[286 + 30];/* literal/length and distance code lengths */ + unsigned b_dynamic; /* bit buffer */ + unsigned k_dynamic; /* number of bits in bit buffer */ + + /* make local bit buffer */ + b_dynamic = gunzip_bb; + k_dynamic = gunzip_bk; + + /* read in table lengths */ + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 5); + nl = 257 + ((unsigned) b_dynamic & 0x1f); /* number of literal/length codes */ + + b_dynamic >>= 5; + k_dynamic -= 5; + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 5); + nd = 1 + ((unsigned) b_dynamic & 0x1f); /* number of distance codes */ + + b_dynamic >>= 5; + k_dynamic -= 5; + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 4); + nb = 4 + ((unsigned) b_dynamic & 0xf); /* number of bit length codes */ + + b_dynamic >>= 4; + 
k_dynamic -= 4; + if (nl > 286 || nd > 30) { + abort_unzip(PASS_STATE_ONLY); /* bad lengths */ + } + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) { + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 3); + ll[border[j]] = (unsigned) b_dynamic & 7; + b_dynamic >>= 3; + k_dynamic -= 3; + } + for (; j < 19; j++) + ll[border[j]] = 0; + + /* build decoding table for trees - single level, 7 bit lookup */ + bl = 7; + i = huft_build(ll, 19, 19, NULL, NULL, &inflate_codes_tl, &bl); + if (i != 0) { + abort_unzip(PASS_STATE_ONLY); //return i; /* incomplete code set */ + } + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[bl]; + i = l = 0; + while ((unsigned) i < n) { + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, (unsigned)bl); + td = inflate_codes_tl + ((unsigned) b_dynamic & m); + j = td->b; + b_dynamic >>= j; + k_dynamic -= j; + j = td->v.n; + if (j < 16) { /* length of code in bits (0..15) */ + ll[i++] = l = j; /* save last length in l */ + } else if (j == 16) { /* repeat last length 3 to 6 times */ + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 2); + j = 3 + ((unsigned) b_dynamic & 3); + b_dynamic >>= 2; + k_dynamic -= 2; + if ((unsigned) i + j > n) { + abort_unzip(PASS_STATE_ONLY); //return 1; + } + while (j--) { + ll[i++] = l; + } + } else if (j == 17) { /* 3 to 10 zero length codes */ + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 3); + j = 3 + ((unsigned) b_dynamic & 7); + b_dynamic >>= 3; + k_dynamic -= 3; + if ((unsigned) i + j > n) { + abort_unzip(PASS_STATE_ONLY); //return 1; + } + while (j--) { + ll[i++] = 0; + } + l = 0; + } else { /* j == 18: 11 to 138 zero length codes */ + b_dynamic = fill_bitbuffer(PASS_STATE b_dynamic, &k_dynamic, 7); + j = 11 + ((unsigned) b_dynamic & 0x7f); + b_dynamic >>= 7; + k_dynamic -= 7; + if ((unsigned) i + j > n) { + abort_unzip(PASS_STATE_ONLY); //return 1; + } + while (j--) { + ll[i++] = 0; + } + l = 0; + } + } + + /* free decoding table for trees */ + huft_free(inflate_codes_tl); + + /* restore the global bit buffer */ + gunzip_bb = b_dynamic; + gunzip_bk = k_dynamic; + + /* build the decoding tables for literal/length and distance codes */ + bl = lbits; + + i = huft_build(ll, nl, 257, cplens, cplext, &inflate_codes_tl, &bl); + if (i != 0) { + abort_unzip(PASS_STATE_ONLY); + } + bd = dbits; + i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &inflate_codes_td, &bd); + if (i != 0) { + abort_unzip(PASS_STATE_ONLY); + } + + /* set up data for inflate_codes() */ + inflate_codes_setup(PASS_STATE bl, bd); + + /* huft_free code moved into inflate_codes */ + + return -2; + } + default: + abort_unzip(PASS_STATE_ONLY); + } +} + +/* Two callsites, both in inflate_get_next_window */ +static void calculate_gunzip_crc(STATE_PARAM_ONLY) +{ + gunzip_crc = crc32_block_endian0(gunzip_crc, gunzip_window, gunzip_outbuf_count, gunzip_crc_table); + gunzip_bytes_out += gunzip_outbuf_count; +} + +/* One callsite in inflate_unzip_internal */ +static int inflate_get_next_window(STATE_PARAM_ONLY) +{ + gunzip_outbuf_count = 0; + + while (1) { + int ret; + + if (need_another_block) { + if (end_reached) { + calculate_gunzip_crc(PASS_STATE_ONLY); + end_reached = 0; + /* NB: need_another_block is still set */ + return 0; /* Last block */ + } + method = inflate_block(PASS_STATE &end_reached); + need_another_block = 0; + } + + switch (method) { + case -1: + ret = inflate_stored(PASS_STATE_ONLY); + break; + case -2: + ret = inflate_codes(PASS_STATE_ONLY); + break; + default: 
/* cannot happen */ + abort_unzip(PASS_STATE_ONLY); + } + + if (ret == 1) { + calculate_gunzip_crc(PASS_STATE_ONLY); + return 1; /* more data left */ + } + need_another_block = 1; /* end of that block */ + } + /* Doesnt get here */ +} + + +/* Called from unpack_gz_stream() and inflate_unzip() */ +static IF_DESKTOP(long long) int +inflate_unzip_internal(STATE_PARAM transformer_state_t *xstate) +{ + IF_DESKTOP(long long) int n = 0; + ssize_t nwrote; + + /* Allocate all global buffers (for DYN_ALLOC option) */ + gunzip_window = xmalloc(GUNZIP_WSIZE); + gunzip_outbuf_count = 0; + gunzip_bytes_out = 0; + gunzip_src_fd = xstate->src_fd; + + /* (re) initialize state */ + method = -1; + need_another_block = 1; + resume_copy = 0; + gunzip_bk = 0; + gunzip_bb = 0; + + /* Create the crc table */ + gunzip_crc_table = crc32_filltable(NULL, 0); + gunzip_crc = ~0; + + error_msg = "corrupted data"; + if (setjmp(error_jmp)) { + /* Error from deep inside zip machinery */ + bb_error_msg(error_msg); + n = -1; + goto ret; + } + + while (1) { + int r = inflate_get_next_window(PASS_STATE_ONLY); + nwrote = transformer_write(xstate, gunzip_window, gunzip_outbuf_count); + if (nwrote == (ssize_t)-1) { + n = -1; + goto ret; + } + IF_DESKTOP(n += nwrote;) + if (r == 0) break; + } + + /* Store unused bytes in a global buffer so calling applets can access it */ + if (gunzip_bk >= 8) { + /* Undo too much lookahead. The next read will be byte aligned + * so we can discard unused bits in the last meaningful byte. */ + bytebuffer_offset--; + bytebuffer[bytebuffer_offset] = gunzip_bb & 0xff; + gunzip_bb >>= 8; + gunzip_bk -= 8; + } + ret: + /* Cleanup */ + free(gunzip_window); + free(gunzip_crc_table); + return n; +} + + +/* External entry points */ + +/* For unzip */ + +IF_DESKTOP(long long) int FAST_FUNC +inflate_unzip(transformer_state_t *xstate) +{ + IF_DESKTOP(long long) int n; + DECLARE_STATE; + + ALLOC_STATE; + + to_read = xstate->bytes_in; +// bytebuffer_max = 0x8000; + bytebuffer_offset = 4; + bytebuffer = xmalloc(bytebuffer_max); + n = inflate_unzip_internal(PASS_STATE xstate); + free(bytebuffer); + + xstate->crc32 = gunzip_crc; + xstate->bytes_out = gunzip_bytes_out; + DEALLOC_STATE; + return n; +} + + +/* For gunzip */ + +/* helpers first */ + +/* Top up the input buffer with at least n bytes. 
*/ +static int top_up(STATE_PARAM unsigned n) +{ + int count = bytebuffer_size - bytebuffer_offset; + + if (count < (int)n) { + memmove(bytebuffer, &bytebuffer[bytebuffer_offset], count); + bytebuffer_offset = 0; + bytebuffer_size = full_read(gunzip_src_fd, &bytebuffer[count], bytebuffer_max - count); + if ((int)bytebuffer_size < 0) { + bb_error_msg(bb_msg_read_error); + return 0; + } + bytebuffer_size += count; + if (bytebuffer_size < n) + return 0; + } + return 1; +} + +static uint16_t buffer_read_le_u16(STATE_PARAM_ONLY) +{ + uint16_t res; +#if BB_LITTLE_ENDIAN + move_from_unaligned16(res, &bytebuffer[bytebuffer_offset]); +#else + res = bytebuffer[bytebuffer_offset]; + res |= bytebuffer[bytebuffer_offset + 1] << 8; +#endif + bytebuffer_offset += 2; + return res; +} + +static uint32_t buffer_read_le_u32(STATE_PARAM_ONLY) +{ + uint32_t res; +#if BB_LITTLE_ENDIAN + move_from_unaligned32(res, &bytebuffer[bytebuffer_offset]); +#else + res = bytebuffer[bytebuffer_offset]; + res |= bytebuffer[bytebuffer_offset + 1] << 8; + res |= bytebuffer[bytebuffer_offset + 2] << 16; + res |= bytebuffer[bytebuffer_offset + 3] << 24; +#endif + bytebuffer_offset += 4; + return res; +} + +static int check_header_gzip(STATE_PARAM transformer_state_t *xstate) +{ + union { + unsigned char raw[8]; + struct { + uint8_t gz_method; + uint8_t flags; + uint32_t mtime; + uint8_t xtra_flags_UNUSED; + uint8_t os_flags_UNUSED; + } PACKED formatted; + } header; + + BUILD_BUG_ON(sizeof(header) != 8); + + /* + * Rewind bytebuffer. We use the beginning because the header has 8 + * bytes, leaving enough for unwinding afterwards. + */ + bytebuffer_size -= bytebuffer_offset; + memmove(bytebuffer, &bytebuffer[bytebuffer_offset], bytebuffer_size); + bytebuffer_offset = 0; + + if (!top_up(PASS_STATE 8)) + return 0; + memcpy(header.raw, &bytebuffer[bytebuffer_offset], 8); + bytebuffer_offset += 8; + + /* Check the compression method */ + if (header.formatted.gz_method != 8) { + return 0; + } + + if (header.formatted.flags & 0x04) { + /* bit 2 set: extra field present */ + unsigned extra_short; + + if (!top_up(PASS_STATE 2)) + return 0; + extra_short = buffer_read_le_u16(PASS_STATE_ONLY); + if (!top_up(PASS_STATE extra_short)) + return 0; + /* Ignore extra field */ + bytebuffer_offset += extra_short; + } + + /* Discard original name and file comment if any */ + /* bit 3 set: original file name present */ + /* bit 4 set: file comment present */ + if (header.formatted.flags & 0x18) { + while (1) { + do { + if (!top_up(PASS_STATE 1)) + return 0; + } while (bytebuffer[bytebuffer_offset++] != 0); + if ((header.formatted.flags & 0x18) != 0x18) + break; + header.formatted.flags &= ~0x18; + } + } + + xstate->mtime = SWAP_LE32(header.formatted.mtime); + + /* Read the header checksum */ + if (header.formatted.flags & 0x02) { + if (!top_up(PASS_STATE 2)) + return 0; + bytebuffer_offset += 2; + } + return 1; +} + +IF_DESKTOP(long long) int FAST_FUNC +unpack_gz_stream(transformer_state_t *xstate) +{ + uint32_t v32; + IF_DESKTOP(long long) int total, n; + DECLARE_STATE; + +#if !ENABLE_FEATURE_SEAMLESS_Z + if (check_signature16(xstate, GZIP_MAGIC)) + return -1; +#else + if (!xstate->signature_skipped) { + uint16_t magic2; + + if (full_read(xstate->src_fd, &magic2, 2) != 2) { + bad_magic: + bb_error_msg("invalid magic"); + return -1; + } + if (magic2 == COMPRESS_MAGIC) { + xstate->signature_skipped = 2; + return unpack_Z_stream(xstate); + } + if (magic2 != GZIP_MAGIC) + goto bad_magic; + } +#endif + + total = 0; + + ALLOC_STATE; + to_read = -1; +// 
bytebuffer_max = 0x8000; + bytebuffer = xmalloc(bytebuffer_max); + gunzip_src_fd = xstate->src_fd; + + again: + if (!check_header_gzip(PASS_STATE xstate)) { + bb_error_msg("corrupted data"); + total = -1; + goto ret; + } + + n = inflate_unzip_internal(PASS_STATE xstate); + if (n < 0) { + total = -1; + goto ret; + } + total += n; + + if (!top_up(PASS_STATE 8)) { + bb_error_msg("corrupted data"); + total = -1; + goto ret; + } + + /* Validate decompression - crc */ + v32 = buffer_read_le_u32(PASS_STATE_ONLY); + if ((~gunzip_crc) != v32) { + bb_error_msg("crc error"); + total = -1; + goto ret; + } + + /* Validate decompression - size */ + v32 = buffer_read_le_u32(PASS_STATE_ONLY); + if ((uint32_t)gunzip_bytes_out != v32) { + bb_error_msg("incorrect length"); + total = -1; + } + + if (!top_up(PASS_STATE 2)) + goto ret; /* EOF */ + + if (bytebuffer[bytebuffer_offset] == 0x1f + && bytebuffer[bytebuffer_offset + 1] == 0x8b + ) { + bytebuffer_offset += 2; + goto again; + } + /* GNU gzip says: */ + /*bb_error_msg("decompression OK, trailing garbage ignored");*/ + + ret: + free(bytebuffer); + DEALLOC_STATE; + return total; +} diff --git a/probe-busybox/archival/libarchive/decompress_uncompress.c b/probe-busybox/archival/libarchive/decompress_uncompress.c new file mode 100644 index 00000000..034ed502 --- /dev/null +++ b/probe-busybox/archival/libarchive/decompress_uncompress.c @@ -0,0 +1,314 @@ +/* vi: set sw=4 ts=4: */ +/* uncompress for busybox -- (c) 2002 Robert Griebl + * + * based on the original compress42.c source + * (see disclaimer below) + */ + +/* (N)compress42.c - File compression ala IEEE Computer, Mar 1992. + * + * Authors: + * Spencer W. Thomas (decvax!harpo!utah-cs!utah-gr!thomas) + * Jim McKie (decvax!mcvax!jim) + * Steve Davies (decvax!vax135!petsd!peora!srd) + * Ken Turkowski (decvax!decwrl!turtlevax!ken) + * James A. Woods (decvax!ihnp4!ames!jaw) + * Joe Orost (decvax!vax135!petsd!joe) + * Dave Mack (csu@alembic.acs.com) + * Peter Jannesen, Network Communication Systems + * (peter@ncs.nl) + * + * marc@suse.de : a small security fix for a buffer overflow + * + * [... History snipped ...] + * + */ + +#include "libbb.h" +#include "bb_archive.h" + + +/* Default input buffer size */ +#define IBUFSIZ 2048 + +/* Default output buffer size */ +#define OBUFSIZ 2048 + +/* Defines for third byte of header */ +#define BIT_MASK 0x1f /* Mask for 'number of compresssion bits' */ + /* Masks 0x20 and 0x40 are free. */ + /* I think 0x20 should mean that there is */ + /* a fourth header byte (for expansion). */ +#define BLOCK_MODE 0x80 /* Block compression if table is full and */ + /* compression rate is dropping flush tables */ + /* the next two codes should not be changed lightly, as they must not */ + /* lie within the contiguous general code space. 
*/ +#define FIRST 257 /* first free entry */ +#define CLEAR 256 /* table clear output code */ + +#define INIT_BITS 9 /* initial number of bits/code */ + + +/* machine variants which require cc -Dmachine: pdp11, z8000, DOS */ +#define HBITS 17 /* 50% occupancy */ +#define HSIZE (1<src_fd, inbuf, 1) != 1) { + bb_error_msg("short read"); + goto err; + } + + maxbits = inbuf[0] & BIT_MASK; + block_mode = inbuf[0] & BLOCK_MODE; + maxmaxcode = MAXCODE(maxbits); + + if (maxbits > BITS) { + bb_error_msg("compressed with %d bits, can only handle " + BITS_STR" bits", maxbits); + goto err; + } + + n_bits = INIT_BITS; + maxcode = MAXCODE(INIT_BITS) - 1; + bitmask = (1 << INIT_BITS) - 1; + oldcode = -1; + finchar = 0; + outpos = 0; + posbits = 0 << 3; + + free_ent = ((block_mode) ? FIRST : 256); + + /* As above, initialize the first 256 entries in the table. */ + /*clear_tab_prefixof(); - done by xzalloc */ + + { + int i; + for (i = 255; i >= 0; --i) + tab_suffixof(i) = (unsigned char) i; + } + + do { + resetbuf: + { + int i; + int e; + int o; + + o = posbits >> 3; + e = insize - o; + + for (i = 0; i < e; ++i) + inbuf[i] = inbuf[i + o]; + + insize = e; + posbits = 0; + } + + if (insize < (int) (IBUFSIZ + 64) - IBUFSIZ) { + rsize = safe_read(xstate->src_fd, inbuf + insize, IBUFSIZ); + if (rsize < 0) + bb_error_msg_and_die(bb_msg_read_error); + insize += rsize; + } + + inbits = ((rsize > 0) ? (insize - insize % n_bits) << 3 : + (insize << 3) - (n_bits - 1)); + + while (inbits > posbits) { + long code; + + if (free_ent > maxcode) { + posbits = + ((posbits - 1) + + ((n_bits << 3) - + (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); + ++n_bits; + if (n_bits == maxbits) { + maxcode = maxmaxcode; + } else { + maxcode = MAXCODE(n_bits) - 1; + } + bitmask = (1 << n_bits) - 1; + goto resetbuf; + } + { + unsigned char *p = &inbuf[posbits >> 3]; + code = ((p[0] + | ((long) (p[1]) << 8) + | ((long) (p[2]) << 16)) >> (posbits & 0x7)) & bitmask; + } + posbits += n_bits; + + if (oldcode == -1) { + if (code >= 256) + bb_error_msg_and_die("corrupted data"); /* %ld", code); */ + oldcode = code; + finchar = (int) oldcode; + outbuf[outpos++] = (unsigned char) finchar; + continue; + } + + if (code == CLEAR && block_mode) { + clear_tab_prefixof(); + free_ent = FIRST - 1; + posbits = + ((posbits - 1) + + ((n_bits << 3) - + (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); + n_bits = INIT_BITS; + maxcode = MAXCODE(INIT_BITS) - 1; + bitmask = (1 << INIT_BITS) - 1; + goto resetbuf; + } + + incode = code; + stackp = de_stack; + + /* Special case for KwKwK string. 
*/ + if (code >= free_ent) { + if (code > free_ent) { +/* + unsigned char *p; + + posbits -= n_bits; + p = &inbuf[posbits >> 3]; + bb_error_msg + ("insize:%d posbits:%d inbuf:%02X %02X %02X %02X %02X (%d)", + insize, posbits, p[-1], p[0], p[1], p[2], p[3], + (posbits & 07)); +*/ + bb_error_msg("corrupted data"); + goto err; + } + + *--stackp = (unsigned char) finchar; + code = oldcode; + } + + /* Generate output characters in reverse order */ + while (code >= 256) { + if (stackp <= &htabof(0)) + bb_error_msg_and_die("corrupted data"); + *--stackp = tab_suffixof(code); + code = tab_prefixof(code); + } + + finchar = tab_suffixof(code); + *--stackp = (unsigned char) finchar; + + /* And put them out in forward order */ + { + int i; + + i = de_stack - stackp; + if (outpos + i >= OBUFSIZ) { + do { + if (i > OBUFSIZ - outpos) { + i = OBUFSIZ - outpos; + } + + if (i > 0) { + memcpy(outbuf + outpos, stackp, i); + outpos += i; + } + + if (outpos >= OBUFSIZ) { + xtransformer_write(xstate, outbuf, outpos); + IF_DESKTOP(total_written += outpos;) + outpos = 0; + } + stackp += i; + i = de_stack - stackp; + } while (i > 0); + } else { + memcpy(outbuf + outpos, stackp, i); + outpos += i; + } + } + + /* Generate the new entry. */ + if (free_ent < maxmaxcode) { + tab_prefixof(free_ent) = (unsigned short) oldcode; + tab_suffixof(free_ent) = (unsigned char) finchar; + free_ent++; + } + + /* Remember previous code. */ + oldcode = incode; + } + } while (rsize > 0); + + if (outpos > 0) { + xtransformer_write(xstate, outbuf, outpos); + IF_DESKTOP(total_written += outpos;) + } + + retval = IF_DESKTOP(total_written) + 0; + err: + free(inbuf); + free(outbuf); + free(htab); + free(codetab); + return retval; +} diff --git a/probe-busybox/archival/libarchive/decompress_unlzma.c b/probe-busybox/archival/libarchive/decompress_unlzma.c new file mode 100644 index 00000000..c8622f97 --- /dev/null +++ b/probe-busybox/archival/libarchive/decompress_unlzma.c @@ -0,0 +1,467 @@ +/* vi: set sw=4 ts=4: */ +/* + * Small lzma deflate implementation. + * Copyright (C) 2006 Aurelien Jacobs + * + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + * Copyright (C) 1999-2005 Igor Pavlov + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ +#include "libbb.h" +#include "bb_archive.h" + +#if ENABLE_FEATURE_LZMA_FAST +# define speed_inline ALWAYS_INLINE +# define size_inline +#else +# define speed_inline +# define size_inline ALWAYS_INLINE +#endif + + +typedef struct { + int fd; + uint8_t *ptr; + +/* Was keeping rc on stack in unlzma and separately allocating buffer, + * but with "buffer 'attached to' allocated rc" code is smaller: */ + /* uint8_t *buffer; */ +#define RC_BUFFER ((uint8_t*)(rc+1)) + + uint8_t *buffer_end; + +/* Had provisions for variable buffer, but we don't need it here */ + /* int buffer_size; */ +#define RC_BUFFER_SIZE 0x10000 + + uint32_t code; + uint32_t range; + uint32_t bound; +} rc_t; + +#define RC_TOP_BITS 24 +#define RC_MOVE_BITS 5 +#define RC_MODEL_TOTAL_BITS 11 + + +/* Called once in rc_do_normalize() */ +static void rc_read(rc_t *rc) +{ + int buffer_size = safe_read(rc->fd, RC_BUFFER, RC_BUFFER_SIZE); +//TODO: return -1 instead +//This will make unlzma delete broken unpacked file on unpack errors + if (buffer_size <= 0) + bb_error_msg_and_die("unexpected EOF"); + rc->buffer_end = RC_BUFFER + buffer_size; + rc->ptr = RC_BUFFER; +} + +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_1() */ +static void rc_do_normalize(rc_t *rc) +{ + if (rc->ptr >= rc->buffer_end) + rc_read(rc); + rc->range <<= 8; + rc->code = (rc->code << 8) | *rc->ptr++; +} +static ALWAYS_INLINE void rc_normalize(rc_t *rc) +{ + if (rc->range < (1 << RC_TOP_BITS)) { + rc_do_normalize(rc); + } +} + +/* Called once */ +static ALWAYS_INLINE rc_t* rc_init(int fd) /*, int buffer_size) */ +{ + int i; + rc_t *rc; + + rc = xzalloc(sizeof(*rc) + RC_BUFFER_SIZE); + + rc->fd = fd; + /* rc->ptr = rc->buffer_end; */ + + for (i = 0; i < 5; i++) { + rc_do_normalize(rc); + } + rc->range = 0xffffffff; + return rc; +} + +/* Called once */ +static ALWAYS_INLINE void rc_free(rc_t *rc) +{ + free(rc); +} + +/* rc_is_bit_1 is called 9 times */ +static speed_inline int rc_is_bit_1(rc_t *rc, uint16_t *p) +{ + rc_normalize(rc); + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); + if (rc->code < rc->bound) { + rc->range = rc->bound; + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; + return 0; + } + rc->range -= rc->bound; + rc->code -= rc->bound; + *p -= *p >> RC_MOVE_BITS; + return 1; +} + +/* Called 4 times in unlzma loop */ +static ALWAYS_INLINE int rc_get_bit(rc_t *rc, uint16_t *p, int *symbol) +{ + int ret = rc_is_bit_1(rc, p); + *symbol = *symbol * 2 + ret; + return ret; +} + +/* Called once */ +static ALWAYS_INLINE int rc_direct_bit(rc_t *rc) +{ + rc_normalize(rc); + rc->range >>= 1; + if (rc->code >= rc->range) { + rc->code -= rc->range; + return 1; + } + return 0; +} + +/* Called twice */ +static speed_inline void +rc_bit_tree_decode(rc_t *rc, uint16_t *p, int num_levels, int *symbol) +{ + int i = num_levels; + + *symbol = 1; + while (i--) + rc_get_bit(rc, p + *symbol, symbol); + *symbol -= 1 << num_levels; +} + + +typedef struct { + uint8_t pos; + uint32_t dict_size; + uint64_t dst_size; +} PACKED lzma_header_t; + + +/* #defines will force compiler to compute/optimize each one with each usage. + * Have heart and use enum instead. 
*/ +enum { + LZMA_BASE_SIZE = 1846, + LZMA_LIT_SIZE = 768, + + LZMA_NUM_POS_BITS_MAX = 4, + + LZMA_LEN_NUM_LOW_BITS = 3, + LZMA_LEN_NUM_MID_BITS = 3, + LZMA_LEN_NUM_HIGH_BITS = 8, + + LZMA_LEN_CHOICE = 0, + LZMA_LEN_CHOICE_2 = (LZMA_LEN_CHOICE + 1), + LZMA_LEN_LOW = (LZMA_LEN_CHOICE_2 + 1), + LZMA_LEN_MID = (LZMA_LEN_LOW \ + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))), + LZMA_LEN_HIGH = (LZMA_LEN_MID \ + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))), + LZMA_NUM_LEN_PROBS = (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)), + + LZMA_NUM_STATES = 12, + LZMA_NUM_LIT_STATES = 7, + + LZMA_START_POS_MODEL_INDEX = 4, + LZMA_END_POS_MODEL_INDEX = 14, + LZMA_NUM_FULL_DISTANCES = (1 << (LZMA_END_POS_MODEL_INDEX >> 1)), + + LZMA_NUM_POS_SLOT_BITS = 6, + LZMA_NUM_LEN_TO_POS_STATES = 4, + + LZMA_NUM_ALIGN_BITS = 4, + + LZMA_MATCH_MIN_LEN = 2, + + LZMA_IS_MATCH = 0, + LZMA_IS_REP = (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)), + LZMA_IS_REP_G0 = (LZMA_IS_REP + LZMA_NUM_STATES), + LZMA_IS_REP_G1 = (LZMA_IS_REP_G0 + LZMA_NUM_STATES), + LZMA_IS_REP_G2 = (LZMA_IS_REP_G1 + LZMA_NUM_STATES), + LZMA_IS_REP_0_LONG = (LZMA_IS_REP_G2 + LZMA_NUM_STATES), + LZMA_POS_SLOT = (LZMA_IS_REP_0_LONG \ + + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)), + LZMA_SPEC_POS = (LZMA_POS_SLOT \ + + (LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)), + LZMA_ALIGN = (LZMA_SPEC_POS \ + + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX), + LZMA_LEN_CODER = (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)), + LZMA_REP_LEN_CODER = (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS), + LZMA_LITERAL = (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS), +}; + + +IF_DESKTOP(long long) int FAST_FUNC +unpack_lzma_stream(transformer_state_t *xstate) +{ + IF_DESKTOP(long long total_written = 0;) + lzma_header_t header; + int lc, pb, lp; + uint32_t pos_state_mask; + uint32_t literal_pos_mask; + uint16_t *p; + rc_t *rc; + int i; + uint8_t *buffer; + uint8_t previous_byte = 0; + size_t buffer_pos = 0, global_pos = 0; + int len = 0; + int state = 0; + uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1; + + if (full_read(xstate->src_fd, &header, sizeof(header)) != sizeof(header) + || header.pos >= (9 * 5 * 5) + ) { + bb_error_msg("bad lzma header"); + return -1; + } + + i = header.pos / 9; + lc = header.pos % 9; + pb = i / 5; + lp = i % 5; + pos_state_mask = (1 << pb) - 1; + literal_pos_mask = (1 << lp) - 1; + + /* Example values from linux-3.3.4.tar.lzma: + * dict_size: 64M, dst_size: 2^64-1 + */ + header.dict_size = SWAP_LE32(header.dict_size); + header.dst_size = SWAP_LE64(header.dst_size); + + if (header.dict_size == 0) + header.dict_size++; + + buffer = xmalloc(MIN(header.dst_size, header.dict_size)); + + { + int num_probs; + + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); + p = xmalloc(num_probs * sizeof(*p)); + num_probs += LZMA_LITERAL - LZMA_BASE_SIZE; + for (i = 0; i < num_probs; i++) + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; + } + + rc = rc_init(xstate->src_fd); /*, RC_BUFFER_SIZE); */ + + while (global_pos + buffer_pos < header.dst_size) { + int pos_state = (buffer_pos + global_pos) & pos_state_mask; + uint16_t *prob = p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state; + + if (!rc_is_bit_1(rc, prob)) { + static const char next_state[LZMA_NUM_STATES] = + { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5 }; + int mi = 1; + + prob = (p + LZMA_LITERAL + + (LZMA_LIT_SIZE * ((((buffer_pos + global_pos) & literal_pos_mask) << lc) + + (previous_byte >> (8 - lc)) + ) + ) + ); + + if (state >= LZMA_NUM_LIT_STATES) { + 
int match_byte; + uint32_t pos = buffer_pos - rep0; + + while (pos >= header.dict_size) + pos += header.dict_size; + match_byte = buffer[pos]; + do { + int bit; + + match_byte <<= 1; + bit = match_byte & 0x100; + bit ^= (rc_get_bit(rc, prob + 0x100 + bit + mi, &mi) << 8); /* 0x100 or 0 */ + if (bit) + break; + } while (mi < 0x100); + } + while (mi < 0x100) { + rc_get_bit(rc, prob + mi, &mi); + } + + state = next_state[state]; + + previous_byte = (uint8_t) mi; +#if ENABLE_FEATURE_LZMA_FAST + one_byte1: + buffer[buffer_pos++] = previous_byte; + if (buffer_pos == header.dict_size) { + buffer_pos = 0; + global_pos += header.dict_size; + if (transformer_write(xstate, buffer, header.dict_size) != (ssize_t)header.dict_size) + goto bad; + IF_DESKTOP(total_written += header.dict_size;) + } +#else + len = 1; + goto one_byte2; +#endif + } else { + int num_bits; + int offset; + uint16_t *prob2; +#define prob_len prob2 + + prob2 = p + LZMA_IS_REP + state; + if (!rc_is_bit_1(rc, prob2)) { + rep3 = rep2; + rep2 = rep1; + rep1 = rep0; + state = state < LZMA_NUM_LIT_STATES ? 0 : 3; + prob2 = p + LZMA_LEN_CODER; + } else { + prob2 += LZMA_IS_REP_G0 - LZMA_IS_REP; + if (!rc_is_bit_1(rc, prob2)) { + prob2 = (p + LZMA_IS_REP_0_LONG + + (state << LZMA_NUM_POS_BITS_MAX) + + pos_state + ); + if (!rc_is_bit_1(rc, prob2)) { +#if ENABLE_FEATURE_LZMA_FAST + uint32_t pos = buffer_pos - rep0; + state = state < LZMA_NUM_LIT_STATES ? 9 : 11; + while (pos >= header.dict_size) + pos += header.dict_size; + previous_byte = buffer[pos]; + goto one_byte1; +#else + state = state < LZMA_NUM_LIT_STATES ? 9 : 11; + len = 1; + goto string; +#endif + } + } else { + uint32_t distance; + + prob2 += LZMA_IS_REP_G1 - LZMA_IS_REP_G0; + distance = rep1; + if (rc_is_bit_1(rc, prob2)) { + prob2 += LZMA_IS_REP_G2 - LZMA_IS_REP_G1; + distance = rep2; + if (rc_is_bit_1(rc, prob2)) { + distance = rep3; + rep3 = rep2; + } + rep2 = rep1; + } + rep1 = rep0; + rep0 = distance; + } + state = state < LZMA_NUM_LIT_STATES ? 8 : 11; + prob2 = p + LZMA_REP_LEN_CODER; + } + + prob_len = prob2 + LZMA_LEN_CHOICE; + num_bits = LZMA_LEN_NUM_LOW_BITS; + if (!rc_is_bit_1(rc, prob_len)) { + prob_len += LZMA_LEN_LOW - LZMA_LEN_CHOICE + + (pos_state << LZMA_LEN_NUM_LOW_BITS); + offset = 0; + } else { + prob_len += LZMA_LEN_CHOICE_2 - LZMA_LEN_CHOICE; + if (!rc_is_bit_1(rc, prob_len)) { + prob_len += LZMA_LEN_MID - LZMA_LEN_CHOICE_2 + + (pos_state << LZMA_LEN_NUM_MID_BITS); + offset = 1 << LZMA_LEN_NUM_LOW_BITS; + num_bits += LZMA_LEN_NUM_MID_BITS - LZMA_LEN_NUM_LOW_BITS; + } else { + prob_len += LZMA_LEN_HIGH - LZMA_LEN_CHOICE_2; + offset = ((1 << LZMA_LEN_NUM_LOW_BITS) + + (1 << LZMA_LEN_NUM_MID_BITS)); + num_bits += LZMA_LEN_NUM_HIGH_BITS - LZMA_LEN_NUM_LOW_BITS; + } + } + rc_bit_tree_decode(rc, prob_len, num_bits, &len); + len += offset; + + if (state < 4) { + int pos_slot; + uint16_t *prob3; + + state += LZMA_NUM_LIT_STATES; + prob3 = p + LZMA_POS_SLOT + + ((len < LZMA_NUM_LEN_TO_POS_STATES ? 
len : + LZMA_NUM_LEN_TO_POS_STATES - 1) + << LZMA_NUM_POS_SLOT_BITS); + rc_bit_tree_decode(rc, prob3, + LZMA_NUM_POS_SLOT_BITS, &pos_slot); + rep0 = pos_slot; + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { + int i2, mi2, num_bits2 = (pos_slot >> 1) - 1; + rep0 = 2 | (pos_slot & 1); + if (pos_slot < LZMA_END_POS_MODEL_INDEX) { + rep0 <<= num_bits2; + prob3 = p + LZMA_SPEC_POS + rep0 - pos_slot - 1; + } else { + for (; num_bits2 != LZMA_NUM_ALIGN_BITS; num_bits2--) + rep0 = (rep0 << 1) | rc_direct_bit(rc); + rep0 <<= LZMA_NUM_ALIGN_BITS; + prob3 = p + LZMA_ALIGN; + } + i2 = 1; + mi2 = 1; + while (num_bits2--) { + if (rc_get_bit(rc, prob3 + mi2, &mi2)) + rep0 |= i2; + i2 <<= 1; + } + } + if (++rep0 == 0) + break; + } + + len += LZMA_MATCH_MIN_LEN; + IF_NOT_FEATURE_LZMA_FAST(string:) + do { + uint32_t pos = buffer_pos - rep0; + while (pos >= header.dict_size) + pos += header.dict_size; + previous_byte = buffer[pos]; + IF_NOT_FEATURE_LZMA_FAST(one_byte2:) + buffer[buffer_pos++] = previous_byte; + if (buffer_pos == header.dict_size) { + buffer_pos = 0; + global_pos += header.dict_size; + if (transformer_write(xstate, buffer, header.dict_size) != (ssize_t)header.dict_size) + goto bad; + IF_DESKTOP(total_written += header.dict_size;) + } + len--; + } while (len != 0 && buffer_pos < header.dst_size); + /* FIXME: ...........^^^^^ + * shouldn't it be "global_pos + buffer_pos < header.dst_size"? + */ + } + } + + { + IF_NOT_DESKTOP(int total_written = 0; /* success */) + IF_DESKTOP(total_written += buffer_pos;) + if (transformer_write(xstate, buffer, buffer_pos) != (ssize_t)buffer_pos) { + bad: + total_written = -1; /* failure */ + } + rc_free(rc); + free(p); + free(buffer); + return total_written; + } +} diff --git a/probe-busybox/archival/libarchive/decompress_unxz.c b/probe-busybox/archival/libarchive/decompress_unxz.c new file mode 100644 index 00000000..cd32cc74 --- /dev/null +++ b/probe-busybox/archival/libarchive/decompress_unxz.c @@ -0,0 +1,131 @@ +/* + * This file uses XZ Embedded library code which is written + * by Lasse Collin + * and Igor Pavlov + * + * See README file in unxz/ directory for more information. + * + * This file is: + * Copyright (C) 2010 Denys Vlasenko + * Licensed under GPLv2, see file LICENSE in this source tree. 
+ */ +#include "libbb.h" +#include "bb_archive.h" + +#define XZ_FUNC FAST_FUNC +#define XZ_EXTERN static + +#define XZ_DEC_DYNALLOC + +/* Skip check (rather than fail) of unsupported hash functions */ +#define XZ_DEC_ANY_CHECK 1 + +/* We use our own crc32 function */ +#define XZ_INTERNAL_CRC32 0 +static uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) +{ + return ~crc32_block_endian0(~crc, buf, size, global_crc32_table); +} + +/* We use arch-optimized unaligned accessors */ +#define get_unaligned_le32(buf) ({ uint32_t v; move_from_unaligned32(v, buf); SWAP_LE32(v); }) +#define get_unaligned_be32(buf) ({ uint32_t v; move_from_unaligned32(v, buf); SWAP_BE32(v); }) +#define put_unaligned_le32(val, buf) move_to_unaligned32(buf, SWAP_LE32(val)) +#define put_unaligned_be32(val, buf) move_to_unaligned32(buf, SWAP_BE32(val)) + +#include "unxz/xz_dec_bcj.c" +#include "unxz/xz_dec_lzma2.c" +#include "unxz/xz_dec_stream.c" + +IF_DESKTOP(long long) int FAST_FUNC +unpack_xz_stream(transformer_state_t *xstate) +{ + enum xz_ret xz_result; + struct xz_buf iobuf; + struct xz_dec *state; + unsigned char *membuf; + IF_DESKTOP(long long) int total = 0; + + if (!global_crc32_table) + global_crc32_table = crc32_filltable(NULL, /*endian:*/ 0); + + memset(&iobuf, 0, sizeof(iobuf)); + membuf = xmalloc(2 * BUFSIZ); + iobuf.in = membuf; + iobuf.out = membuf + BUFSIZ; + iobuf.out_size = BUFSIZ; + + if (!xstate || xstate->signature_skipped) { + /* Preload XZ file signature */ + strcpy((char*)membuf, HEADER_MAGIC); + iobuf.in_size = HEADER_MAGIC_SIZE; + } /* else: let xz code read & check it */ + + /* Limit memory usage to about 64 MiB. */ + state = xz_dec_init(XZ_DYNALLOC, 64*1024*1024); + + xz_result = X_OK; + while (1) { + if (iobuf.in_pos == iobuf.in_size) { + int rd = safe_read(xstate->src_fd, membuf, BUFSIZ); + if (rd < 0) { + bb_error_msg(bb_msg_read_error); + total = -1; + break; + } + if (rd == 0 && xz_result == XZ_STREAM_END) + break; + iobuf.in_size = rd; + iobuf.in_pos = 0; + } + if (xz_result == XZ_STREAM_END) { + /* + * Try to start decoding next concatenated stream. + * Stream padding must always be a multiple of four + * bytes to preserve four-byte alignment. To keep the + * code slightly smaller, we aren't as strict here as + * the .xz spec requires. We just skip all zero-bytes + * without checking the alignment and thus can accept + * files that aren't valid, e.g. the XZ utils test + * files bad-0pad-empty.xz and bad-0catpad-empty.xz. + */ + do { + if (membuf[iobuf.in_pos] != 0) { + xz_dec_reset(state); + goto do_run; + } + iobuf.in_pos++; + } while (iobuf.in_pos < iobuf.in_size); + } + do_run: +// bb_error_msg(">in pos:%d size:%d out pos:%d size:%d", +// iobuf.in_pos, iobuf.in_size, iobuf.out_pos, iobuf.out_size); + xz_result = xz_dec_run(state, &iobuf); +// bb_error_msg("file_header->name) + return EXIT_SUCCESS; + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/filter_accept_list.c b/probe-busybox/archival/libarchive/filter_accept_list.c new file mode 100644 index 00000000..a2d4b23e --- /dev/null +++ b/probe-busybox/archival/libarchive/filter_accept_list.c @@ -0,0 +1,19 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2002 by Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +/* + * Accept names that are in the accept list, ignoring reject list. 
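+ *
+ * [Editor's note, illustrative only] The accept list holds shell patterns
+ * matched with fnmatch() by find_list_entry(); e.g. with an accept list of
+ * "*.txt" and "control*", the entries "notes.txt" and "control.tar" are
+ * accepted (EXIT_SUCCESS) while "data.bin" is not (EXIT_FAILURE).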
+ */ +char FAST_FUNC filter_accept_list(archive_handle_t *archive_handle) +{ + if (find_list_entry(archive_handle->accept, archive_handle->file_header->name)) + return EXIT_SUCCESS; + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/filter_accept_list_reassign.c b/probe-busybox/archival/libarchive/filter_accept_list_reassign.c new file mode 100644 index 00000000..b9acfbc0 --- /dev/null +++ b/probe-busybox/archival/libarchive/filter_accept_list_reassign.c @@ -0,0 +1,61 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2002 by Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +/* Built and used only if ENABLE_DPKG || ENABLE_DPKG_DEB */ + +/* + * Reassign the subarchive metadata parser based on the filename extension + * e.g. if its a .tar.gz modify archive_handle->sub_archive to process a .tar.gz + * or if its a .tar.bz2 make archive_handle->sub_archive handle that + */ +char FAST_FUNC filter_accept_list_reassign(archive_handle_t *archive_handle) +{ + /* Check the file entry is in the accept list */ + if (find_list_entry(archive_handle->accept, archive_handle->file_header->name)) { + const char *name_ptr; + + /* Find extension */ + name_ptr = strrchr(archive_handle->file_header->name, '.'); + if (!name_ptr) + return EXIT_FAILURE; + name_ptr++; + + /* Modify the subarchive handler based on the extension */ + if (strcmp(name_ptr, "tar") == 0) { + archive_handle->dpkg__action_data_subarchive = get_header_tar; + return EXIT_SUCCESS; + } + if (ENABLE_FEATURE_SEAMLESS_GZ + && strcmp(name_ptr, "gz") == 0 + ) { + archive_handle->dpkg__action_data_subarchive = get_header_tar_gz; + return EXIT_SUCCESS; + } + if (ENABLE_FEATURE_SEAMLESS_BZ2 + && strcmp(name_ptr, "bz2") == 0 + ) { + archive_handle->dpkg__action_data_subarchive = get_header_tar_bz2; + return EXIT_SUCCESS; + } + if (ENABLE_FEATURE_SEAMLESS_LZMA + && strcmp(name_ptr, "lzma") == 0 + ) { + archive_handle->dpkg__action_data_subarchive = get_header_tar_lzma; + return EXIT_SUCCESS; + } + if (ENABLE_FEATURE_SEAMLESS_XZ + && strcmp(name_ptr, "xz") == 0 + ) { + archive_handle->dpkg__action_data_subarchive = get_header_tar_xz; + return EXIT_SUCCESS; + } + } + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/filter_accept_reject_list.c b/probe-busybox/archival/libarchive/filter_accept_reject_list.c new file mode 100644 index 00000000..24837494 --- /dev/null +++ b/probe-busybox/archival/libarchive/filter_accept_reject_list.c @@ -0,0 +1,38 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2002 by Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +/* + * Accept names that are in the accept list and not in the reject list + */ +char FAST_FUNC filter_accept_reject_list(archive_handle_t *archive_handle) +{ + const char *key; + const llist_t *reject_entry; + const llist_t *accept_entry; + + key = archive_handle->file_header->name; + + /* If the key is in a reject list fail */ + reject_entry = find_list_entry2(archive_handle->reject, key); + if (reject_entry) { + return EXIT_FAILURE; + } + + /* Fail if an accept list was specified and the key wasnt in there */ + if (archive_handle->accept) { + accept_entry = find_list_entry2(archive_handle->accept, key); + if (!accept_entry) { + return EXIT_FAILURE; + } + } + + /* Accepted */ + return EXIT_SUCCESS; +} diff --git a/probe-busybox/archival/libarchive/find_list_entry.c b/probe-busybox/archival/libarchive/find_list_entry.c new file mode 100644 index 00000000..56032c65 --- /dev/null +++ b/probe-busybox/archival/libarchive/find_list_entry.c @@ -0,0 +1,54 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2002 by Glenn McGrath + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include +#include "libbb.h" +#include "bb_archive.h" + +/* Find a string in a shell pattern list */ +const llist_t* FAST_FUNC find_list_entry(const llist_t *list, const char *filename) +{ + while (list) { + if (fnmatch(list->data, filename, 0) == 0) { + return list; + } + list = list->link; + } + return NULL; +} + +/* Same, but compares only path components present in pattern + * (extra trailing path components in filename are assumed to match) + */ +const llist_t* FAST_FUNC find_list_entry2(const llist_t *list, const char *filename) +{ + char buf[PATH_MAX]; + int pattern_slash_cnt; + const char *c; + char *d; + + while (list) { + c = list->data; + pattern_slash_cnt = 0; + while (*c) + if (*c++ == '/') pattern_slash_cnt++; + c = filename; + d = buf; + /* paranoia is better than buffer overflows */ + while (*c && d != buf + sizeof(buf)-1) { + if (*c == '/' && --pattern_slash_cnt < 0) + break; + *d++ = *c++; + } + *d = '\0'; + if (fnmatch(list->data, buf, 0) == 0) { + return list; + } + list = list->link; + } + return NULL; +} diff --git a/probe-busybox/archival/libarchive/get_header_ar.c b/probe-busybox/archival/libarchive/get_header_ar.c new file mode 100644 index 00000000..c66bb3ee --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_ar.c @@ -0,0 +1,144 @@ +/* vi: set sw=4 ts=4: */ +/* Copyright 2001 Glenn McGrath. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" +#include "ar.h" + +/* WARNING: Clobbers str[len], so fields must be read in reverse order! */ +static unsigned read_num(char *str, int base, int len) +{ + int err; + + /* ar fields are fixed length text strings (padded with spaces). + * Ensure bb_strtou doesn't read past the field in case the full + * width is used. 
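+ *
+ * [Editor's note, layout recap - not upstream text] The ar header parsed
+ * below is 60 bytes of space-padded ASCII:
+ *   name[16] date[12] uid[6] gid[6] mode[8] size[10] magic[2]
+ * Because read_num() writes a NUL into str[len] (the first byte of the NEXT
+ * field), get_header_ar() checks magic first and then reads the numeric
+ * fields back to front: size, then mode, gid, uid, date.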
*/ + str[len] = 0; + + /* This code works because + * on misformatted numbers bb_strtou returns all-ones */ + err = bb_strtou(str, NULL, base); + if (err == -1) + bb_error_msg_and_die("invalid ar header"); + return err; +} + +char FAST_FUNC get_header_ar(archive_handle_t *archive_handle) +{ + file_header_t *typed = archive_handle->file_header; + unsigned size; + union { + char raw[60]; + struct ar_header formatted; + } ar; +#if ENABLE_FEATURE_AR_LONG_FILENAMES + static char *ar_long_names; + static unsigned ar_long_name_size; +#endif + + /* dont use xread as we want to handle the error ourself */ + if (read(archive_handle->src_fd, ar.raw, 60) != 60) { + /* End Of File */ + return EXIT_FAILURE; + } + + /* ar header starts on an even byte (2 byte aligned) + * '\n' is used for padding + */ + if (ar.raw[0] == '\n') { + /* fix up the header, we started reading 1 byte too early */ + memmove(ar.raw, &ar.raw[1], 59); + ar.raw[59] = xread_char(archive_handle->src_fd); + archive_handle->offset++; + } + archive_handle->offset += 60; + + if (ar.formatted.magic[0] != '`' || ar.formatted.magic[1] != '\n') + bb_error_msg_and_die("invalid ar header"); + + /* + * Note that the fields MUST be read in reverse order as + * read_num() clobbers the next byte after the field! + * Order is: name, date, uid, gid, mode, size, magic. + */ + typed->size = size = read_num(ar.formatted.size, 10, + sizeof(ar.formatted.size)); + + /* special filenames have '/' as the first character */ + if (ar.formatted.name[0] == '/') { + if (ar.formatted.name[1] == ' ') { + /* This is the index of symbols in the file for compilers */ + data_skip(archive_handle); + archive_handle->offset += size; + return get_header_ar(archive_handle); /* Return next header */ + } +#if ENABLE_FEATURE_AR_LONG_FILENAMES + if (ar.formatted.name[1] == '/') { + /* If the second char is a '/' then this entries data section + * stores long filename for multiple entries, they are stored + * in static variable long_names for use in future entries + */ + ar_long_name_size = size; + free(ar_long_names); + ar_long_names = xmalloc(size); + xread(archive_handle->src_fd, ar_long_names, size); + archive_handle->offset += size; + /* Return next header */ + return get_header_ar(archive_handle); + } +#else + bb_error_msg_and_die("long filenames not supported"); +#endif + } + /* Only size is always present, the rest may be missing in + * long filename pseudo file. Thus we decode the rest + * after dealing with long filename pseudo file. 
+ */ + typed->mode = read_num(ar.formatted.mode, 8, sizeof(ar.formatted.mode)); + typed->gid = read_num(ar.formatted.gid, 10, sizeof(ar.formatted.gid)); + typed->uid = read_num(ar.formatted.uid, 10, sizeof(ar.formatted.uid)); + typed->mtime = read_num(ar.formatted.date, 10, sizeof(ar.formatted.date)); + +#if ENABLE_FEATURE_AR_LONG_FILENAMES + if (ar.formatted.name[0] == '/') { + unsigned long_offset; + + /* The number after the '/' indicates the offset in the ar data section + * (saved in ar_long_names) that conatains the real filename */ + long_offset = read_num(&ar.formatted.name[1], 10, + sizeof(ar.formatted.name) - 1); + if (long_offset >= ar_long_name_size) { + bb_error_msg_and_die("can't resolve long filename"); + } + typed->name = xstrdup(ar_long_names + long_offset); + } else +#endif + { + /* short filenames */ + typed->name = xstrndup(ar.formatted.name, 16); + } + + typed->name[strcspn(typed->name, " /")] = '\0'; + + if (archive_handle->filter(archive_handle) == EXIT_SUCCESS) { + archive_handle->action_header(typed); +#if ENABLE_DPKG || ENABLE_DPKG_DEB + if (archive_handle->dpkg__sub_archive) { + while (archive_handle->dpkg__action_data_subarchive(archive_handle->dpkg__sub_archive) == EXIT_SUCCESS) + continue; + } else +#endif + archive_handle->action_data(archive_handle); + } else { + data_skip(archive_handle); + } + + archive_handle->offset += typed->size; + /* Set the file pointer to the correct spot, we may have been reading a compressed file */ + lseek(archive_handle->src_fd, archive_handle->offset, SEEK_SET); + + return EXIT_SUCCESS; +} diff --git a/probe-busybox/archival/libarchive/get_header_cpio.c b/probe-busybox/archival/libarchive/get_header_cpio.c new file mode 100644 index 00000000..badd4a84 --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_cpio.c @@ -0,0 +1,191 @@ +/* vi: set sw=4 ts=4: */ +/* Copyright 2002 Laurence Anderson + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +typedef struct hardlinks_t { + struct hardlinks_t *next; + int inode; /* TODO: must match maj/min too! 
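+ *
+ * [Editor's note, illustrative recap] The "newc"/"crc" header parsed by
+ * get_header_cpio() below is 110 ASCII bytes: the 6-byte magic "070701"
+ * (or "070702" with checksum) followed by 13 fields of 8 hex digits each:
+ * ino, mode, uid, gid, nlink, mtime, filesize, devmajor, devminor,
+ * rdevmajor, rdevminor, namesize, check. The NUL-terminated name and the
+ * file data are each padded to a 4-byte boundary (hence data_align(.., 4)).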
*/ + int mode ; + int mtime; /* These three are useful only in corner case */ + int uid ; /* of hardlinks with zero size body */ + int gid ; + char name[1]; +} hardlinks_t; + +char FAST_FUNC get_header_cpio(archive_handle_t *archive_handle) +{ + file_header_t *file_header = archive_handle->file_header; + char cpio_header[110]; + int namesize; + int major, minor, nlink, mode, inode; + unsigned size, uid, gid, mtime; + + /* There can be padding before archive header */ + data_align(archive_handle, 4); + + size = full_read(archive_handle->src_fd, cpio_header, 110); + if (size == 0) { + goto create_hardlinks; + } + if (size != 110) { + bb_error_msg_and_die("short read"); + } + archive_handle->offset += 110; + + if (!is_prefixed_with(&cpio_header[0], "07070") + || (cpio_header[5] != '1' && cpio_header[5] != '2') + ) { + bb_error_msg_and_die("unsupported cpio format, use newc or crc"); + } + + if (sscanf(cpio_header + 6, + "%8x" "%8x" "%8x" "%8x" + "%8x" "%8x" "%8x" /*maj,min:*/ "%*16c" + /*rmaj,rmin:*/"%8x" "%8x" "%8x" /*chksum: "%*8c"*/, + &inode, &mode, &uid, &gid, + &nlink, &mtime, &size, + &major, &minor, &namesize) != 10) + bb_error_msg_and_die("damaged cpio file"); + file_header->mode = mode; + /* "cpio -R USER:GRP" support: */ + if (archive_handle->cpio__owner.uid != (uid_t)-1L) + uid = archive_handle->cpio__owner.uid; + if (archive_handle->cpio__owner.gid != (gid_t)-1L) + gid = archive_handle->cpio__owner.gid; + file_header->uid = uid; + file_header->gid = gid; + file_header->mtime = mtime; + file_header->size = size; + + namesize &= 0x1fff; /* paranoia: limit names to 8k chars */ + file_header->name = xzalloc(namesize + 1); + /* Read in filename */ + xread(archive_handle->src_fd, file_header->name, namesize); + if (file_header->name[0] == '/') { + /* Testcase: echo /etc/hosts | cpio -pvd /tmp + * Without this code, it tries to unpack /etc/hosts + * into "/etc/hosts", not "etc/hosts". + */ + char *p = file_header->name; + do p++; while (*p == '/'); + overlapping_strcpy(file_header->name, p); + } + archive_handle->offset += namesize; + + /* Update offset amount and skip padding before file contents */ + data_align(archive_handle, 4); + + if (strcmp(file_header->name, cpio_TRAILER) == 0) { + /* Always round up. ">> 9" divides by 512 */ + archive_handle->cpio__blocks = (uoff_t)(archive_handle->offset + 511) >> 9; + goto create_hardlinks; + } + + file_header->link_target = NULL; + if (S_ISLNK(file_header->mode)) { + file_header->size &= 0x1fff; /* paranoia: limit names to 8k chars */ + file_header->link_target = xzalloc(file_header->size + 1); + xread(archive_handle->src_fd, file_header->link_target, file_header->size); + archive_handle->offset += file_header->size; + file_header->size = 0; /* Stop possible seeks in future */ + } + +// TODO: data_extract_all can't deal with hardlinks to non-files... 
+// when fixed, change S_ISREG to !S_ISDIR here + + if (nlink > 1 && S_ISREG(file_header->mode)) { + hardlinks_t *new = xmalloc(sizeof(*new) + namesize); + new->inode = inode; + new->mode = mode ; + new->mtime = mtime; + new->uid = uid ; + new->gid = gid ; + strcpy(new->name, file_header->name); + /* Put file on a linked list for later */ + if (size == 0) { + new->next = archive_handle->cpio__hardlinks_to_create; + archive_handle->cpio__hardlinks_to_create = new; + return EXIT_SUCCESS; /* Skip this one */ + /* TODO: this breaks cpio -t (it does not show hardlinks) */ + } + new->next = archive_handle->cpio__created_hardlinks; + archive_handle->cpio__created_hardlinks = new; + } + file_header->device = makedev(major, minor); + + if (archive_handle->filter(archive_handle) == EXIT_SUCCESS) { + archive_handle->action_data(archive_handle); +//TODO: run "echo /etc/hosts | cpio -pv /tmp" twice. On 2nd run: +//cpio: etc/hosts not created: newer or same age file exists +//etc/hosts <-- should NOT show it +//2 blocks <-- should say "0 blocks" + archive_handle->action_header(file_header); + } else { + data_skip(archive_handle); + } + + archive_handle->offset += file_header->size; + + free(file_header->link_target); + free(file_header->name); + file_header->link_target = NULL; + file_header->name = NULL; + + return EXIT_SUCCESS; + + create_hardlinks: + free(file_header->link_target); + free(file_header->name); + + while (archive_handle->cpio__hardlinks_to_create) { + hardlinks_t *cur; + hardlinks_t *make_me = archive_handle->cpio__hardlinks_to_create; + + archive_handle->cpio__hardlinks_to_create = make_me->next; + + memset(file_header, 0, sizeof(*file_header)); + file_header->mtime = make_me->mtime; + file_header->name = make_me->name; + file_header->mode = make_me->mode; + file_header->uid = make_me->uid; + file_header->gid = make_me->gid; + /*file_header->size = 0;*/ + /*file_header->link_target = NULL;*/ + + /* Try to find a file we are hardlinked to */ + cur = archive_handle->cpio__created_hardlinks; + while (cur) { + /* TODO: must match maj/min too! */ + if (cur->inode == make_me->inode) { + file_header->link_target = cur->name; + /* link_target != NULL, size = 0: "I am a hardlink" */ + if (archive_handle->filter(archive_handle) == EXIT_SUCCESS) + archive_handle->action_data(archive_handle); + free(make_me); + goto next_link; + } + cur = cur->next; + } + /* Oops... no file with such inode was created... do it now + * (happens when hardlinked files are empty (zero length)) */ + if (archive_handle->filter(archive_handle) == EXIT_SUCCESS) + archive_handle->action_data(archive_handle); + /* Move to the list of created hardlinked files */ + make_me->next = archive_handle->cpio__created_hardlinks; + archive_handle->cpio__created_hardlinks = make_me; + next_link: ; + } + + while (archive_handle->cpio__created_hardlinks) { + hardlinks_t *p = archive_handle->cpio__created_hardlinks; + archive_handle->cpio__created_hardlinks = p->next; + free(p); + } + + return EXIT_FAILURE; /* "No more files to process" */ +} diff --git a/probe-busybox/archival/libarchive/get_header_tar.c b/probe-busybox/archival/libarchive/get_header_tar.c new file mode 100644 index 00000000..c7e3bc16 --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_tar.c @@ -0,0 +1,481 @@ +/* vi: set sw=4 ts=4: */ +/* Licensed under GPLv2 or later, see file LICENSE in this source tree. 
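+ *
+ * [Editor's note, illustrative only] pax extended headers (typeflag 'x'/'g')
+ * are parsed by process_pax_hdr() below. Each record has the form
+ * "LEN NAME=VALUE\n" where LEN counts the whole record including itself,
+ * e.g. the 20-byte record:
+ *   "20 path=foo/bar.txt\n"
+ * overrides the name stored in the regular header that follows it.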
+ * + * FIXME: + * In privileged mode if uname and gname map to a uid and gid then use the + * mapped value instead of the uid/gid values in tar header + * + * References: + * GNU tar and star man pages, + * Opengroup's ustar interchange format, + * http://www.opengroup.org/onlinepubs/007904975/utilities/pax.html + */ + +#include "libbb.h" +#include "bb_archive.h" + +typedef uint32_t aliased_uint32_t FIX_ALIASING; +typedef off_t aliased_off_t FIX_ALIASING; + +/* NB: _DESTROYS_ str[len] character! */ +static unsigned long long getOctal(char *str, int len) +{ + unsigned long long v; + char *end; + /* NB: leading spaces are allowed. Using strtoull to handle that. + * The downside is that we accept e.g. "-123" too :( + */ + str[len] = '\0'; + v = strtoull(str, &end, 8); + /* std: "Each numeric field is terminated by one or more + * or NUL characters". We must support ' '! */ + if (*end != '\0' && *end != ' ') { + int8_t first = str[0]; + if (!(first & 0x80)) + bb_error_msg_and_die("corrupted octal value in tar header"); + /* + * GNU tar uses "base-256 encoding" for very large numbers. + * Encoding is binary, with highest bit always set as a marker + * and sign in next-highest bit: + * 80 00 .. 00 - zero + * bf ff .. ff - largest positive number + * ff ff .. ff - minus 1 + * c0 00 .. 00 - smallest negative number + * + * Example of tar file with 8914993153 (0x213600001) byte file. + * Field starts at offset 7c: + * 00070 30 30 30 00 30 30 30 30 30 30 30 00 80 00 00 00 |000.0000000.....| + * 00080 00 00 00 02 13 60 00 01 31 31 31 32 30 33 33 36 |.....`..11120336| + * + * NB: tarballs with NEGATIVE unix times encoded that way were seen! + */ + /* Sign-extend 7bit 'first' to 64bit 'v' (that is, using 6th bit as sign): */ + first <<= 1; + first >>= 1; /* now 7th bit = 6th bit */ + v = first; /* sign-extend 8 bits to 64 */ + while (--len != 0) + v = (v << 8) + (uint8_t) *++str; + } + return v; +} +#define GET_OCTAL(a) getOctal((a), sizeof(a)) + +#define TAR_EXTD (ENABLE_FEATURE_TAR_GNU_EXTENSIONS || ENABLE_FEATURE_TAR_SELINUX) +#if !TAR_EXTD +#define process_pax_hdr(archive_handle, sz, global) \ + process_pax_hdr(archive_handle, sz) +#endif +/* "global" is 0 or 1 */ +static void process_pax_hdr(archive_handle_t *archive_handle, unsigned sz, int global) +{ +#if !TAR_EXTD + unsigned blk_sz = (sz + 511) & (~511); + seek_by_read(archive_handle->src_fd, blk_sz); +#else + unsigned blk_sz = (sz + 511) & (~511); + char *buf, *p; + + p = buf = xmalloc(blk_sz + 1); + xread(archive_handle->src_fd, buf, blk_sz); + archive_handle->offset += blk_sz; + + /* prevent bb_strtou from running off the buffer */ + buf[sz] = '\0'; + + while (sz != 0) { + char *end, *value; + unsigned len; + + /* Every record has this format: "LEN NAME=VALUE\n" */ + len = bb_strtou(p, &end, 10); + /* expect errno to be EINVAL, because the character + * following the digits should be a space + */ + p += len; + sz -= len; + if ( + /** (int)sz < 0 - not good enough for huge malicious VALUE of 2^32-1 */ + (int)(sz|len) < 0 /* this works */ + || len == 0 + || errno != EINVAL + || *end != ' ' + ) { + bb_error_msg("malformed extended header, skipped"); + // More verbose version: + //bb_error_msg("malformed extended header at %"OFF_FMT"d, skipped", + // archive_handle->offset - (sz + len)); + break; + } + /* overwrite the terminating newline with NUL + * (we do not bother to check that it *was* a newline) + */ + p[-1] = '\0'; + value = end + 1; + +# if ENABLE_FEATURE_TAR_GNU_EXTENSIONS + if (!global) { + if (is_prefixed_with(value, "path=")) { + 
value += sizeof("path=") - 1; + free(archive_handle->tar__longname); + archive_handle->tar__longname = xstrdup(value); + continue; + } + if (is_prefixed_with(value, "linkpath=")) { + value += sizeof("linkpath=") - 1; + free(archive_handle->tar__linkname); + archive_handle->tar__linkname = xstrdup(value); + continue; + } + } +# endif + +# if ENABLE_FEATURE_TAR_SELINUX + /* Scan for SELinux contexts, via "RHT.security.selinux" keyword. + * This is what Red Hat's patched version of tar uses. + */ +# define SELINUX_CONTEXT_KEYWORD "RHT.security.selinux" + if (is_prefixed_with(value, SELINUX_CONTEXT_KEYWORD"=")) { + value += sizeof(SELINUX_CONTEXT_KEYWORD"=") - 1; + free(archive_handle->tar__sctx[global]); + archive_handle->tar__sctx[global] = xstrdup(value); + continue; + } +# endif + } + + free(buf); +#endif +} + +char FAST_FUNC get_header_tar(archive_handle_t *archive_handle) +{ + file_header_t *file_header = archive_handle->file_header; + struct tar_header_t tar; + char *cp; + int i, sum_u, sum; +#if ENABLE_FEATURE_TAR_OLDSUN_COMPATIBILITY + int sum_s; +#endif + int parse_names; + + /* Our "private data" */ +#if ENABLE_FEATURE_TAR_GNU_EXTENSIONS +# define p_longname (archive_handle->tar__longname) +# define p_linkname (archive_handle->tar__linkname) +#else +# define p_longname 0 +# define p_linkname 0 +#endif + +#if ENABLE_FEATURE_TAR_GNU_EXTENSIONS || ENABLE_FEATURE_TAR_SELINUX + again: +#endif + /* Align header */ + data_align(archive_handle, 512); + + again_after_align: + +#if ENABLE_DESKTOP || ENABLE_FEATURE_TAR_AUTODETECT + /* to prevent misdetection of bz2 sig */ + *(aliased_uint32_t*)&tar = 0; + i = full_read(archive_handle->src_fd, &tar, 512); + /* If GNU tar sees EOF in above read, it says: + * "tar: A lone zero block at N", where N = kilobyte + * where EOF was met (not EOF block, actual EOF!), + * and exits with EXIT_SUCCESS. + * We will mimic exit(EXIT_SUCCESS), although we will not mimic + * the message and we don't check whether we indeed + * saw zero block directly before this. */ + if (i == 0) { + /* GNU tar 1.29 will be silent if tar archive ends abruptly + * (if there are no zero blocks at all, and last read returns zero, + * not short read 0 < len < 512). Complain only if + * the very first read fails. Grrr. + */ + if (archive_handle->offset == 0) + bb_error_msg("short read"); + /* this merely signals end of archive, not exit(1): */ + return EXIT_FAILURE; + } + if (i != 512) { + IF_FEATURE_TAR_AUTODETECT(goto autodetect;) + bb_error_msg_and_die("short read"); + } + +#else + i = 512; + xread(archive_handle->src_fd, &tar, i); +#endif + archive_handle->offset += i; + + /* If there is no filename its an empty header */ + if (tar.name[0] == 0 && tar.prefix[0] == 0 + /* Have seen a tar archive with pax 'x' header supplying UTF8 filename, + * with actual file having all name fields NUL-filled. Check this: */ + && !p_longname + ) { + if (archive_handle->tar__end) { + /* Second consecutive empty header - end of archive. 
+ * Read until the end to empty the pipe from gz or bz2 + */ + while (full_read(archive_handle->src_fd, &tar, 512) == 512) + continue; + return EXIT_FAILURE; /* "end of archive" */ + } + archive_handle->tar__end = 1; + return EXIT_SUCCESS; /* "decoded one header" */ + } + archive_handle->tar__end = 0; + + /* Check header has valid magic, "ustar" is for the proper tar, + * five NULs are for the old tar format */ + if (!is_prefixed_with(tar.magic, "ustar") + && (!ENABLE_FEATURE_TAR_OLDGNU_COMPATIBILITY + || memcmp(tar.magic, "\0\0\0\0", 5) != 0) + ) { +#if ENABLE_FEATURE_TAR_AUTODETECT + autodetect: + /* Two different causes for lseek() != 0: + * unseekable fd (would like to support that too, but...), + * or not first block (false positive, it's not .gz/.bz2!) */ + if (lseek(archive_handle->src_fd, -i, SEEK_CUR) != 0) + goto err; + if (setup_unzip_on_fd(archive_handle->src_fd, /*fail_if_not_compressed:*/ 0) != 0) + err: + bb_error_msg_and_die("invalid tar magic"); + archive_handle->offset = 0; + goto again_after_align; +#endif + bb_error_msg_and_die("invalid tar magic"); + } + + /* Do checksum on headers. + * POSIX says that checksum is done on unsigned bytes, but + * Sun and HP-UX gets it wrong... more details in + * GNU tar source. */ +#if ENABLE_FEATURE_TAR_OLDSUN_COMPATIBILITY + sum_s = ' ' * sizeof(tar.chksum); +#endif + sum_u = ' ' * sizeof(tar.chksum); + for (i = 0; i < 148; i++) { + sum_u += ((unsigned char*)&tar)[i]; +#if ENABLE_FEATURE_TAR_OLDSUN_COMPATIBILITY + sum_s += ((signed char*)&tar)[i]; +#endif + } + for (i = 156; i < 512; i++) { + sum_u += ((unsigned char*)&tar)[i]; +#if ENABLE_FEATURE_TAR_OLDSUN_COMPATIBILITY + sum_s += ((signed char*)&tar)[i]; +#endif + } + /* This field does not need special treatment (getOctal) */ + { + char *endp; /* gcc likes temp var for &endp */ + sum = strtoul(tar.chksum, &endp, 8); + if ((*endp != '\0' && *endp != ' ') + || (sum_u != sum IF_FEATURE_TAR_OLDSUN_COMPATIBILITY(&& sum_s != sum)) + ) { + bb_error_msg_and_die("invalid tar header checksum"); + } + } + /* don't use xstrtoul, tar.chksum may have leading spaces */ + sum = strtoul(tar.chksum, NULL, 8); + if (sum_u != sum IF_FEATURE_TAR_OLDSUN_COMPATIBILITY(&& sum_s != sum)) { + bb_error_msg_and_die("invalid tar header checksum"); + } + + /* 0 is reserved for high perf file, treat as normal file */ + if (!tar.typeflag) tar.typeflag = '0'; + parse_names = (tar.typeflag >= '0' && tar.typeflag <= '7'); + + /* getOctal trashes subsequent field, therefore we call it + * on fields in reverse order */ + if (tar.devmajor[0]) { + char t = tar.prefix[0]; + /* we trash prefix[0] here, but we DO need it later! */ + unsigned minor = GET_OCTAL(tar.devminor); + unsigned major = GET_OCTAL(tar.devmajor); + file_header->device = makedev(major, minor); + tar.prefix[0] = t; + } + file_header->link_target = NULL; + if (!p_linkname && parse_names && tar.linkname[0]) { + file_header->link_target = xstrndup(tar.linkname, sizeof(tar.linkname)); + /* FIXME: what if we have non-link object with link_target? */ + /* Will link_target be free()ed? */ + } +#if ENABLE_FEATURE_TAR_UNAME_GNAME + file_header->tar__uname = tar.uname[0] ? xstrndup(tar.uname, sizeof(tar.uname)) : NULL; + file_header->tar__gname = tar.gname[0] ? 
xstrndup(tar.gname, sizeof(tar.gname)) : NULL; +#endif + file_header->mtime = GET_OCTAL(tar.mtime); + file_header->size = GET_OCTAL(tar.size); + file_header->gid = GET_OCTAL(tar.gid); + file_header->uid = GET_OCTAL(tar.uid); + /* Set bits 0-11 of the files mode */ + file_header->mode = 07777 & GET_OCTAL(tar.mode); + + file_header->name = NULL; + if (!p_longname && parse_names) { + /* we trash mode[0] here, it's ok */ + //tar.name[sizeof(tar.name)] = '\0'; - gcc 4.3.0 would complain + tar.mode[0] = '\0'; + if (tar.prefix[0]) { + /* and padding[0] */ + //tar.prefix[sizeof(tar.prefix)] = '\0'; - gcc 4.3.0 would complain + tar.padding[0] = '\0'; + file_header->name = concat_path_file(tar.prefix, tar.name); + } else + file_header->name = xstrdup(tar.name); + } + + /* Set bits 12-15 of the files mode */ + /* (typeflag was not trashed because chksum does not use getOctal) */ + switch (tar.typeflag) { + case '1': /* hardlink */ + /* we mark hardlinks as regular files with zero size and a link name */ + file_header->mode |= S_IFREG; + /* on size of link fields from star(4) + * ... For tar archives written by pre POSIX.1-1988 + * implementations, the size field usually contains the size of + * the file and needs to be ignored as no data may follow this + * header type. For POSIX.1- 1988 compliant archives, the size + * field needs to be 0. For POSIX.1-2001 compliant archives, + * the size field may be non zero, indicating that file data is + * included in the archive. + * i.e; always assume this is zero for safety. + */ + goto size0; + case '7': + /* case 0: */ + case '0': +#if ENABLE_FEATURE_TAR_OLDGNU_COMPATIBILITY + if (last_char_is(file_header->name, '/')) { + goto set_dir; + } +#endif + file_header->mode |= S_IFREG; + break; + case '2': + file_header->mode |= S_IFLNK; + /* have seen tarballs with size field containing + * the size of the link target's name */ + size0: + file_header->size = 0; + break; + case '3': + file_header->mode |= S_IFCHR; + goto size0; /* paranoia */ + case '4': + file_header->mode |= S_IFBLK; + goto size0; + case '5': + IF_FEATURE_TAR_OLDGNU_COMPATIBILITY(set_dir:) + file_header->mode |= S_IFDIR; + goto size0; + case '6': + file_header->mode |= S_IFIFO; + goto size0; + case 'g': /* pax global header */ + case 'x': { /* pax extended header */ + if ((uoff_t)file_header->size > 0xfffff) /* paranoia */ + goto skip_ext_hdr; + process_pax_hdr(archive_handle, file_header->size, (tar.typeflag == 'g')); + goto again_after_align; +#if ENABLE_FEATURE_TAR_GNU_EXTENSIONS +/* See http://www.gnu.org/software/tar/manual/html_node/Extensions.html */ + case 'L': + /* free: paranoia: tar with several consecutive longnames */ + free(p_longname); + /* For paranoia reasons we allocate extra NUL char */ + p_longname = xzalloc(file_header->size + 1); + /* We read ASCIZ string, including NUL */ + xread(archive_handle->src_fd, p_longname, file_header->size); + archive_handle->offset += file_header->size; + /* return get_header_tar(archive_handle); */ + /* gcc 4.1.1 didn't optimize it into jump */ + /* so we will do it ourself, this also saves stack */ + goto again; + case 'K': + free(p_linkname); + p_linkname = xzalloc(file_header->size + 1); + xread(archive_handle->src_fd, p_linkname, file_header->size); + archive_handle->offset += file_header->size; + /* return get_header_tar(archive_handle); */ + goto again; +/* + * case 'S': // Sparse file + * Was seen in the wild. Not supported (yet?). + * See https://www.gnu.org/software/tar/manual/html_section/tar_92.html + * for the format. 
(An "Old GNU Format" was seen, not PAX formats). + */ +// case 'D': /* GNU dump dir */ +// case 'M': /* Continuation of multi volume archive */ +// case 'N': /* Old GNU for names > 100 characters */ +// case 'V': /* Volume header */ +#endif + } + skip_ext_hdr: + { + off_t sz; + bb_error_msg("warning: skipping header '%c'", tar.typeflag); + sz = (file_header->size + 511) & ~(off_t)511; + archive_handle->offset += sz; + sz >>= 9; /* sz /= 512 but w/o contortions for signed div */ + while (sz--) + xread(archive_handle->src_fd, &tar, 512); + /* return get_header_tar(archive_handle); */ + goto again_after_align; + } + default: + bb_error_msg_and_die("unknown typeflag: 0x%x", tar.typeflag); + } + +#if ENABLE_FEATURE_TAR_GNU_EXTENSIONS + if (p_longname) { + file_header->name = p_longname; + p_longname = NULL; + } + if (p_linkname) { + file_header->link_target = p_linkname; + p_linkname = NULL; + } +#endif + + /* Everything up to and including last ".." component is stripped */ + overlapping_strcpy(file_header->name, strip_unsafe_prefix(file_header->name)); +//TODO: do the same for file_header->link_target? + + /* Strip trailing '/' in directories */ + /* Must be done after mode is set as '/' is used to check if it's a directory */ + cp = last_char_is(file_header->name, '/'); + + if (archive_handle->filter(archive_handle) == EXIT_SUCCESS) { + archive_handle->action_header(/*archive_handle->*/ file_header); + /* Note that we kill the '/' only after action_header() */ + /* (like GNU tar 1.15.1: verbose mode outputs "dir/dir/") */ + if (cp) + *cp = '\0'; + archive_handle->action_data(archive_handle); + if (archive_handle->accept || archive_handle->reject + || (archive_handle->ah_flags & ARCHIVE_REMEMBER_NAMES) + ) { + llist_add_to(&archive_handle->passed, file_header->name); + } else /* Caller isn't interested in list of unpacked files */ + free(file_header->name); + } else { + data_skip(archive_handle); + free(file_header->name); + } + archive_handle->offset += file_header->size; + + free(file_header->link_target); + /* Do not free(file_header->name)! + * It might be inserted in archive_handle->passed - see above */ +#if ENABLE_FEATURE_TAR_UNAME_GNAME + free(file_header->tar__uname); + free(file_header->tar__gname); +#endif + return EXIT_SUCCESS; /* "decoded one header" */ +} diff --git a/probe-busybox/archival/libarchive/get_header_tar_bz2.c b/probe-busybox/archival/libarchive/get_header_tar_bz2.c new file mode 100644 index 00000000..78f78a85 --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_tar_bz2.c @@ -0,0 +1,21 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +char FAST_FUNC get_header_tar_bz2(archive_handle_t *archive_handle) +{ + /* Can't lseek over pipes */ + archive_handle->seek = seek_by_read; + + fork_transformer_with_sig(archive_handle->src_fd, unpack_bz2_stream, "bunzip2"); + archive_handle->offset = 0; + while (get_header_tar(archive_handle) == EXIT_SUCCESS) + continue; + + /* Can only do one file at a time */ + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/get_header_tar_gz.c b/probe-busybox/archival/libarchive/get_header_tar_gz.c new file mode 100644 index 00000000..b11f503d --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_tar_gz.c @@ -0,0 +1,21 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +char FAST_FUNC get_header_tar_gz(archive_handle_t *archive_handle) +{ + /* Can't lseek over pipes */ + archive_handle->seek = seek_by_read; + + fork_transformer_with_sig(archive_handle->src_fd, unpack_gz_stream, "gunzip"); + archive_handle->offset = 0; + while (get_header_tar(archive_handle) == EXIT_SUCCESS) + continue; + + /* Can only do one file at a time */ + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/get_header_tar_lzma.c b/probe-busybox/archival/libarchive/get_header_tar_lzma.c new file mode 100644 index 00000000..d228cbc1 --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_tar_lzma.c @@ -0,0 +1,24 @@ +/* vi: set sw=4 ts=4: */ +/* + * Small lzma deflate implementation. + * Copyright (C) 2006 Aurelien Jacobs + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +char FAST_FUNC get_header_tar_lzma(archive_handle_t *archive_handle) +{ + /* Can't lseek over pipes */ + archive_handle->seek = seek_by_read; + + fork_transformer_with_sig(archive_handle->src_fd, unpack_lzma_stream, "unlzma"); + archive_handle->offset = 0; + while (get_header_tar(archive_handle) == EXIT_SUCCESS) + continue; + + /* Can only do one file at a time */ + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/get_header_tar_xz.c b/probe-busybox/archival/libarchive/get_header_tar_xz.c new file mode 100644 index 00000000..7bf3b3b5 --- /dev/null +++ b/probe-busybox/archival/libarchive/get_header_tar_xz.c @@ -0,0 +1,21 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +char FAST_FUNC get_header_tar_xz(archive_handle_t *archive_handle) +{ + /* Can't lseek over pipes */ + archive_handle->seek = seek_by_read; + + fork_transformer_with_sig(archive_handle->src_fd, unpack_xz_stream, "unxz"); + archive_handle->offset = 0; + while (get_header_tar(archive_handle) == EXIT_SUCCESS) + continue; + + /* Can only do one file at a time */ + return EXIT_FAILURE; +} diff --git a/probe-busybox/archival/libarchive/header_list.c b/probe-busybox/archival/libarchive/header_list.c new file mode 100644 index 00000000..0621aa40 --- /dev/null +++ b/probe-busybox/archival/libarchive/header_list.c @@ -0,0 +1,12 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC header_list(const file_header_t *file_header) +{ +//TODO: cpio -vp DIR should output "DIR/NAME", not just "NAME" */ + puts(file_header->name); +} diff --git a/probe-busybox/archival/libarchive/header_skip.c b/probe-busybox/archival/libarchive/header_skip.c new file mode 100644 index 00000000..f5987bfe --- /dev/null +++ b/probe-busybox/archival/libarchive/header_skip.c @@ -0,0 +1,10 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC header_skip(const file_header_t *file_header UNUSED_PARAM) +{ +} diff --git a/probe-busybox/archival/libarchive/header_verbose_list.c b/probe-busybox/archival/libarchive/header_verbose_list.c new file mode 100644 index 00000000..87dd8213 --- /dev/null +++ b/probe-busybox/archival/libarchive/header_verbose_list.c @@ -0,0 +1,69 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC header_verbose_list(const file_header_t *file_header) +{ + struct tm tm_time; + struct tm *ptm = &tm_time; //localtime(&file_header->mtime); + +#if ENABLE_FEATURE_TAR_UNAME_GNAME + char uid[sizeof(int)*3 + 2]; + /*char gid[sizeof(int)*3 + 2];*/ + char *user; + char *group; + + localtime_r(&file_header->mtime, ptm); + + user = file_header->tar__uname; + if (user == NULL) { + sprintf(uid, "%u", (unsigned)file_header->uid); + user = uid; + } + group = file_header->tar__gname; + if (group == NULL) { + /*sprintf(gid, "%u", (unsigned)file_header->gid);*/ + group = utoa(file_header->gid); + } + printf("%s %s/%s %9"OFF_FMT"u %4u-%02u-%02u %02u:%02u:%02u %s", + bb_mode_string(file_header->mode), + user, + group, + file_header->size, + 1900 + ptm->tm_year, + 1 + ptm->tm_mon, + ptm->tm_mday, + ptm->tm_hour, + ptm->tm_min, + ptm->tm_sec, + file_header->name); + +#else /* !FEATURE_TAR_UNAME_GNAME */ + + localtime_r(&file_header->mtime, ptm); + + printf("%s %u/%u %9"OFF_FMT"u %4u-%02u-%02u %02u:%02u:%02u %s", + bb_mode_string(file_header->mode), + (unsigned)file_header->uid, + (unsigned)file_header->gid, + file_header->size, + 1900 + ptm->tm_year, + 1 + ptm->tm_mon, + ptm->tm_mday, + ptm->tm_hour, + ptm->tm_min, + ptm->tm_sec, + file_header->name); + +#endif /* FEATURE_TAR_UNAME_GNAME */ + + /* NB: GNU tar shows "->" for symlinks and "link to" for hardlinks */ + if (file_header->link_target) { + printf(" -> %s", file_header->link_target); + } + bb_putchar('\n'); +} diff --git a/probe-busybox/archival/libarchive/init_handle.c b/probe-busybox/archival/libarchive/init_handle.c new file mode 100644 index 00000000..dcba6666 --- /dev/null +++ b/probe-busybox/archival/libarchive/init_handle.c @@ -0,0 +1,26 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +archive_handle_t* FAST_FUNC init_handle(void) +{ + archive_handle_t *archive_handle; + + /* Initialize default values */ + archive_handle = xzalloc(sizeof(archive_handle_t)); + archive_handle->file_header = xzalloc(sizeof(file_header_t)); + archive_handle->action_header = header_skip; + archive_handle->action_data = data_skip; + archive_handle->filter = filter_accept_all; + archive_handle->seek = seek_by_jump; +#if ENABLE_CPIO || ENABLE_RPM2CPIO || ENABLE_RPM + archive_handle->cpio__owner.uid = (uid_t)-1L; + archive_handle->cpio__owner.gid = (gid_t)-1L; +#endif + + return archive_handle; +} diff --git a/probe-busybox/archival/libarchive/liblzo.h b/probe-busybox/archival/libarchive/liblzo.h new file mode 100644 index 00000000..4596620f --- /dev/null +++ b/probe-busybox/archival/libarchive/liblzo.h @@ -0,0 +1,95 @@ +/* + This file is part of the LZO real-time data compression library. + + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "liblzo_interface.h" + +/* lzo-2.03/src/config1x.h */ +#define M2_MIN_LEN 3 +#define M2_MAX_LEN 8 +#define M3_MAX_LEN 33 +#define M4_MAX_LEN 9 +#define M1_MAX_OFFSET 0x0400 +#define M2_MAX_OFFSET 0x0800 +#define M3_MAX_OFFSET 0x4000 +#define M4_MAX_OFFSET 0xbfff +#define M1_MARKER 0 +#define M3_MARKER 32 +#define M4_MARKER 16 + +#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET) +#define MIN_LOOKAHEAD (M2_MAX_LEN + 1) + +#define LZO_EOF_CODE + +/* lzo-2.03/src/lzo_dict.h */ +#define GINDEX(m_pos,m_off,dict,dindex,in) m_pos = dict[dindex] +#define DX2(p,s1,s2) \ + (((((unsigned)((p)[2]) << (s2)) ^ (p)[1]) << (s1)) ^ (p)[0]) +//#define DA3(p,s1,s2,s3) ((DA2((p)+1,s2,s3) << (s1)) + (p)[0]) +//#define DS3(p,s1,s2,s3) ((DS2((p)+1,s2,s3) << (s1)) - (p)[0]) +#define DX3(p,s1,s2,s3) ((DX2((p)+1,s2,s3) << (s1)) ^ (p)[0]) + +#define D_SIZE (1U << D_BITS) +#define D_MASK ((1U << D_BITS) - 1) +#define D_HIGH ((D_MASK >> 1) + 1) + +#define LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,max_offset) \ + ( \ + m_pos = ip - (unsigned)(ip - m_pos), \ + ((uintptr_t)m_pos < (uintptr_t)in \ + || (m_off = (unsigned)(ip - m_pos)) <= 0 \ + || m_off > max_offset) \ + ) + +#define DENTRY(p,in) (p) +#define UPDATE_I(dict,drun,index,p,in) dict[index] = DENTRY(p,in) + +#define DMS(v,s) ((unsigned) (((v) & (D_MASK >> (s))) << (s))) +#define DM(v) ((unsigned) ((v) & D_MASK)) +#define DMUL(a,b) ((unsigned) ((a) * (b))) + +/* lzo-2.03/src/lzo_ptr.h */ +#define pd(a,b) ((unsigned)((a)-(b))) + +# define TEST_IP (ip < ip_end) +# define NEED_IP(x) \ + if ((unsigned)(ip_end - ip) < (unsigned)(x)) goto input_overrun +# define TEST_IV(x) if ((x) > (unsigned)0 - (511)) goto input_overrun + +# undef TEST_OP /* don't need both of the tests here */ +# define TEST_OP 1 +# define NEED_OP(x) \ + if ((unsigned)(op_end - op) < (unsigned)(x)) goto output_overrun +# define TEST_OV(x) if ((x) > (unsigned)0 - (511)) goto output_overrun + +#define HAVE_ANY_OP 1 + +//#if defined(LZO_TEST_OVERRUN_LOOKBEHIND) +# define TEST_LB(m_pos) if (m_pos < out || m_pos >= op) goto lookbehind_overrun +//# define TEST_LBO(m_pos,o) if (m_pos < out || m_pos >= op - (o)) goto lookbehind_overrun +//#else +//# define TEST_LB(m_pos) ((void) 0) +//# define TEST_LBO(m_pos,o) ((void) 0) +//#endif diff --git a/probe-busybox/archival/libarchive/lzo1x_1.c b/probe-busybox/archival/libarchive/lzo1x_1.c new file mode 100644 index 00000000..a8883984 --- /dev/null +++ b/probe-busybox/archival/libarchive/lzo1x_1.c @@ -0,0 +1,35 @@ +/* LZO1X-1 compression + + This file is part of the LZO real-time data compression library. + + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "libbb.h" +#include "liblzo.h" + +#define D_BITS 14 +#define D_INDEX1(d,p) d = DM(DMUL(0x21,DX3(p,5,5,6)) >> 5) +#define D_INDEX2(d,p) d = (d & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f) + +#define DO_COMPRESS lzo1x_1_compress + +#include "lzo1x_c.c" diff --git a/probe-busybox/archival/libarchive/lzo1x_1o.c b/probe-busybox/archival/libarchive/lzo1x_1o.c new file mode 100644 index 00000000..3c61253e --- /dev/null +++ b/probe-busybox/archival/libarchive/lzo1x_1o.c @@ -0,0 +1,35 @@ +/* LZO1X-1(15) compression + + This file is part of the LZO real-time data compression library. + + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "libbb.h" +#include "liblzo.h" + +#define D_BITS 15 +#define D_INDEX1(d,p) d = DM(DMUL(0x21,DX3(p,5,5,6)) >> 5) +#define D_INDEX2(d,p) d = (d & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f) + +#define DO_COMPRESS lzo1x_1_15_compress + +#include "lzo1x_c.c" diff --git a/probe-busybox/archival/libarchive/lzo1x_9x.c b/probe-busybox/archival/libarchive/lzo1x_9x.c new file mode 100644 index 00000000..09ee4ba5 --- /dev/null +++ b/probe-busybox/archival/libarchive/lzo1x_9x.c @@ -0,0 +1,919 @@ +/* lzo1x_9x.c -- implementation of the LZO1X-999 compression algorithm + + This file is part of the LZO real-time data compression library. + + Copyright (C) 2008 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2007 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2006 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2005 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2004 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2003 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. 
+ + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/lzo/ +*/ +#include "libbb.h" + +/* The following is probably only safe on Intel-compatible processors ... */ +#define LZO_UNALIGNED_OK_2 +#define LZO_UNALIGNED_OK_4 + +#include "liblzo.h" + +#define LZO_MAX(a,b) ((a) >= (b) ? (a) : (b)) +#define LZO_MIN(a,b) ((a) <= (b) ? (a) : (b)) +#define LZO_MAX3(a,b,c) ((a) >= (b) ? LZO_MAX(a,c) : LZO_MAX(b,c)) + +/*********************************************************************** +// +************************************************************************/ +#define SWD_N M4_MAX_OFFSET /* size of ring buffer */ +#define SWD_F 2048 /* upper limit for match length */ + +#define SWD_BEST_OFF (LZO_MAX3(M2_MAX_LEN, M3_MAX_LEN, M4_MAX_LEN) + 1) + +typedef struct { + int init; + + unsigned look; /* bytes in lookahead buffer */ + + unsigned m_len; + unsigned m_off; + + const uint8_t *bp; + const uint8_t *ip; + const uint8_t *in; + const uint8_t *in_end; + uint8_t *out; + + unsigned r1_lit; +} lzo1x_999_t; + +#define getbyte(c) ((c).ip < (c).in_end ? *((c).ip)++ : (-1)) + +/* lzo_swd.c -- sliding window dictionary */ + +/*********************************************************************** +// +************************************************************************/ +#define SWD_UINT_MAX USHRT_MAX + +#ifndef SWD_HSIZE +# define SWD_HSIZE 16384 +#endif +#ifndef SWD_MAX_CHAIN +# define SWD_MAX_CHAIN 2048 +#endif + +#define HEAD3(b, p) \ + ( ((0x9f5f * ((((b[p]<<5)^b[p+1])<<5) ^ b[p+2])) >> 5) & (SWD_HSIZE-1) ) + +#if defined(LZO_UNALIGNED_OK_2) +# define HEAD2(b,p) (* (bb__aliased_uint16_t *) &(b[p])) +#else +# define HEAD2(b,p) (b[p] ^ ((unsigned)b[p+1]<<8)) +#endif +#define NIL2 SWD_UINT_MAX + +typedef struct lzo_swd { + /* public - "built-in" */ + + /* public - configuration */ + unsigned max_chain; + int use_best_off; + + /* public - output */ + unsigned m_len; + unsigned m_off; + unsigned look; + int b_char; +#if defined(SWD_BEST_OFF) + unsigned best_off[SWD_BEST_OFF]; +#endif + + /* semi public */ + lzo1x_999_t *c; + unsigned m_pos; +#if defined(SWD_BEST_OFF) + unsigned best_pos[SWD_BEST_OFF]; +#endif + + /* private */ + unsigned ip; /* input pointer (lookahead) */ + unsigned bp; /* buffer pointer */ + unsigned rp; /* remove pointer */ + + unsigned node_count; + unsigned first_rp; + + uint8_t b[SWD_N + SWD_F]; + uint8_t b_wrap[SWD_F]; /* must follow b */ + uint16_t head3[SWD_HSIZE]; + uint16_t succ3[SWD_N + SWD_F]; + uint16_t best3[SWD_N + SWD_F]; + uint16_t llen3[SWD_HSIZE]; +#ifdef HEAD2 + uint16_t head2[65536L]; +#endif +} lzo_swd_t, *lzo_swd_p; + +#define SIZEOF_LZO_SWD_T (sizeof(lzo_swd_t)) + + +/* Access macro for head3. + * head3[key] may be uninitialized, but then its value will never be used. 
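+ *
+ * [Editor's note, illustrative only] head3/succ3/llen3 implement a classic
+ * hash-chain match finder: HEAD3() hashes the 3 bytes at a position into one
+ * of SWD_HSIZE (16384) buckets, head3[key] holds the most recently inserted
+ * position for that hash, succ3[] links it to older positions with the same
+ * hash, and llen3[] counts the chain length. swd_search() walks this chain
+ * (bounded by max_chain) looking for the longest match; head2[] is an
+ * optional exact 2-byte index used for very short matches.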
+ */ +#define s_get_head3(s,key) s->head3[key] + + +/*********************************************************************** +// +************************************************************************/ +#define B_SIZE (SWD_N + SWD_F) + +static int swd_init(lzo_swd_p s) +{ + /* defaults */ + s->node_count = SWD_N; + + memset(s->llen3, 0, sizeof(s->llen3[0]) * (unsigned)SWD_HSIZE); +#ifdef HEAD2 + memset(s->head2, 0xff, sizeof(s->head2[0]) * 65536L); + assert(s->head2[0] == NIL2); +#endif + + s->ip = 0; + s->bp = s->ip; + s->first_rp = s->ip; + + assert(s->ip + SWD_F <= B_SIZE); + s->look = (unsigned) (s->c->in_end - s->c->ip); + if (s->look > 0) { + if (s->look > SWD_F) + s->look = SWD_F; + memcpy(&s->b[s->ip], s->c->ip, s->look); + s->c->ip += s->look; + s->ip += s->look; + } + if (s->ip == B_SIZE) + s->ip = 0; + + s->rp = s->first_rp; + if (s->rp >= s->node_count) + s->rp -= s->node_count; + else + s->rp += B_SIZE - s->node_count; + + return LZO_E_OK; +} + +#define swd_pos2off(s,pos) \ + (s->bp > (pos) ? s->bp - (pos) : B_SIZE - ((pos) - s->bp)) + + +/*********************************************************************** +// +************************************************************************/ +static void swd_getbyte(lzo_swd_p s) +{ + int c; + + if ((c = getbyte(*(s->c))) < 0) { + if (s->look > 0) + --s->look; + } else { + s->b[s->ip] = c; + if (s->ip < SWD_F) + s->b_wrap[s->ip] = c; + } + if (++s->ip == B_SIZE) + s->ip = 0; + if (++s->bp == B_SIZE) + s->bp = 0; + if (++s->rp == B_SIZE) + s->rp = 0; +} + + +/*********************************************************************** +// remove node from lists +************************************************************************/ +static void swd_remove_node(lzo_swd_p s, unsigned node) +{ + if (s->node_count == 0) { + unsigned key; + + key = HEAD3(s->b,node); + assert(s->llen3[key] > 0); + --s->llen3[key]; + +#ifdef HEAD2 + key = HEAD2(s->b,node); + assert(s->head2[key] != NIL2); + if ((unsigned) s->head2[key] == node) + s->head2[key] = NIL2; +#endif + } else + --s->node_count; +} + + +/*********************************************************************** +// +************************************************************************/ +static void swd_accept(lzo_swd_p s, unsigned n) +{ + assert(n <= s->look); + + while (n--) { + unsigned key; + + swd_remove_node(s,s->rp); + + /* add bp into HEAD3 */ + key = HEAD3(s->b, s->bp); + s->succ3[s->bp] = s_get_head3(s, key); + s->head3[key] = s->bp; + s->best3[s->bp] = SWD_F + 1; + s->llen3[key]++; + assert(s->llen3[key] <= SWD_N); + +#ifdef HEAD2 + /* add bp into HEAD2 */ + key = HEAD2(s->b, s->bp); + s->head2[key] = s->bp; +#endif + + swd_getbyte(s); + } +} + + +/*********************************************************************** +// +************************************************************************/ +static void swd_search(lzo_swd_p s, unsigned node, unsigned cnt) +{ + const uint8_t *p1; + const uint8_t *p2; + const uint8_t *px; + unsigned m_len = s->m_len; + const uint8_t *b = s->b; + const uint8_t *bp = s->b + s->bp; + const uint8_t *bx = s->b + s->bp + s->look; + unsigned char scan_end1; + + assert(s->m_len > 0); + + scan_end1 = bp[m_len - 1]; + for ( ; cnt-- > 0; node = s->succ3[node]) { + p1 = bp; + p2 = b + node; + px = bx; + + assert(m_len < s->look); + + if (p2[m_len - 1] == scan_end1 + && p2[m_len] == p1[m_len] + && p2[0] == p1[0] + && p2[1] == p1[1] + ) { + unsigned i; + assert(lzo_memcmp(bp, &b[node], 3) == 0); + + p1 += 2; p2 += 2; + do {} while (++p1 < px && *p1 
== *++p2); + i = p1-bp; + + assert(lzo_memcmp(bp, &b[node], i) == 0); + +#if defined(SWD_BEST_OFF) + if (i < SWD_BEST_OFF) { + if (s->best_pos[i] == 0) + s->best_pos[i] = node + 1; + } +#endif + if (i > m_len) { + s->m_len = m_len = i; + s->m_pos = node; + if (m_len == s->look) + return; + if (m_len >= SWD_F) + return; + if (m_len > (unsigned) s->best3[node]) + return; + scan_end1 = bp[m_len - 1]; + } + } + } +} + + +/*********************************************************************** +// +************************************************************************/ +#ifdef HEAD2 + +static int swd_search2(lzo_swd_p s) +{ + unsigned key; + + assert(s->look >= 2); + assert(s->m_len > 0); + + key = s->head2[HEAD2(s->b, s->bp)]; + if (key == NIL2) + return 0; + assert(lzo_memcmp(&s->b[s->bp], &s->b[key], 2) == 0); +#if defined(SWD_BEST_OFF) + if (s->best_pos[2] == 0) + s->best_pos[2] = key + 1; +#endif + + if (s->m_len < 2) { + s->m_len = 2; + s->m_pos = key; + } + return 1; +} + +#endif + + +/*********************************************************************** +// +************************************************************************/ +static void swd_findbest(lzo_swd_p s) +{ + unsigned key; + unsigned cnt, node; + unsigned len; + + assert(s->m_len > 0); + + /* get current head, add bp into HEAD3 */ + key = HEAD3(s->b,s->bp); + node = s->succ3[s->bp] = s_get_head3(s, key); + cnt = s->llen3[key]++; + assert(s->llen3[key] <= SWD_N + SWD_F); + if (cnt > s->max_chain) + cnt = s->max_chain; + s->head3[key] = s->bp; + + s->b_char = s->b[s->bp]; + len = s->m_len; + if (s->m_len >= s->look) { + if (s->look == 0) + s->b_char = -1; + s->m_off = 0; + s->best3[s->bp] = SWD_F + 1; + } else { +#ifdef HEAD2 + if (swd_search2(s)) +#endif + if (s->look >= 3) + swd_search(s, node, cnt); + if (s->m_len > len) + s->m_off = swd_pos2off(s,s->m_pos); + s->best3[s->bp] = s->m_len; + +#if defined(SWD_BEST_OFF) + if (s->use_best_off) { + int i; + for (i = 2; i < SWD_BEST_OFF; i++) { + if (s->best_pos[i] > 0) + s->best_off[i] = swd_pos2off(s, s->best_pos[i]-1); + else + s->best_off[i] = 0; + } + } +#endif + } + + swd_remove_node(s,s->rp); + +#ifdef HEAD2 + /* add bp into HEAD2 */ + key = HEAD2(s->b, s->bp); + s->head2[key] = s->bp; +#endif +} + +#undef HEAD3 +#undef HEAD2 +#undef s_get_head3 + + +/*********************************************************************** +// +************************************************************************/ +static int init_match(lzo1x_999_t *c, lzo_swd_p s, uint32_t use_best_off) +{ + int r; + + assert(!c->init); + c->init = 1; + + s->c = c; + + r = swd_init(s); + if (r != 0) + return r; + + s->use_best_off = use_best_off; + return r; +} + + +/*********************************************************************** +// +************************************************************************/ +static int find_match(lzo1x_999_t *c, lzo_swd_p s, + unsigned this_len, unsigned skip) +{ + assert(c->init); + + if (skip > 0) { + assert(this_len >= skip); + swd_accept(s, this_len - skip); + } else { + assert(this_len <= 1); + } + + s->m_len = 1; +#ifdef SWD_BEST_OFF + if (s->use_best_off) + memset(s->best_pos, 0, sizeof(s->best_pos)); +#endif + swd_findbest(s); + c->m_len = s->m_len; + c->m_off = s->m_off; + + swd_getbyte(s); + + if (s->b_char < 0) { + c->look = 0; + c->m_len = 0; + } else { + c->look = s->look + 1; + } + c->bp = c->ip - c->look; + + return LZO_E_OK; +} + +/* this is a public functions, but there is no prototype in a header file */ +static int 
lzo1x_999_compress_internal(const uint8_t *in , unsigned in_len, + uint8_t *out, unsigned *out_len, + void *wrkmem, + unsigned good_length, + unsigned max_lazy, + unsigned max_chain, + uint32_t use_best_off); + + +/*********************************************************************** +// +************************************************************************/ +static uint8_t *code_match(lzo1x_999_t *c, + uint8_t *op, unsigned m_len, unsigned m_off) +{ + assert(op > c->out); + if (m_len == 2) { + assert(m_off <= M1_MAX_OFFSET); + assert(c->r1_lit > 0); + assert(c->r1_lit < 4); + m_off -= 1; + *op++ = M1_MARKER | ((m_off & 3) << 2); + *op++ = m_off >> 2; + } else if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { + assert(m_len >= 3); + m_off -= 1; + *op++ = ((m_len - 1) << 5) | ((m_off & 7) << 2); + *op++ = m_off >> 3; + assert(op[-2] >= M2_MARKER); + } else if (m_len == M2_MIN_LEN && m_off <= MX_MAX_OFFSET && c->r1_lit >= 4) { + assert(m_len == 3); + assert(m_off > M2_MAX_OFFSET); + m_off -= 1 + M2_MAX_OFFSET; + *op++ = M1_MARKER | ((m_off & 3) << 2); + *op++ = m_off >> 2; + } else if (m_off <= M3_MAX_OFFSET) { + assert(m_len >= 3); + m_off -= 1; + if (m_len <= M3_MAX_LEN) + *op++ = M3_MARKER | (m_len - 2); + else { + m_len -= M3_MAX_LEN; + *op++ = M3_MARKER | 0; + while (m_len > 255) { + m_len -= 255; + *op++ = 0; + } + assert(m_len > 0); + *op++ = m_len; + } + *op++ = m_off << 2; + *op++ = m_off >> 6; + } else { + unsigned k; + + assert(m_len >= 3); + assert(m_off > 0x4000); + assert(m_off <= 0xbfff); + m_off -= 0x4000; + k = (m_off & 0x4000) >> 11; + if (m_len <= M4_MAX_LEN) + *op++ = M4_MARKER | k | (m_len - 2); + else { + m_len -= M4_MAX_LEN; + *op++ = M4_MARKER | k | 0; + while (m_len > 255) { + m_len -= 255; + *op++ = 0; + } + assert(m_len > 0); + *op++ = m_len; + } + *op++ = m_off << 2; + *op++ = m_off >> 6; + } + + return op; +} + + +static uint8_t *STORE_RUN(lzo1x_999_t *c, uint8_t *op, + const uint8_t *ii, unsigned t) +{ + if (op == c->out && t <= 238) { + *op++ = 17 + t; + } else if (t <= 3) { + op[-2] |= t; + } else if (t <= 18) { + *op++ = t - 3; + } else { + unsigned tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + assert(tt > 0); + *op++ = tt; + } + do *op++ = *ii++; while (--t > 0); + + return op; +} + + +static uint8_t *code_run(lzo1x_999_t *c, uint8_t *op, const uint8_t *ii, + unsigned lit) +{ + if (lit > 0) { + assert(m_len >= 2); + op = STORE_RUN(c, op, ii, lit); + } else { + assert(m_len >= 3); + } + c->r1_lit = lit; + + return op; +} + + +/*********************************************************************** +// +************************************************************************/ +static int len_of_coded_match(unsigned m_len, unsigned m_off, unsigned lit) +{ + int n = 4; + + if (m_len < 2) + return -1; + if (m_len == 2) + return (m_off <= M1_MAX_OFFSET && lit > 0 && lit < 4) ? 
2 : -1; + if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) + return 2; + if (m_len == M2_MIN_LEN && m_off <= MX_MAX_OFFSET && lit >= 4) + return 2; + if (m_off <= M3_MAX_OFFSET) { + if (m_len <= M3_MAX_LEN) + return 3; + m_len -= M3_MAX_LEN; + } else if (m_off <= M4_MAX_OFFSET) { + if (m_len <= M4_MAX_LEN) + return 3; + m_len -= M4_MAX_LEN; + } else + return -1; + while (m_len > 255) { + m_len -= 255; + n++; + } + return n; +} + + +static int min_gain(unsigned ahead, unsigned lit1, + unsigned lit2, int l1, int l2, int l3) +{ + int lazy_match_min_gain = 0; + + assert (ahead >= 1); + lazy_match_min_gain += ahead; + + if (lit1 <= 3) + lazy_match_min_gain += (lit2 <= 3) ? 0 : 2; + else if (lit1 <= 18) + lazy_match_min_gain += (lit2 <= 18) ? 0 : 1; + + lazy_match_min_gain += (l2 - l1) * 2; + if (l3 > 0) + lazy_match_min_gain -= (ahead - l3) * 2; + + if (lazy_match_min_gain < 0) + lazy_match_min_gain = 0; + + return lazy_match_min_gain; +} + + +/*********************************************************************** +// +************************************************************************/ +#if defined(SWD_BEST_OFF) + +static void better_match(const lzo_swd_p swd, + unsigned *m_len, unsigned *m_off) +{ + if (*m_len <= M2_MIN_LEN) + return; + + if (*m_off <= M2_MAX_OFFSET) + return; + + /* M3/M4 -> M2 */ + if (*m_off > M2_MAX_OFFSET + && *m_len >= M2_MIN_LEN + 1 && *m_len <= M2_MAX_LEN + 1 + && swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= M2_MAX_OFFSET + ) { + *m_len = *m_len - 1; + *m_off = swd->best_off[*m_len]; + return; + } + + /* M4 -> M2 */ + if (*m_off > M3_MAX_OFFSET + && *m_len >= M4_MAX_LEN + 1 && *m_len <= M2_MAX_LEN + 2 + && swd->best_off[*m_len-2] && swd->best_off[*m_len-2] <= M2_MAX_OFFSET + ) { + *m_len = *m_len - 2; + *m_off = swd->best_off[*m_len]; + return; + } + /* M4 -> M3 */ + if (*m_off > M3_MAX_OFFSET + && *m_len >= M4_MAX_LEN + 1 && *m_len <= M3_MAX_LEN + 1 + && swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= M3_MAX_OFFSET + ) { + *m_len = *m_len - 1; + *m_off = swd->best_off[*m_len]; + } +} + +#endif + + +/*********************************************************************** +// +************************************************************************/ +static int lzo1x_999_compress_internal(const uint8_t *in, unsigned in_len, + uint8_t *out, unsigned *out_len, + void *wrkmem, + unsigned good_length, + unsigned max_lazy, + unsigned max_chain, + uint32_t use_best_off) +{ + uint8_t *op; + const uint8_t *ii; + unsigned lit; + unsigned m_len, m_off; + lzo1x_999_t cc; + lzo1x_999_t *const c = &cc; + const lzo_swd_p swd = (lzo_swd_p) wrkmem; + int r; + + c->init = 0; + c->ip = c->in = in; + c->in_end = in + in_len; + c->out = out; + + op = out; + ii = c->ip; /* point to start of literal run */ + lit = 0; + c->r1_lit = 0; + + r = init_match(c, swd, use_best_off); + if (r != 0) + return r; + swd->max_chain = max_chain; + + r = find_match(c, swd, 0, 0); + if (r != 0) + return r; + + while (c->look > 0) { + unsigned ahead; + unsigned max_ahead; + int l1, l2, l3; + + m_len = c->m_len; + m_off = c->m_off; + + assert(c->bp == c->ip - c->look); + assert(c->bp >= in); + if (lit == 0) + ii = c->bp; + assert(ii + lit == c->bp); + assert(swd->b_char == *(c->bp)); + + if (m_len < 2 + || (m_len == 2 && (m_off > M1_MAX_OFFSET || lit == 0 || lit >= 4)) + /* Do not accept this match for compressed-data compatibility + * with LZO v1.01 and before + * [ might be a problem for decompress() and optimize() ] + */ + || (m_len == 2 && op == out) + || (op == out && lit == 0) + ) 
{ + /* a literal */ + m_len = 0; + } + else if (m_len == M2_MIN_LEN) { + /* compression ratio improves if we code a literal in some cases */ + if (m_off > MX_MAX_OFFSET && lit >= 4) + m_len = 0; + } + + if (m_len == 0) { + /* a literal */ + lit++; + swd->max_chain = max_chain; + r = find_match(c, swd, 1, 0); + assert(r == 0); + continue; + } + + /* a match */ +#if defined(SWD_BEST_OFF) + if (swd->use_best_off) + better_match(swd, &m_len, &m_off); +#endif + + /* shall we try a lazy match ? */ + ahead = 0; + if (m_len >= max_lazy) { + /* no */ + l1 = 0; + max_ahead = 0; + } else { + /* yes, try a lazy match */ + l1 = len_of_coded_match(m_len, m_off, lit); + assert(l1 > 0); + max_ahead = LZO_MIN(2, (unsigned)l1 - 1); + } + + + while (ahead < max_ahead && c->look > m_len) { + int lazy_match_min_gain; + + if (m_len >= good_length) + swd->max_chain = max_chain >> 2; + else + swd->max_chain = max_chain; + r = find_match(c, swd, 1, 0); + ahead++; + + assert(r == 0); + assert(c->look > 0); + assert(ii + lit + ahead == c->bp); + + if (c->m_len < m_len) + continue; + if (c->m_len == m_len && c->m_off >= m_off) + continue; +#if defined(SWD_BEST_OFF) + if (swd->use_best_off) + better_match(swd, &c->m_len, &c->m_off); +#endif + l2 = len_of_coded_match(c->m_len, c->m_off, lit+ahead); + if (l2 < 0) + continue; + + /* compressed-data compatibility [see above] */ + l3 = (op == out) ? -1 : len_of_coded_match(ahead, m_off, lit); + + lazy_match_min_gain = min_gain(ahead, lit, lit+ahead, l1, l2, l3); + if (c->m_len >= m_len + lazy_match_min_gain) { + if (l3 > 0) { + /* code previous run */ + op = code_run(c, op, ii, lit); + lit = 0; + /* code shortened match */ + op = code_match(c, op, ahead, m_off); + } else { + lit += ahead; + assert(ii + lit == c->bp); + } + goto lazy_match_done; + } + } + + assert(ii + lit + ahead == c->bp); + + /* 1 - code run */ + op = code_run(c, op, ii, lit); + lit = 0; + + /* 2 - code match */ + op = code_match(c, op, m_len, m_off); + swd->max_chain = max_chain; + r = find_match(c, swd, m_len, 1+ahead); + assert(r == 0); + + lazy_match_done: ; + } + + /* store final run */ + if (lit > 0) + op = STORE_RUN(c, op, ii, lit); + +#if defined(LZO_EOF_CODE) + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; +#endif + + *out_len = op - out; + + return LZO_E_OK; +} + + +/*********************************************************************** +// +************************************************************************/ +int lzo1x_999_compress_level(const uint8_t *in, unsigned in_len, + uint8_t *out, unsigned *out_len, + void *wrkmem, + int compression_level) +{ + static const struct { + uint16_t good_length; + uint16_t max_lazy; + uint16_t max_chain; + uint16_t use_best_off; + } c[3] = { + { 8, 32, 256, 0 }, + { 32, 128, 2048, 1 }, + { SWD_F, SWD_F, 4096, 1 } /* max. compression */ + }; + + if (compression_level < 7 || compression_level > 9) + return LZO_E_ERROR; + + compression_level -= 7; + return lzo1x_999_compress_internal(in, in_len, out, out_len, wrkmem, + c[compression_level].good_length, + c[compression_level].max_lazy, + c[compression_level].max_chain, + c[compression_level].use_best_off); +} diff --git a/probe-busybox/archival/libarchive/lzo1x_c.c b/probe-busybox/archival/libarchive/lzo1x_c.c new file mode 100644 index 00000000..8c77072a --- /dev/null +++ b/probe-busybox/archival/libarchive/lzo1x_c.c @@ -0,0 +1,296 @@ +/* implementation of the LZO1[XY]-1 compression algorithm + + This file is part of the LZO real-time data compression library. 
+ + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/*********************************************************************** +// compress a block of data. +************************************************************************/ +static NOINLINE unsigned +do_compress(const uint8_t* in, unsigned in_len, + uint8_t* out, unsigned* out_len, + void* wrkmem) +{ + register const uint8_t* ip; + uint8_t* op; + const uint8_t* const in_end = in + in_len; + const uint8_t* const ip_end = in + in_len - M2_MAX_LEN - 5; + const uint8_t* ii; + const void* *const dict = (const void**) wrkmem; + + op = out; + ip = in; + ii = ip; + + ip += 4; + for (;;) { + register const uint8_t* m_pos; + unsigned m_off; + unsigned m_len; + unsigned dindex; + + D_INDEX1(dindex,ip); + GINDEX(m_pos,m_off,dict,dindex,in); + if (LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,M4_MAX_OFFSET)) + goto literal; +#if 1 + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + D_INDEX2(dindex,ip); +#endif + GINDEX(m_pos,m_off,dict,dindex,in); + if (LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,M4_MAX_OFFSET)) + goto literal; + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + goto literal; + + try_match: +#if 1 && defined(LZO_UNALIGNED_OK_2) + if (* (const lzo_ushortp) m_pos != * (const lzo_ushortp) ip) +#else + if (m_pos[0] != ip[0] || m_pos[1] != ip[1]) +#endif + { + } else { + if (m_pos[2] == ip[2]) { +#if 0 + if (m_off <= M2_MAX_OFFSET) + goto match; + if (lit <= 3) + goto match; + if (lit == 3) { /* better compression, but slower */ + assert(op - 2 > out); op[-2] |= (uint8_t)(3); + *op++ = *ii++; *op++ = *ii++; *op++ = *ii++; + goto code_match; + } + if (m_pos[3] == ip[3]) +#endif + goto match; + } + else { + /* still need a better way for finding M1 matches */ +#if 0 + /* a M1 match */ +#if 0 + if (m_off <= M1_MAX_OFFSET && lit > 0 && lit <= 3) +#else + if (m_off <= M1_MAX_OFFSET && lit == 3) +#endif + { + register unsigned t; + + t = lit; + assert(op - 2 > out); op[-2] |= (uint8_t)(t); + do *op++ = *ii++; while (--t > 0); + assert(ii == ip); + m_off -= 1; + *op++ = (uint8_t)(M1_MARKER | ((m_off & 3) << 2)); + *op++ = (uint8_t)(m_off >> 2); + ip += 2; + goto match_done; + } +#endif + } + } + + /* a literal */ + literal: + UPDATE_I(dict, 0, dindex, ip, in); + ++ip; + if (ip >= ip_end) + break; + continue; + + /* a match */ +match: + UPDATE_I(dict, 0, dindex, ip, in); + /* store current literal run */ + if (pd(ip, ii) > 0) { + register unsigned t = pd(ip, ii); + + if (t <= 3) { + assert(op - 2 > out); + op[-2] |= (uint8_t)(t); + } + else if (t <= 18) + *op++ = (uint8_t)(t - 3); + else { + register unsigned tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + 
*op++ = 0; + } + assert(tt > 0); + *op++ = (uint8_t)(tt); + } + do *op++ = *ii++; while (--t > 0); + } + + /* code the match */ + assert(ii == ip); + ip += 3; + if (m_pos[3] != *ip++ || m_pos[4] != *ip++ || m_pos[5] != *ip++ + || m_pos[6] != *ip++ || m_pos[7] != *ip++ || m_pos[8] != *ip++ +#ifdef LZO1Y + || m_pos[ 9] != *ip++ || m_pos[10] != *ip++ || m_pos[11] != *ip++ + || m_pos[12] != *ip++ || m_pos[13] != *ip++ || m_pos[14] != *ip++ +#endif + ) { + --ip; + m_len = pd(ip, ii); + assert(m_len >= 3); + assert(m_len <= M2_MAX_LEN); + + if (m_off <= M2_MAX_OFFSET) { + m_off -= 1; +#if defined(LZO1X) + *op++ = (uint8_t)(((m_len - 1) << 5) | ((m_off & 7) << 2)); + *op++ = (uint8_t)(m_off >> 3); +#elif defined(LZO1Y) + *op++ = (uint8_t)(((m_len + 1) << 4) | ((m_off & 3) << 2)); + *op++ = (uint8_t)(m_off >> 2); +#endif + } + else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + *op++ = (uint8_t)(M3_MARKER | (m_len - 2)); + goto m3_m4_offset; + } else { +#if defined(LZO1X) + m_off -= 0x4000; + assert(m_off > 0); + assert(m_off <= 0x7fff); + *op++ = (uint8_t)(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2)); + goto m3_m4_offset; +#elif defined(LZO1Y) + goto m4_match; +#endif + } + } + else { + { + const uint8_t* end = in_end; + const uint8_t* m = m_pos + M2_MAX_LEN + 1; + while (ip < end && *m == *ip) + m++, ip++; + m_len = pd(ip, ii); + } + assert(m_len > M2_MAX_LEN); + + if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + if (m_len <= 33) + *op++ = (uint8_t)(M3_MARKER | (m_len - 2)); + else { + m_len -= 33; + *op++ = M3_MARKER | 0; + goto m3_m4_len; + } + } else { +#if defined(LZO1Y) + m4_match: +#endif + m_off -= 0x4000; + assert(m_off > 0); + assert(m_off <= 0x7fff); + if (m_len <= M4_MAX_LEN) + *op++ = (uint8_t)(M4_MARKER | ((m_off & 0x4000) >> 11) | (m_len - 2)); + else { + m_len -= M4_MAX_LEN; + *op++ = (uint8_t)(M4_MARKER | ((m_off & 0x4000) >> 11)); + m3_m4_len: + while (m_len > 255) { + m_len -= 255; + *op++ = 0; + } + assert(m_len > 0); + *op++ = (uint8_t)(m_len); + } + } + m3_m4_offset: + *op++ = (uint8_t)((m_off & 63) << 2); + *op++ = (uint8_t)(m_off >> 6); + } +#if 0 + match_done: +#endif + ii = ip; + if (ip >= ip_end) + break; + } + + *out_len = pd(op, out); + return pd(in_end, ii); +} + +/*********************************************************************** +// public entry point +************************************************************************/ +int DO_COMPRESS(const uint8_t* in, unsigned in_len, + uint8_t* out, unsigned* out_len, + void* wrkmem) +{ + uint8_t* op = out; + unsigned t; + + if (in_len <= M2_MAX_LEN + 5) + t = in_len; + else { + t = do_compress(in,in_len,op,out_len,wrkmem); + op += *out_len; + } + + if (t > 0) { + const uint8_t* ii = in + in_len - t; + + if (op == out && t <= 238) + *op++ = (uint8_t)(17 + t); + else if (t <= 3) + op[-2] |= (uint8_t)(t); + else if (t <= 18) + *op++ = (uint8_t)(t - 3); + else { + unsigned tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + assert(tt > 0); + *op++ = (uint8_t)(tt); + } + do *op++ = *ii++; while (--t > 0); + } + + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; + + *out_len = pd(op, out); + return 0; /*LZO_E_OK*/ +} diff --git a/probe-busybox/archival/libarchive/lzo1x_d.c b/probe-busybox/archival/libarchive/lzo1x_d.c new file mode 100644 index 00000000..40b167e6 --- /dev/null +++ b/probe-busybox/archival/libarchive/lzo1x_d.c @@ -0,0 +1,423 @@ +/* implementation of the LZO1X decompression algorithm + + This file is part of the LZO real-time data compression library. 
+ + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "libbb.h" +#include "liblzo.h" + +/*********************************************************************** +// decompress a block of data. +************************************************************************/ +/* safe decompression with overrun testing */ +int lzo1x_decompress_safe(const uint8_t* in, unsigned in_len, + uint8_t* out, unsigned* out_len, + void* wrkmem UNUSED_PARAM) +{ + register uint8_t* op; + register const uint8_t* ip; + register unsigned t; +#if defined(COPY_DICT) + unsigned m_off; + const uint8_t* dict_end; +#else + register const uint8_t* m_pos = NULL; /* possibly not needed */ +#endif + const uint8_t* const ip_end = in + in_len; +#if defined(HAVE_ANY_OP) + uint8_t* const op_end = out + *out_len; +#endif +#if defined(LZO1Z) + unsigned last_m_off = 0; +#endif + +// LZO_UNUSED(wrkmem); + +#if defined(COPY_DICT) + if (dict) { + if (dict_len > M4_MAX_OFFSET) { + dict += dict_len - M4_MAX_OFFSET; + dict_len = M4_MAX_OFFSET; + } + dict_end = dict + dict_len; + } else { + dict_len = 0; + dict_end = NULL; + } +#endif /* COPY_DICT */ + + *out_len = 0; + + op = out; + ip = in; + + if (*ip > 17) { + t = *ip++ - 17; + if (t < 4) + goto match_next; + assert(t > 0); NEED_OP(t); NEED_IP(t+1); + do *op++ = *ip++; while (--t > 0); + goto first_literal_run; + } + + while (TEST_IP && TEST_OP) { + t = *ip++; + if (t >= 16) + goto match; + /* a literal run */ + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + TEST_IV(t); + t += 15 + *ip++; + } + /* copy literals */ + assert(t > 0); + NEED_OP(t+3); + NEED_IP(t+4); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +# if !defined(LZO_UNALIGNED_OK_4) + if (PTR_ALIGNED2_4(op, ip)) +# endif + { + COPY4(op, ip); + op += 4; + ip += 4; + if (--t > 0) { + if (t >= 4) { + do { + COPY4(op, ip); + op += 4; + ip += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do *op++ = *ip++; while (--t > 0); + } else { + do *op++ = *ip++; while (--t > 0); + } + } + } +# if !defined(LZO_UNALIGNED_OK_4) + else +# endif +#endif +#if !defined(LZO_UNALIGNED_OK_4) + { + *op++ = *ip++; + *op++ = *ip++; + *op++ = *ip++; + do *op++ = *ip++; while (--t > 0); + } +#endif + + first_literal_run: + t = *ip++; + if (t >= 16) + goto match; +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = (1 + M2_MAX_OFFSET) + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(3); + t = 3; COPY_DICT(t,m_off) +#else /* !COPY_DICT */ +#if defined(LZO1Z) + t = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - (1 + 
M2_MAX_OFFSET); + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LB(m_pos); NEED_OP(3); + *op++ = *m_pos++; + *op++ = *m_pos++; + *op++ = *m_pos; +#endif /* COPY_DICT */ + goto match_done; + + /* handle matches */ + do { + match: + if (t >= 64) { /* a M2 match */ +#if defined(COPY_DICT) +#if defined(LZO1X) + m_off = 1 + ((t >> 2) & 7) + (*ip++ << 3); + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_off = 1 + ((t >> 2) & 3) + (*ip++ << 2); + t = (t >> 4) - 3; +#elif defined(LZO1Z) + m_off = t & 0x1f; + if (m_off >= 0x1c) + m_off = last_m_off; + else { + m_off = 1 + (m_off << 6) + (*ip++ >> 2); + last_m_off = m_off; + } + t = (t >> 5) - 1; +#endif +#else /* !COPY_DICT */ +#if defined(LZO1X) + m_pos = op - 1; + m_pos -= (t >> 2) & 7; + m_pos -= *ip++ << 3; + t = (t >> 5) - 1; +#elif defined(LZO1Y) + m_pos = op - 1; + m_pos -= (t >> 2) & 3; + m_pos -= *ip++ << 2; + t = (t >> 4) - 3; +#elif defined(LZO1Z) + { + unsigned off = t & 0x1f; + m_pos = op; + if (off >= 0x1c) { + assert(last_m_off > 0); + m_pos -= last_m_off; + } else { + off = 1 + (off << 6) + (*ip++ >> 2); + m_pos -= off; + last_m_off = off; + } + } + t = (t >> 5) - 1; +#endif + TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1); + goto copy_match; +#endif /* COPY_DICT */ + } + else if (t >= 32) { /* a M3 match */ + t &= 31; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + TEST_IV(t); + t += 31 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (ip[0] << 6) + (ip[1] >> 2); + last_m_off = m_off; +#else + m_off = 1 + (ip[0] >> 2) + (ip[1] << 6); +#endif +#else /* !COPY_DICT */ +#if defined(LZO1Z) + { + unsigned off = 1 + (ip[0] << 6) + (ip[1] >> 2); + m_pos = op - off; + last_m_off = off; + } +#elif defined(LZO_UNALIGNED_OK_2) && defined(LZO_ABI_LITTLE_ENDIAN) + m_pos = op - 1; + m_pos -= (* (const lzo_ushortp) ip) >> 2; +#else + m_pos = op - 1; + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif +#endif /* COPY_DICT */ + ip += 2; + } + else if (t >= 16) { /* a M4 match */ +#if defined(COPY_DICT) + m_off = (t & 8) << 11; +#else /* !COPY_DICT */ + m_pos = op; + m_pos -= (t & 8) << 11; +#endif /* COPY_DICT */ + t &= 7; + if (t == 0) { + NEED_IP(1); + while (*ip == 0) { + t += 255; + ip++; + NEED_IP(1); + } + TEST_IV(t); + t += 7 + *ip++; + } +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off += (ip[0] << 6) + (ip[1] >> 2); +#else + m_off += (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_off == 0) + goto eof_found; + m_off += 0x4000; +#if defined(LZO1Z) + last_m_off = m_off; +#endif +#else /* !COPY_DICT */ +#if defined(LZO1Z) + m_pos -= (ip[0] << 6) + (ip[1] >> 2); +#elif defined(LZO_UNALIGNED_OK_2) && defined(LZO_ABI_LITTLE_ENDIAN) + m_pos -= (* (const lzo_ushortp) ip) >> 2; +#else + m_pos -= (ip[0] >> 2) + (ip[1] << 6); +#endif + ip += 2; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; +#if defined(LZO1Z) + last_m_off = pd((const uint8_t*)op, m_pos); +#endif +#endif /* COPY_DICT */ + } + else { /* a M1 match */ +#if defined(COPY_DICT) +#if defined(LZO1Z) + m_off = 1 + (t << 6) + (*ip++ >> 2); + last_m_off = m_off; +#else + m_off = 1 + (t >> 2) + (*ip++ << 2); +#endif + NEED_OP(2); + t = 2; COPY_DICT(t,m_off) +#else /* !COPY_DICT */ +#if defined(LZO1Z) + t = 1 + (t << 6) + (*ip++ >> 2); + m_pos = op - t; + last_m_off = t; +#else + m_pos = op - 1; + m_pos -= t >> 2; + m_pos -= *ip++ << 2; +#endif + TEST_LB(m_pos); NEED_OP(2); + *op++ = *m_pos++; + *op++ = *m_pos; +#endif /* COPY_DICT */ + goto match_done; + } + + /* copy match */ +#if defined(COPY_DICT) + + 
NEED_OP(t+3-1); + t += 3-1; COPY_DICT(t,m_off) + +#else /* !COPY_DICT */ + + TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1); +#if defined(LZO_UNALIGNED_OK_4) || defined(LZO_ALIGNED_OK_4) +# if !defined(LZO_UNALIGNED_OK_4) + if (t >= 2 * 4 - (3 - 1) && PTR_ALIGNED2_4(op,m_pos)) { + assert((op - m_pos) >= 4); /* both pointers are aligned */ +# else + if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { +# endif + COPY4(op,m_pos); + op += 4; m_pos += 4; t -= 4 - (3 - 1); + do { + COPY4(op,m_pos); + op += 4; m_pos += 4; t -= 4; + } while (t >= 4); + if (t > 0) + do *op++ = *m_pos++; while (--t > 0); + } + else +#endif + { + copy_match: + *op++ = *m_pos++; *op++ = *m_pos++; + do *op++ = *m_pos++; while (--t > 0); + } + +#endif /* COPY_DICT */ + + match_done: +#if defined(LZO1Z) + t = ip[-1] & 3; +#else + t = ip[-2] & 3; +#endif + if (t == 0) + break; + + /* copy literals */ + match_next: + assert(t > 0); + assert(t < 4); + NEED_OP(t); + NEED_IP(t+1); +#if 0 + do *op++ = *ip++; while (--t > 0); +#else + *op++ = *ip++; + if (t > 1) { + *op++ = *ip++; + if (t > 2) + *op++ = *ip++; + } +#endif + t = *ip++; + } while (TEST_IP && TEST_OP); + } + +//#if defined(HAVE_TEST_IP) || defined(HAVE_TEST_OP) + /* no EOF code was found */ + *out_len = pd(op, out); + return LZO_E_EOF_NOT_FOUND; +//#endif + + eof_found: + assert(t == 1); + *out_len = pd(op, out); + return (ip == ip_end ? LZO_E_OK : + (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); + +//#if defined(HAVE_NEED_IP) + input_overrun: + *out_len = pd(op, out); + return LZO_E_INPUT_OVERRUN; +//#endif + +//#if defined(HAVE_NEED_OP) + output_overrun: + *out_len = pd(op, out); + return LZO_E_OUTPUT_OVERRUN; +//#endif + +//#if defined(LZO_TEST_OVERRUN_LOOKBEHIND) + lookbehind_overrun: + *out_len = pd(op, out); + return LZO_E_LOOKBEHIND_OVERRUN; +//#endif +} diff --git a/probe-busybox/archival/libarchive/open_transformer.c b/probe-busybox/archival/libarchive/open_transformer.c new file mode 100644 index 00000000..ac7e5db9 --- /dev/null +++ b/probe-busybox/archival/libarchive/open_transformer.c @@ -0,0 +1,362 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC init_transformer_state(transformer_state_t *xstate) +{ + memset(xstate, 0, sizeof(*xstate)); +} + +int FAST_FUNC check_signature16(transformer_state_t *xstate, unsigned magic16) +{ + if (!xstate->signature_skipped) { + uint16_t magic2; + if (full_read(xstate->src_fd, &magic2, 2) != 2 || magic2 != magic16) { + bb_error_msg("invalid magic"); + return -1; + } + xstate->signature_skipped = 2; + } + return 0; +} + +ssize_t FAST_FUNC transformer_write(transformer_state_t *xstate, const void *buf, size_t bufsize) +{ + ssize_t nwrote; + + if (xstate->mem_output_size_max != 0) { + size_t pos = xstate->mem_output_size; + size_t size; + + size = (xstate->mem_output_size += bufsize); + if (size > xstate->mem_output_size_max) { + free(xstate->mem_output_buf); + xstate->mem_output_buf = NULL; + bb_perror_msg("buffer %u too small", (unsigned)xstate->mem_output_size_max); + nwrote = -1; + goto ret; + } + xstate->mem_output_buf = xrealloc(xstate->mem_output_buf, size + 1); + memcpy(xstate->mem_output_buf + pos, buf, bufsize); + xstate->mem_output_buf[size] = '\0'; + nwrote = bufsize; + } else { + nwrote = full_write(xstate->dst_fd, buf, bufsize); + if (nwrote != (ssize_t)bufsize) { + bb_perror_msg("write"); + nwrote = -1; + goto ret; + } + } + ret: + return nwrote; +} + +ssize_t FAST_FUNC xtransformer_write(transformer_state_t *xstate, const void *buf, size_t bufsize) +{ + ssize_t nwrote = transformer_write(xstate, buf, bufsize); + if (nwrote != (ssize_t)bufsize) { + xfunc_die(); + } + return nwrote; +} + +void check_errors_in_children(int signo) +{ + int status; + + if (!signo) { + /* block waiting for any child */ + if (wait(&status) < 0) +//FIXME: check EINTR? + return; /* probably there are no children */ + goto check_status; + } + + /* Wait for any child without blocking */ + for (;;) { + if (wait_any_nohang(&status) < 0) +//FIXME: check EINTR? + /* wait failed?! I'm confused... */ + return; + check_status: + /*if (WIFEXITED(status) && WEXITSTATUS(status) == 0)*/ + /* On Linux, the above can be checked simply as: */ + if (status == 0) + /* this child exited with 0 */ + continue; + /* Cannot happen: + if (!WIFSIGNALED(status) && !WIFEXITED(status)) ???; + */ + bb_got_signal = 1; + } +} + +/* transformer(), more than meets the eye */ +#if BB_MMU +void FAST_FUNC fork_transformer(int fd, + int signature_skipped, + IF_DESKTOP(long long) int FAST_FUNC (*transformer)(transformer_state_t *xstate) +) +#else +void FAST_FUNC fork_transformer(int fd, const char *transform_prog) +#endif +{ + struct fd_pair fd_pipe; + int pid; + + xpiped_pair(fd_pipe); + pid = BB_MMU ? xfork() : xvfork(); + if (pid == 0) { + /* Child */ + close(fd_pipe.rd); /* we don't want to read from the parent */ + // FIXME: error check? +#if BB_MMU + { + IF_DESKTOP(long long) int r; + transformer_state_t xstate; + init_transformer_state(&xstate); + xstate.signature_skipped = signature_skipped; + xstate.src_fd = fd; + xstate.dst_fd = fd_pipe.wr; + r = transformer(&xstate); + if (ENABLE_FEATURE_CLEAN_UP) { + close(fd_pipe.wr); /* send EOF */ + close(fd); + } + /* must be _exit! 
bug was actually seen here */ + _exit(/*error if:*/ r < 0); + } +#else + { + char *argv[4]; + xmove_fd(fd, 0); + xmove_fd(fd_pipe.wr, 1); + argv[0] = (char*)transform_prog; + argv[1] = (char*)"-cf"; + argv[2] = (char*)"-"; + argv[3] = NULL; + BB_EXECVP(transform_prog, argv); + bb_perror_msg_and_die("can't execute '%s'", transform_prog); + } +#endif + /* notreached */ + } + + /* parent process */ + close(fd_pipe.wr); /* don't want to write to the child */ + xmove_fd(fd_pipe.rd, fd); +} + + +#if SEAMLESS_COMPRESSION + +/* Used by e.g. rpm which gives us a fd without filename, + * thus we can't guess the format from filename's extension. + */ +static transformer_state_t *setup_transformer_on_fd(int fd, int fail_if_not_compressed) +{ + union { + uint8_t b[4]; + uint16_t b16[2]; + uint32_t b32[1]; + } magic; + transformer_state_t *xstate; + + xstate = xzalloc(sizeof(*xstate)); + xstate->src_fd = fd; + xstate->signature_skipped = 2; + + /* .gz and .bz2 both have 2-byte signature, and their + * unpack_XXX_stream wants this header skipped. */ + xread(fd, magic.b16, sizeof(magic.b16[0])); + if (ENABLE_FEATURE_SEAMLESS_GZ + && magic.b16[0] == GZIP_MAGIC + ) { + xstate->xformer = unpack_gz_stream; + USE_FOR_NOMMU(xstate->xformer_prog = "gunzip";) + goto found_magic; + } + if (ENABLE_FEATURE_SEAMLESS_Z + && magic.b16[0] == COMPRESS_MAGIC + ) { + xstate->xformer = unpack_Z_stream; + USE_FOR_NOMMU(xstate->xformer_prog = "uncompress";) + goto found_magic; + } + if (ENABLE_FEATURE_SEAMLESS_BZ2 + && magic.b16[0] == BZIP2_MAGIC + ) { + xstate->xformer = unpack_bz2_stream; + USE_FOR_NOMMU(xstate->xformer_prog = "bunzip2";) + goto found_magic; + } + if (ENABLE_FEATURE_SEAMLESS_XZ + && magic.b16[0] == XZ_MAGIC1 + ) { + xstate->signature_skipped = 6; + xread(fd, magic.b32, sizeof(magic.b32[0])); + if (magic.b32[0] == XZ_MAGIC2) { + xstate->xformer = unpack_xz_stream; + USE_FOR_NOMMU(xstate->xformer_prog = "unxz";) + goto found_magic; + } + } + + /* No known magic seen */ + if (fail_if_not_compressed) + bb_error_msg_and_die("no gzip" + IF_FEATURE_SEAMLESS_BZ2("/bzip2") + IF_FEATURE_SEAMLESS_XZ("/xz") + " magic"); + + /* Some callers expect this function to "consume" fd + * even if data is not compressed. In this case, + * we return a state with trivial transformer. + */ +// USE_FOR_MMU(xstate->xformer = copy_stream;) +// USE_FOR_NOMMU(xstate->xformer_prog = "cat";) + + found_magic: + return xstate; +} + +/* Used by e.g. rpm which gives us a fd without filename, + * thus we can't guess the format from filename's extension. + */ +int FAST_FUNC setup_unzip_on_fd(int fd, int fail_if_not_compressed) +{ + transformer_state_t *xstate = setup_transformer_on_fd(fd, fail_if_not_compressed); + + if (!xstate || !xstate->xformer) { + free(xstate); + return 1; + } + +# if BB_MMU + fork_transformer_with_no_sig(xstate->src_fd, xstate->xformer); +# else + /* NOMMU version of fork_transformer execs + * an external unzipper that wants + * file position at the start of the file. 
+ */ + xlseek(fd, - xstate->signature_skipped, SEEK_CUR); + xstate->signature_skipped = 0; + fork_transformer_with_sig(xstate->src_fd, xstate->xformer, xstate->xformer_prog); +# endif + free(xstate); + return 0; +} + +static transformer_state_t *open_transformer(const char *fname, int fail_if_not_compressed) +{ + transformer_state_t *xstate; + int fd; + + fd = open(fname, O_RDONLY); + if (fd < 0) + return NULL; + + if (ENABLE_FEATURE_SEAMLESS_LZMA) { + /* .lzma has no header/signature, can only detect it by extension */ + char *sfx = strrchr(fname, '.'); + if (sfx && strcmp(sfx+1, "lzma") == 0) { + xstate = xzalloc(sizeof(*xstate)); + xstate->src_fd = fd; + xstate->xformer = unpack_lzma_stream; + USE_FOR_NOMMU(xstate->xformer_prog = "unlzma";) + return xstate; + } + } + + xstate = setup_transformer_on_fd(fd, fail_if_not_compressed); + + return xstate; +} + +int FAST_FUNC open_zipped(const char *fname, int fail_if_not_compressed) +{ + int fd; + transformer_state_t *xstate; + + xstate = open_transformer(fname, fail_if_not_compressed); + if (!xstate) + return -1; + + fd = xstate->src_fd; +# if BB_MMU + if (xstate->xformer) { + fork_transformer_with_no_sig(fd, xstate->xformer); + } else { + /* the file is not compressed */ + xlseek(fd, - xstate->signature_skipped, SEEK_CUR); + xstate->signature_skipped = 0; + } +# else + /* NOMMU can't avoid the seek :( */ + xlseek(fd, - xstate->signature_skipped, SEEK_CUR); + xstate->signature_skipped = 0; + if (xstate->xformer) { + fork_transformer_with_sig(fd, xstate->xformer, xstate->xformer_prog); + } /* else: the file is not compressed */ +# endif + + free(xstate); + return fd; +} + +void* FAST_FUNC xmalloc_open_zipped_read_close(const char *fname, size_t *maxsz_p) +{ +# if 1 + transformer_state_t *xstate; + char *image; + + xstate = open_transformer(fname, /*fail_if_not_compressed:*/ 0); + if (!xstate) /* file open error */ + return NULL; + + image = NULL; + if (xstate->xformer) { + /* In-memory decompression */ + xstate->mem_output_size_max = maxsz_p ? *maxsz_p : (size_t)(INT_MAX - 4095); + xstate->xformer(xstate); + if (xstate->mem_output_buf) { + image = xstate->mem_output_buf; + if (maxsz_p) + *maxsz_p = xstate->mem_output_size; + } + } else { + /* File is not compressed */ +//FIXME: avoid seek + xlseek(xstate->src_fd, - xstate->signature_skipped, SEEK_CUR); + xstate->signature_skipped = 0; + image = xmalloc_read(xstate->src_fd, maxsz_p); + } + + if (!image) + bb_perror_msg("read error from '%s'", fname); + close(xstate->src_fd); + free(xstate); + return image; +# else + /* This version forks a subprocess - much more expensive */ + int fd; + char *image; + + fd = open_zipped(fname, /*fail_if_not_compressed:*/ 0); + if (fd < 0) + return NULL; + + image = xmalloc_read(fd, maxsz_p); + if (!image) + bb_perror_msg("read error from '%s'", fname); + close(fd); + return image; +# endif +} + +#endif /* SEAMLESS_COMPRESSION */ diff --git a/probe-busybox/archival/libarchive/seek_by_jump.c b/probe-busybox/archival/libarchive/seek_by_jump.c new file mode 100644 index 00000000..4fcd99ac --- /dev/null +++ b/probe-busybox/archival/libarchive/seek_by_jump.c @@ -0,0 +1,19 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" +#include "bb_archive.h" + +void FAST_FUNC seek_by_jump(int fd, off_t amount) +{ + if (amount + && lseek(fd, amount, SEEK_CUR) == (off_t) -1 + ) { + if (errno == ESPIPE) + seek_by_read(fd, amount); + else + bb_perror_msg_and_die("seek failure"); + } +} diff --git a/probe-busybox/archival/libarchive/seek_by_read.c b/probe-busybox/archival/libarchive/seek_by_read.c new file mode 100644 index 00000000..c0fde966 --- /dev/null +++ b/probe-busybox/archival/libarchive/seek_by_read.c @@ -0,0 +1,16 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +/* If we are reading through a pipe, or from stdin then we can't lseek, + * we must read and discard the data to skip over it. + */ +void FAST_FUNC seek_by_read(int fd, off_t amount) +{ + if (amount) + bb_copyfd_exact_size(fd, -1, amount); +} diff --git a/probe-busybox/archival/libarchive/unpack_ar_archive.c b/probe-busybox/archival/libarchive/unpack_ar_archive.c new file mode 100644 index 00000000..0bc03034 --- /dev/null +++ b/probe-busybox/archival/libarchive/unpack_ar_archive.c @@ -0,0 +1,22 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" +#include "ar.h" + +void FAST_FUNC unpack_ar_archive(archive_handle_t *ar_archive) +{ + char magic[7]; + + xread(ar_archive->src_fd, magic, AR_MAGIC_LEN); + if (!is_prefixed_with(magic, AR_MAGIC)) { + bb_error_msg_and_die("invalid ar magic"); + } + ar_archive->offset += AR_MAGIC_LEN; + + while (get_header_ar(ar_archive) == EXIT_SUCCESS) + continue; +} diff --git a/probe-busybox/archival/libarchive/unsafe_prefix.c b/probe-busybox/archival/libarchive/unsafe_prefix.c new file mode 100644 index 00000000..9994f4d9 --- /dev/null +++ b/probe-busybox/archival/libarchive/unsafe_prefix.c @@ -0,0 +1,36 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "bb_archive.h" + +const char* FAST_FUNC strip_unsafe_prefix(const char *str) +{ + const char *cp = str; + while (1) { + char *cp2; + if (*cp == '/') { + cp++; + continue; + } + if (is_prefixed_with(cp, "/../"+1)) { + cp += 3; + continue; + } + cp2 = strstr(cp, "/../"); + if (!cp2) + break; + cp = cp2 + 4; + } + if (cp != str) { + static smallint warned = 0; + if (!warned) { + warned = 1; + bb_error_msg("removing leading '%.*s' from member names", + (int)(cp - str), str); + } + } + return cp; +} diff --git a/probe-busybox/archival/libarchive/unxz/README b/probe-busybox/archival/libarchive/unxz/README new file mode 100644 index 00000000..a8491203 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/README @@ -0,0 +1,135 @@ + +XZ Embedded +=========== + + XZ Embedded is a relatively small, limited implementation of the .xz + file format. Currently only decoding is implemented. + + XZ Embedded was written for use in the Linux kernel, but the code can + be easily used in other environments too, including regular userspace + applications. See userspace/xzminidec.c for an example program. + + This README contains information that is useful only when the copy + of XZ Embedded isn't part of the Linux kernel tree. You should also + read linux/Documentation/xz.txt even if you aren't using XZ Embedded + as part of Linux; information in that file is not repeated in this + README. 
+ +Compiling the Linux kernel module + + The xz_dec module depends on crc32 module, so make sure that you have + it enabled (CONFIG_CRC32). + + Building the xz_dec and xz_dec_test modules without support for BCJ + filters: + + cd linux/lib/xz + make -C /path/to/kernel/source \ + KCPPFLAGS=-I"$(pwd)/../../include" M="$(pwd)" \ + CONFIG_XZ_DEC=m CONFIG_XZ_DEC_TEST=m + + Building the xz_dec and xz_dec_test modules with support for BCJ + filters: + + cd linux/lib/xz + make -C /path/to/kernel/source \ + KCPPFLAGS=-I"$(pwd)/../../include" M="$(pwd)" \ + CONFIG_XZ_DEC=m CONFIG_XZ_DEC_TEST=m CONFIG_XZ_DEC_BCJ=y \ + CONFIG_XZ_DEC_X86=y CONFIG_XZ_DEC_POWERPC=y \ + CONFIG_XZ_DEC_IA64=y CONFIG_XZ_DEC_ARM=y \ + CONFIG_XZ_DEC_ARMTHUMB=y CONFIG_XZ_DEC_SPARC=y + + If you want only one or a few of the BCJ filters, omit the appropriate + variables. CONFIG_XZ_DEC_BCJ=y is always required to build the support + code shared between all BCJ filters. + + Most people don't need the xz_dec_test module. You can skip building + it by omitting CONFIG_XZ_DEC_TEST=m from the make command line. + +Compiler requirements + + XZ Embedded should compile as either GNU-C89 (used in the Linux + kernel) or with any C99 compiler. Getting the code to compile with + non-GNU C89 compiler or a C++ compiler should be quite easy as + long as there is a data type for unsigned 64-bit integer (or the + code is modified not to support large files, which needs some more + care than just using 32-bit integer instead of 64-bit). + + If you use GCC, try to use a recent version. For example, on x86-32, + xz_dec_lzma2.c compiled with GCC 3.3.6 is 15-25 % slower than when + compiled with GCC 4.3.3. + +Embedding into userspace applications + + To embed the XZ decoder, copy the following files into a single + directory in your source code tree: + + linux/include/linux/xz.h + linux/lib/xz/xz_crc32.c + linux/lib/xz/xz_dec_lzma2.c + linux/lib/xz/xz_dec_stream.c + linux/lib/xz/xz_lzma2.h + linux/lib/xz/xz_private.h + linux/lib/xz/xz_stream.h + userspace/xz_config.h + + Alternatively, xz.h may be placed into a different directory but then + that directory must be in the compiler include path when compiling + the .c files. + + Your code should use only the functions declared in xz.h. The rest of + the .h files are meant only for internal use in XZ Embedded. + + You may want to modify xz_config.h to be more suitable for your build + environment. Probably you should at least skim through it even if the + default file works as is. + +BCJ filter support + + If you want support for one or more BCJ filters, you need to copy also + linux/lib/xz/xz_dec_bcj.c into your application, and use appropriate + #defines in xz_config.h or in compiler flags. You don't need these + #defines in the code that just uses XZ Embedded via xz.h, but having + them always #defined doesn't hurt either. + + #define Instruction set BCJ filter endianness + XZ_DEC_X86 x86-32 or x86-64 Little endian only + XZ_DEC_POWERPC PowerPC Big endian only + XZ_DEC_IA64 Itanium (IA-64) Big or little endian + XZ_DEC_ARM ARM Little endian only + XZ_DEC_ARMTHUMB ARM-Thumb Little endian only + XZ_DEC_SPARC SPARC Big or little endian + + While some architectures are (partially) bi-endian, the endianness + setting doesn't change the endianness of the instructions on all + architectures. That's why Itanium and SPARC filters work for both big + and little endian executables (Itanium has little endian instructions + and SPARC has big endian instructions). 
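As a hedged illustration of the table above (not part of the vendored files), an embedder that only has to handle x86/x86-64 executables could enable just that decoder in its own copy of xz_config.h, or pass the equivalent -D flag on the compiler command line, remembering to compile xz_dec_bcj.c alongside the other sources:

/* Illustrative excerpt of an embedder's xz_config.h (sketch, not vendored code).
 * Enable only the BCJ decoders actually needed; xz_dec_bcj.c must be compiled
 * as well, as described above. */
#define XZ_DEC_X86	/* x86-32 / x86-64 filtered streams, little endian only */
/* leave the other XZ_DEC_* filters disabled to keep code size down */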
+ + There currently is no filter for little endian PowerPC or big endian + ARM or ARM-Thumb. Implementing filters for them can be considered if + there is a need for such filters in real-world applications. + +Notes about shared libraries + + If you are including XZ Embedded into a shared library, you very + probably should rename the xz_* functions to prevent symbol + conflicts in case your library is linked against some other library + or application that also has XZ Embedded in it (which may even be + a different version of XZ Embedded). TODO: Provide an easy way + to do this. + + Please don't create a shared library of XZ Embedded itself unless + it is fine to rebuild everything depending on that shared library + everytime you upgrade to a newer version of XZ Embedded. There are + no API or ABI stability guarantees between different versions of + XZ Embedded. + +Specifying the calling convention + + XZ_FUNC macro was included to support declaring functions with __init + in Linux. Outside Linux, it can be used to specify the calling + convention on systems that support multiple calling conventions. + For example, on Windows, you may make all functions use the stdcall + calling convention by defining XZ_FUNC=__stdcall when building and + using the functions from XZ Embedded. diff --git a/probe-busybox/archival/libarchive/unxz/xz.h b/probe-busybox/archival/libarchive/unxz/xz.h new file mode 100644 index 00000000..e0b22db5 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz.h @@ -0,0 +1,280 @@ +/* + * XZ decompressor + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_H +#define XZ_H + +#ifdef __KERNEL__ +# include +# include +#else +# include +# include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* In Linux, this is used to make extern functions static when needed. */ +#ifndef XZ_EXTERN +# define XZ_EXTERN extern +#endif + +/* In Linux, this is used to mark the functions with __init when needed. */ +#ifndef XZ_FUNC +# define XZ_FUNC +#endif + +/** + * enum xz_mode - Operation mode + * + * @XZ_SINGLE: Single-call mode. This uses less RAM than + * than multi-call modes, because the LZMA2 + * dictionary doesn't need to be allocated as + * part of the decoder state. All required data + * structures are allocated at initialization, + * so xz_dec_run() cannot return XZ_MEM_ERROR. + * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2 + * dictionary buffer. All data structures are + * allocated at initialization, so xz_dec_run() + * cannot return XZ_MEM_ERROR. + * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is + * allocated once the required size has been + * parsed from the stream headers. If the + * allocation fails, xz_dec_run() will return + * XZ_MEM_ERROR. + * + * It is possible to enable support only for a subset of the above + * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC, + * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled + * with support for all operation modes, but the preboot code may + * be built with fewer features to minimize code size. + */ +enum xz_mode { + XZ_SINGLE, + XZ_PREALLOC, + XZ_DYNALLOC +}; + +/** + * enum xz_ret - Return codes + * @XZ_OK: Everything is OK so far. More input or more + * output space is required to continue. This + * return code is possible only in multi-call mode + * (XZ_PREALLOC or XZ_DYNALLOC). + * @XZ_STREAM_END: Operation finished successfully. 
+ * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding + * is still possible in multi-call mode by simply + * calling xz_dec_run() again. + * Note that this return value is used only if + * XZ_DEC_ANY_CHECK was defined at build time, + * which is not used in the kernel. Unsupported + * check types return XZ_OPTIONS_ERROR if + * XZ_DEC_ANY_CHECK was not defined at build time. + * @XZ_MEM_ERROR: Allocating memory failed. This return code is + * possible only if the decoder was initialized + * with XZ_DYNALLOC. The amount of memory that was + * tried to be allocated was no more than the + * dict_max argument given to xz_dec_init(). + * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than + * allowed by the dict_max argument given to + * xz_dec_init(). This return value is possible + * only in multi-call mode (XZ_PREALLOC or + * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) + * ignores the dict_max argument. + * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic + * bytes). + * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested + * compression options. In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @XZ_DATA_ERROR: Compressed data is corrupt. + * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly + * different between multi-call and single-call + * mode; more information below. + * + * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls + * to XZ code cannot consume any input and cannot produce any new output. + * This happens when there is no new input available, or the output buffer + * is full while at least one output byte is still pending. Assuming your + * code is not buggy, you can get this error only when decoding a compressed + * stream that is truncated or otherwise corrupt. + * + * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer + * is too small or the compressed input is corrupt in a way that makes the + * decoder produce more output than the caller expected. When it is + * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR + * is used instead of XZ_BUF_ERROR. + */ +enum xz_ret { + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR +}; + +/** + * struct xz_buf - Passing input and output buffers to XZ code + * @in: Beginning of the input buffer. This may be NULL if and only + * if in_pos is equal to in_size. + * @in_pos: Current position in the input buffer. This must not exceed + * in_size. + * @in_size: Size of the input buffer + * @out: Beginning of the output buffer. This may be NULL if and only + * if out_pos is equal to out_size. + * @out_pos: Current position in the output buffer. This must not exceed + * out_size. + * @out_size: Size of the output buffer + * + * Only the contents of the output buffer from out[out_pos] onward, and + * the variables in_pos and out_pos are modified by the XZ code. + */ +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +/** + * struct xz_dec - Opaque type to hold the XZ decoder state + */ +struct xz_dec; + +/** + * xz_dec_init() - Allocate and initialize a XZ decoder state + * @mode: Operation mode + * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for + * multi-call decoding. 
This is ignored in single-call mode + * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dict_max don't make sense. + * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, + * 512 KiB, and 1 MiB are probably the only reasonable values, + * except for kernel and initramfs images where a bigger + * dictionary can be fine and useful. + * + * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at + * once. The caller must provide enough output space or the decoding will + * fail. The output space is used as the dictionary buffer, which is why + * there is no need to allocate the dictionary as part of the decoder's + * internal state. + * + * Because the output buffer is used as the workspace, streams encoded using + * a big dictionary are not a problem in single-call mode. It is enough that + * the output buffer is big enough to hold the actual uncompressed data; it + * can be smaller than the dictionary size stored in the stream headers. + * + * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes + * of memory is preallocated for the LZMA2 dictionary. This way there is no + * risk that xz_dec_run() could run out of memory, since xz_dec_run() will + * never allocate any memory. Instead, if the preallocated dictionary is too + * small for decoding the given input stream, xz_dec_run() will return + * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be + * decoded to avoid allocating excessive amount of memory for the dictionary. + * + * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): + * dict_max specifies the maximum allowed dictionary size that xz_dec_run() + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * On success, xz_dec_init() returns a pointer to struct xz_dec, which is + * ready to be used with xz_dec_run(). If memory allocation fails, + * xz_dec_init() returns NULL. + */ +XZ_EXTERN struct xz_dec * XZ_FUNC xz_dec_init( + enum xz_mode mode, uint32_t dict_max); + +/** + * xz_dec_run() - Run the XZ decoder + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * + * The possible return values depend on build options and operation mode. + * See enum xz_ret for details. + * + * Note that if an error occurs in single-call mode (return value is not + * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the + * contents of the output buffer from b->out[b->out_pos] onward are + * undefined. This is true even after XZ_BUF_ERROR, because with some filter + * chains, there may be a second pass over the output buffer, and this pass + * cannot be properly done if the output buffer is truncated. Thus, you + * cannot give the single-call decoder a too small buffer and then expect to + * get that amount valid data from the beginning of the stream. You must use + * the multi-call decoder if you don't want to uncompress the whole stream. 
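To make the multi-call contract above concrete, here is a minimal usage sketch built only from the declarations in this header (xz_crc32_init(), xz_dec_init(), struct xz_buf, xz_dec_run(), xz_dec_end()). It is an editor's illustration, not part of the vendored header; the function name, buffer sizes, and dictionary cap are arbitrary choices.

/* Sketch: stream-decompress src into dst with the multi-call decoder.
 * Illustrative only; not part of the vendored XZ Embedded sources. */
#include <stdio.h>
#include <stdint.h>
#include "xz.h"

static int decode_stream(FILE *src, FILE *dst)
{
	uint8_t inbuf[BUFSIZ], outbuf[BUFSIZ];
	/* in, in_pos, in_size, out, out_pos, out_size */
	struct xz_buf b = { inbuf, 0, 0, outbuf, 0, sizeof(outbuf) };
	struct xz_dec *s;
	enum xz_ret ret;

	xz_crc32_init();			/* userspace builds: set up the CRC32 table first */
	s = xz_dec_init(XZ_DYNALLOC, 1 << 26);	/* cap the LZMA2 dictionary at 64 MiB */
	if (s == NULL)
		return -1;

	do {
		if (b.in_pos == b.in_size) {	/* refill input when it is exhausted */
			b.in_size = fread(inbuf, 1, sizeof(inbuf), src);
			b.in_pos = 0;
		}
		ret = xz_dec_run(s, &b);
		fwrite(outbuf, 1, b.out_pos, dst);	/* drain whatever was produced */
		b.out_pos = 0;
	} while (ret == XZ_OK);

	xz_dec_end(s);
	return ret == XZ_STREAM_END ? 0 : -1;	/* other codes: see enum xz_ret */
}

A single-call caller would instead pass XZ_SINGLE to xz_dec_init(), hand the whole input and a sufficiently large output buffer to one xz_dec_run() call, and expect XZ_STREAM_END directly, as described for single-call mode earlier in this header.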
+ */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_run(struct xz_dec *s, struct xz_buf *b); + +/** + * xz_dec_reset() - Reset an already allocated decoder state + * @s: Decoder state allocated using xz_dec_init() + * + * This function can be used to reset the multi-call decoder state without + * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). + * + * In single-call mode, xz_dec_reset() is always called in the beginning of + * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in + * multi-call mode. + */ +XZ_EXTERN void XZ_FUNC xz_dec_reset(struct xz_dec *s); + +/** + * xz_dec_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_init(). If s is NULL, + * this function does nothing. + */ +XZ_EXTERN void XZ_FUNC xz_dec_end(struct xz_dec *s); + +/* + * Standalone build (userspace build or in-kernel build for boot time use) + * needs a CRC32 implementation. For normal in-kernel use, kernel's own + * CRC32 module is used instead, and users of this module don't need to + * care about the functions below. + */ +#ifndef XZ_INTERNAL_CRC32 +# ifdef __KERNEL__ +# define XZ_INTERNAL_CRC32 0 +# else +# define XZ_INTERNAL_CRC32 1 +# endif +#endif + +#if XZ_INTERNAL_CRC32 +/* + * This must be called before any other xz_* function to initialize + * the CRC32 lookup table. + */ +XZ_EXTERN void XZ_FUNC xz_crc32_init(void); + +/* + * Update CRC32 value using the polynomial from IEEE-802.3. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. + */ +XZ_EXTERN uint32_t XZ_FUNC xz_crc32( + const uint8_t *buf, size_t size, uint32_t crc); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/archival/libarchive/unxz/xz_config.h b/probe-busybox/archival/libarchive/unxz/xz_config.h new file mode 100644 index 00000000..187e1cbe --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_config.h @@ -0,0 +1,123 @@ +/* + * Private includes and definitions for userspace use of XZ Embedded + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_CONFIG_H +#define XZ_CONFIG_H + +/* Uncomment as needed to enable BCJ filter decoders. */ +/* #define XZ_DEC_X86 */ +/* #define XZ_DEC_POWERPC */ +/* #define XZ_DEC_IA64 */ +/* #define XZ_DEC_ARM */ +/* #define XZ_DEC_ARMTHUMB */ +/* #define XZ_DEC_SPARC */ + +#include +#include +#include + +#include "xz.h" + +#define kmalloc(size, flags) malloc(size) +#define kfree(ptr) free(ptr) +#define vmalloc(size) malloc(size) +#define vfree(ptr) free(ptr) + +#define memeq(a, b, size) (memcmp(a, b, size) == 0) +#define memzero(buf, size) memset(buf, 0, size) + +#undef min +#undef min_t +#define min(x, y) ((x) < (y) ? (x) : (y)) +#define min_t(type, x, y) min(x, y) + +/* + * Some functions have been marked with __always_inline to keep the + * performance reasonable even when the compiler is optimizing for + * small code size. You may be able to save a few bytes by #defining + * __always_inline to plain inline, but don't complain if the code + * becomes slow. + * + * NOTE: System headers on GNU/Linux may #define this macro already, + * so if you want to change it, you need to #undef it first. 
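+ *
+ * For example, a size-optimized build could do this before the header
+ * is processed (sketch only):
+ *
+ *	#undef __always_inline
+ *	#define __always_inline inline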
+ */ +#ifndef __always_inline +# ifdef __GNUC__ +# define __always_inline \ + inline __attribute__((__always_inline__)) +# else +# define __always_inline inline +# endif +#endif + +/* + * Some functions are marked to never be inlined to reduce stack usage. + * If you don't care about stack usage, you may want to modify this so + * that noinline_for_stack is #defined to be empty even when using GCC. + * Doing so may save a few bytes in binary size. + */ +#ifndef noinline_for_stack +# ifdef __GNUC__ +# define noinline_for_stack __attribute__((__noinline__)) +# else +# define noinline_for_stack +# endif +#endif + +/* Inline functions to access unaligned unsigned 32-bit integers */ +#ifndef get_unaligned_le32 +static inline uint32_t XZ_FUNC get_unaligned_le32(const uint8_t *buf) +{ + return (uint32_t)buf[0] + | ((uint32_t)buf[1] << 8) + | ((uint32_t)buf[2] << 16) + | ((uint32_t)buf[3] << 24); +} +#endif + +#ifndef get_unaligned_be32 +static inline uint32_t XZ_FUNC get_unaligned_be32(const uint8_t *buf) +{ + return (uint32_t)(buf[0] << 24) + | ((uint32_t)buf[1] << 16) + | ((uint32_t)buf[2] << 8) + | (uint32_t)buf[3]; +} +#endif + +#ifndef put_unaligned_le32 +static inline void XZ_FUNC put_unaligned_le32(uint32_t val, uint8_t *buf) +{ + buf[0] = (uint8_t)val; + buf[1] = (uint8_t)(val >> 8); + buf[2] = (uint8_t)(val >> 16); + buf[3] = (uint8_t)(val >> 24); +} +#endif + +#ifndef put_unaligned_be32 +static inline void XZ_FUNC put_unaligned_be32(uint32_t val, uint8_t *buf) +{ + buf[0] = (uint8_t)(val >> 24); + buf[1] = (uint8_t)(val >> 16); + buf[2] = (uint8_t)(val >> 8); + buf[3] = (uint8_t)val; +} +#endif + +/* + * Use get_unaligned_le32() also for aligned access for simplicity. On + * little endian systems, #define get_le32(ptr) (*(const uint32_t *)(ptr)) + * could save a few bytes in code size. + */ +#ifndef get_le32 +# define get_le32 get_unaligned_le32 +#endif + +#endif diff --git a/probe-busybox/archival/libarchive/unxz/xz_dec_bcj.c b/probe-busybox/archival/libarchive/unxz/xz_dec_bcj.c new file mode 100644 index 00000000..e0f913a9 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_dec_bcj.c @@ -0,0 +1,580 @@ +/* + * Branch/Call/Jump (BCJ) filter decoders + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" + +/* + * The rest of the file is inside this ifdef. It makes things a little more + * convenient when building without support for any BCJ filters. + */ +#ifdef XZ_DEC_BCJ + +struct xz_dec_bcj { + /* Type of the BCJ filter being used */ + enum { + BCJ_X86 = 4, /* x86 or x86-64 */ + BCJ_POWERPC = 5, /* Big endian only */ + BCJ_IA64 = 6, /* Big or little endian */ + BCJ_ARM = 7, /* Little endian only */ + BCJ_ARMTHUMB = 8, /* Little endian only */ + BCJ_SPARC = 9 /* Big or little endian */ + } type; + + /* + * Return value of the next filter in the chain. We need to preserve + * this information across calls, because we must not call the next + * filter anymore once it has returned XZ_STREAM_END. + */ + enum xz_ret ret; + + /* True if we are operating in single-call mode. */ + bool single_call; + + /* + * Absolute position relative to the beginning of the uncompressed + * data (in a single .xz Block). We care only about the lowest 32 + * bits so this doesn't need to be uint64_t even with big files. 
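+	 * The filters only rewrite 32-bit or narrower address fields,
+	 * so doing this arithmetic modulo 2^32 produces the same
+	 * converted bytes.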
+ */ + uint32_t pos; + + /* x86 filter state */ + uint32_t x86_prev_mask; + + /* Temporary space to hold the variables from struct xz_buf */ + uint8_t *out; + size_t out_pos; + size_t out_size; + + struct { + /* Amount of already filtered data in the beginning of buf */ + size_t filtered; + + /* Total amount of data currently stored in buf */ + size_t size; + + /* + * Buffer to hold a mix of filtered and unfiltered data. This + * needs to be big enough to hold Alignment + 2 * Look-ahead: + * + * Type Alignment Look-ahead + * x86 1 4 + * PowerPC 4 0 + * IA-64 16 0 + * ARM 4 0 + * ARM-Thumb 2 2 + * SPARC 4 0 + */ + uint8_t buf[16]; + } temp; +}; + +#ifdef XZ_DEC_X86 +/* + * This is used to test the most significant byte of a memory address + * in an x86 instruction. + */ +static inline int bcj_x86_test_msbyte(uint8_t b) +{ + return b == 0x00 || b == 0xFF; +} + +static noinline_for_stack size_t XZ_FUNC bcj_x86( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const bool mask_to_allowed_status[8] + = { true, true, true, false, true, false, false, false }; + + static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 }; + + size_t i; + size_t prev_pos = (size_t)-1; + uint32_t prev_mask = s->x86_prev_mask; + uint32_t src; + uint32_t dest; + uint32_t j; + uint8_t b; + + if (size <= 4) + return 0; + + size -= 4; + for (i = 0; i < size; ++i) { + if ((buf[i] & 0xFE) != 0xE8) + continue; + + prev_pos = i - prev_pos; + if (prev_pos > 3) { + prev_mask = 0; + } else { + prev_mask = (prev_mask << (prev_pos - 1)) & 7; + if (prev_mask != 0) { + b = buf[i + 4 - mask_to_bit_num[prev_mask]]; + if (!mask_to_allowed_status[prev_mask] + || bcj_x86_test_msbyte(b)) { + prev_pos = i; + prev_mask = (prev_mask << 1) | 1; + continue; + } + } + } + + prev_pos = i; + + if (bcj_x86_test_msbyte(buf[i + 4])) { + src = get_unaligned_le32(buf + i + 1); + while (true) { + dest = src - (s->pos + (uint32_t)i + 5); + if (prev_mask == 0) + break; + + j = mask_to_bit_num[prev_mask] * 8; + b = (uint8_t)(dest >> (24 - j)); + if (!bcj_x86_test_msbyte(b)) + break; + + src = dest ^ (((uint32_t)1 << (32 - j)) - 1); + } + + dest &= 0x01FFFFFF; + dest |= (uint32_t)0 - (dest & 0x01000000); + put_unaligned_le32(dest, buf + i + 1); + i += 4; + } else { + prev_mask = (prev_mask << 1) | 1; + } + } + + prev_pos = i - prev_pos; + s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1); + return i; +} +#endif + +#ifdef XZ_DEC_POWERPC +static noinline_for_stack size_t XZ_FUNC bcj_powerpc( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr & 0xFC000003) == 0x48000001) { + instr &= 0x03FFFFFC; + instr -= s->pos + (uint32_t)i; + instr &= 0x03FFFFFC; + instr |= 0x48000001; + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_IA64 +static noinline_for_stack size_t XZ_FUNC bcj_ia64( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + static const uint8_t branch_table[32] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 6, 6, 0, 0, 7, 7, + 4, 4, 0, 0, 4, 4, 0, 0 + }; + + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. 
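+	 *
+	 * For orientation: each 16-byte IA-64 bundle is a 5-bit template
+	 * followed by three 41-bit instruction slots, which is why bit_pos
+	 * starts at 5 and advances by 41 per slot in the loop below.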
+ */ + + /* Loop counters */ + size_t i; + size_t j; + + /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ + uint32_t slot; + + /* Bitwise offset of the instruction indicated by slot */ + uint32_t bit_pos; + + /* bit_pos split into byte and bit parts */ + uint32_t byte_pos; + uint32_t bit_res; + + /* Address part of an instruction */ + uint32_t addr; + + /* Mask used to detect which instructions to convert */ + uint32_t mask; + + /* 41-bit instruction stored somewhere in the lowest 48 bits */ + uint64_t instr; + + /* Instruction normalized with bit_res for easier manipulation */ + uint64_t norm; + + for (i = 0; i + 16 <= size; i += 16) { + mask = branch_table[buf[i] & 0x1F]; + for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) { + if (((mask >> slot) & 1) == 0) + continue; + + byte_pos = bit_pos >> 3; + bit_res = bit_pos & 7; + instr = 0; + for (j = 0; j < 6; ++j) + instr |= (uint64_t)(buf[i + j + byte_pos]) + << (8 * j); + + norm = instr >> bit_res; + + if (((norm >> 37) & 0x0F) == 0x05 + && ((norm >> 9) & 0x07) == 0) { + addr = (norm >> 13) & 0x0FFFFF; + addr |= ((uint32_t)(norm >> 36) & 1) << 20; + addr <<= 4; + addr -= s->pos + (uint32_t)i; + addr >>= 4; + + norm &= ~((uint64_t)0x8FFFFF << 13); + norm |= (uint64_t)(addr & 0x0FFFFF) << 13; + norm |= (uint64_t)(addr & 0x100000) + << (36 - 20); + + instr &= (1 << bit_res) - 1; + instr |= norm << bit_res; + + for (j = 0; j < 6; j++) + buf[i + j + byte_pos] + = (uint8_t)(instr >> (8 * j)); + } + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARM +static noinline_for_stack size_t XZ_FUNC bcj_arm( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 4) { + if (buf[i + 3] == 0xEB) { + addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) + | ((uint32_t)buf[i + 2] << 16); + addr <<= 2; + addr -= s->pos + (uint32_t)i + 8; + addr >>= 2; + buf[i] = (uint8_t)addr; + buf[i + 1] = (uint8_t)(addr >> 8); + buf[i + 2] = (uint8_t)(addr >> 16); + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_ARMTHUMB +static noinline_for_stack size_t XZ_FUNC bcj_armthumb( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t addr; + + for (i = 0; i + 4 <= size; i += 2) { + if ((buf[i + 1] & 0xF8) == 0xF0 + && (buf[i + 3] & 0xF8) == 0xF8) { + addr = (((uint32_t)buf[i + 1] & 0x07) << 19) + | ((uint32_t)buf[i] << 11) + | (((uint32_t)buf[i + 3] & 0x07) << 8) + | (uint32_t)buf[i + 2]; + addr <<= 1; + addr -= s->pos + (uint32_t)i + 4; + addr >>= 1; + buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); + buf[i] = (uint8_t)(addr >> 11); + buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); + buf[i + 2] = (uint8_t)addr; + i += 2; + } + } + + return i; +} +#endif + +#ifdef XZ_DEC_SPARC +static noinline_for_stack size_t XZ_FUNC bcj_sparc( + struct xz_dec_bcj *s, uint8_t *buf, size_t size) +{ + size_t i; + uint32_t instr; + + for (i = 0; i + 4 <= size; i += 4) { + instr = get_unaligned_be32(buf + i); + if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) { + instr <<= 2; + instr -= s->pos + (uint32_t)i; + instr >>= 2; + instr = ((uint32_t)0x40000000 - (instr & 0x400000)) + | 0x40000000 | (instr & 0x3FFFFF); + put_unaligned_be32(instr, buf + i); + } + } + + return i; +} +#endif + +/* + * Apply the selected BCJ filter. Update *pos and s->pos to match the amount + * of data that got filtered. 
+ * + * NOTE: This is implemented as a switch statement to avoid using function + * pointers, which could be problematic in the kernel boot code, which must + * avoid pointers to static data (at least on x86). + */ +static void XZ_FUNC bcj_apply(struct xz_dec_bcj *s, + uint8_t *buf, size_t *pos, size_t size) +{ + size_t filtered; + + buf += *pos; + size -= *pos; + + switch (s->type) { +#ifdef XZ_DEC_X86 + case BCJ_X86: + filtered = bcj_x86(s, buf, size); + break; +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: + filtered = bcj_powerpc(s, buf, size); + break; +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: + filtered = bcj_ia64(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: + filtered = bcj_arm(s, buf, size); + break; +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: + filtered = bcj_armthumb(s, buf, size); + break; +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: + filtered = bcj_sparc(s, buf, size); + break; +#endif + default: + /* Never reached but silence compiler warnings. */ + filtered = 0; + break; + } + + *pos += filtered; + s->pos += filtered; +} + +/* + * Flush pending filtered data from temp to the output buffer. + * Move the remaining mixture of possibly filtered and unfiltered + * data to the beginning of temp. + */ +static void XZ_FUNC bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) +{ + size_t copy_size; + + copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); + memcpy(b->out + b->out_pos, s->temp.buf, copy_size); + b->out_pos += copy_size; + + s->temp.filtered -= copy_size; + s->temp.size -= copy_size; + memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); +} + +/* + * The BCJ filter functions are primitive in sense that they process the + * data in chunks of 1-16 bytes. To hide this issue, this function does + * some buffering. + */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, struct xz_buf *b) +{ + size_t out_start; + + /* + * Flush pending already filtered data to the output buffer. Return + * immediatelly if we couldn't flush everything, or if the next + * filter in the chain had already returned XZ_STREAM_END. + */ + if (s->temp.filtered > 0) { + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + } + + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly. + * + * This needs to be always run when temp.size == 0 to handle a special + * case where the output buffer is full and the next filter has no + * more output coming but hasn't returned XZ_STREAM_END yet. + */ + if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { + out_start = b->out_pos; + memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); + b->out_pos += s->temp.size; + + s->ret = xz_dec_lzma2_run(lzma2, b); + if (s->ret != XZ_STREAM_END + && (s->ret != XZ_OK || s->single_call)) + return s->ret; + + bcj_apply(s, b->out, &out_start, b->out_pos); + + /* + * As an exception, if the next filter returned XZ_STREAM_END, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. 
+ */ + if (s->ret == XZ_STREAM_END) + return XZ_STREAM_END; + + s->temp.size = b->out_pos - out_start; + b->out_pos -= s->temp.size; + memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); + + /* + * If there wasn't enough input to the next filter to fill + * the output buffer with unfiltered data, there's no point + * to try decoding more data to temp. + */ + if (b->out_pos + s->temp.size < b->out_size) + return XZ_OK; + } + + /* + * We have unfiltered data in temp. If the output buffer isn't full + * yet, try to fill the temp buffer by decoding more data from the + * next filter. Apply the BCJ filter on temp. Then we hopefully can + * fill the actual output buffer by copying filtered data from temp. + * A mix of filtered and unfiltered data may be left in temp; it will + * be taken care on the next call to this function. + */ + if (b->out_pos < b->out_size) { + /* Make b->out{,_pos,_size} temporarily point to s->temp. */ + s->out = b->out; + s->out_pos = b->out_pos; + s->out_size = b->out_size; + b->out = s->temp.buf; + b->out_pos = s->temp.size; + b->out_size = sizeof(s->temp.buf); + + s->ret = xz_dec_lzma2_run(lzma2, b); + + s->temp.size = b->out_pos; + b->out = s->out; + b->out_pos = s->out_pos; + b->out_size = s->out_size; + + if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) + return s->ret; + + bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); + + /* + * If the next filter returned XZ_STREAM_END, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is. + */ + if (s->ret == XZ_STREAM_END) + s->temp.filtered = s->temp.size; + + bcj_flush(s, b); + if (s->temp.filtered > 0) + return XZ_OK; + } + + return s->ret; +} + +XZ_EXTERN struct xz_dec_bcj * XZ_FUNC xz_dec_bcj_create(bool single_call) +{ + struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s != NULL) + s->single_call = single_call; + + return s; +} + +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_reset( + struct xz_dec_bcj *s, uint8_t id) +{ + switch (id) { +#ifdef XZ_DEC_X86 + case BCJ_X86: +#endif +#ifdef XZ_DEC_POWERPC + case BCJ_POWERPC: +#endif +#ifdef XZ_DEC_IA64 + case BCJ_IA64: +#endif +#ifdef XZ_DEC_ARM + case BCJ_ARM: +#endif +#ifdef XZ_DEC_ARMTHUMB + case BCJ_ARMTHUMB: +#endif +#ifdef XZ_DEC_SPARC + case BCJ_SPARC: +#endif + break; + + default: + /* Unsupported Filter ID */ + return XZ_OPTIONS_ERROR; + } + + s->type = id; + s->ret = XZ_OK; + s->pos = 0; + s->x86_prev_mask = 0; + s->temp.filtered = 0; + s->temp.size = 0; + + return XZ_OK; +} + +#endif diff --git a/probe-busybox/archival/libarchive/unxz/xz_dec_lzma2.c b/probe-busybox/archival/libarchive/unxz/xz_dec_lzma2.c new file mode 100644 index 00000000..351251f7 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_dec_lzma2.c @@ -0,0 +1,1171 @@ +/* + * LZMA2 decoder + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#include "xz_private.h" +#include "xz_lzma2.h" + +/* + * Range decoder initialization eats the first five bytes of each LZMA chunk. + */ +#define RC_INIT_BYTES 5 + +/* + * Minimum number of usable input buffer to safely decode one LZMA symbol. + * The worst case is that we decode 22 bits using probabilities and 26 + * direct bits. This may decode at maximum of 20 bytes of input. However, + * lzma_main() does an extra normalization before returning, thus we + * need to put 21 here. 
+ */ +#define LZMA_IN_REQUIRED 21 + +/* + * Dictionary (history buffer) + * + * These are always true: + * start <= pos <= full <= end + * pos <= limit <= end + * + * In multi-call mode, also these are true: + * end == size + * size <= size_max + * allocated <= size + * + * Most of these variables are size_t to support single-call mode, + * in which the dictionary variables address the actual output + * buffer directly. + */ +struct dictionary { + /* Beginning of the history buffer */ + uint8_t *buf; + + /* Old position in buf (before decoding more data) */ + size_t start; + + /* Position in buf */ + size_t pos; + + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + size_t full; + + /* Write limit; we don't write to buf[limit] or later bytes. */ + size_t limit; + + /* + * End of the dictionary buffer. In multi-call mode, this is + * the same as the dictionary size. In single-call mode, this + * indicates the size of the output buffer. + */ + size_t end; + + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + uint32_t size; + + /* + * Maximum allowed dictionary size in multi-call mode. + * This is ignored in single-call mode. + */ + uint32_t size_max; + + /* + * Amount of memory currently allocated for the dictionary. + * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, + * size_max is always the same as the allocated size.) + */ + uint32_t allocated; + + /* Operation mode */ + enum xz_mode mode; +}; + +/* Range decoder */ +struct rc_dec { + uint32_t range; + uint32_t code; + + /* + * Number of initializing bytes remaining to be read + * by rc_read_init(). + */ + uint32_t init_bytes_left; + + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +/* Probabilities for a length decoder. */ +struct lzma_len_dec { + /* Probability of match length being at least 10 */ + uint16_t choice; + + /* Probability of match length being at least 18 */ + uint16_t choice2; + + /* Probabilities for match lengths 2-9 */ + uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; + + /* Probabilities for match lengths 10-17 */ + uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; + + /* Probabilities for match lengths 18-273 */ + uint16_t high[LEN_HIGH_SYMBOLS]; +}; + +struct lzma_dec { + /* Distances of latest four matches */ + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + + /* Types of the most recently seen LZMA symbols */ + enum lzma_state state; + + /* + * Length of a match. This is updated so that dict_repeat can + * be called again to finish repeating the whole match. + */ + uint32_t len; + + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask dervied from the number of literal + * position bits, and a mask dervied from the number + * position bits) + */ + uint32_t lc; + uint32_t literal_pos_mask; /* (1 << lp) - 1 */ + uint32_t pos_mask; /* (1 << pb) - 1 */ + + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + uint16_t is_match[STATES][POS_STATES_MAX]; + + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + uint16_t is_rep[STATES]; + + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. 
+ */ + uint16_t is_rep0[STATES]; + + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + uint16_t is_rep1[STATES]; + + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + uint16_t is_rep2[STATES]; + + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + uint16_t is_rep0_long[STATES][POS_STATES_MAX]; + + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; + + /* + * Probility trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; + + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + uint16_t dist_align[ALIGN_SIZE]; + + /* Length of a normal match */ + struct lzma_len_dec match_len_dec; + + /* Length of a repeated match */ + struct lzma_len_dec rep_len_dec; + + /* Probabilities of literals */ + uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; +}; + +struct lzma2_dec { + /* Position in xz_dec_lzma2_run(). */ + enum lzma2_seq { + SEQ_CONTROL, + SEQ_UNCOMPRESSED_1, + SEQ_UNCOMPRESSED_2, + SEQ_COMPRESSED_0, + SEQ_COMPRESSED_1, + SEQ_PROPERTIES, + SEQ_LZMA_PREPARE, + SEQ_LZMA_RUN, + SEQ_COPY + } sequence; + + /* Next position after decoding the compressed size of the chunk. */ + enum lzma2_seq next_sequence; + + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uint32_t uncompressed; + + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + uint32_t compressed; + + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + bool need_dict_reset; + + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + bool need_props; +}; + +struct xz_dec_lzma2 { + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2_lzma() for details. + */ + struct { + uint32_t size; + uint8_t buf[3 * LZMA_IN_REQUIRED]; + } temp; +}; + +/************** + * Dictionary * + **************/ + +/* + * Reset the dictionary state. When in single-call mode, set up the beginning + * of the dictionary to point to the actual output buffer. 
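+ * In multi-call mode, buf and end have already been set up by
+ * xz_dec_lzma2_create() and xz_dec_lzma2_reset(), so only the position
+ * bookkeeping is cleared here.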
+ */ +static void XZ_FUNC dict_reset(struct dictionary *dict, struct xz_buf *b) +{ + if (DEC_IS_SINGLE(dict->mode)) { + dict->buf = b->out + b->out_pos; + dict->end = b->out_size - b->out_pos; + } + + dict->start = 0; + dict->pos = 0; + dict->limit = 0; + dict->full = 0; +} + +/* Set dictionary write limit */ +static void XZ_FUNC dict_limit(struct dictionary *dict, size_t out_max) +{ + if (dict->end - dict->pos <= out_max) + dict->limit = dict->end; + else + dict->limit = dict->pos + out_max; +} + +/* Return true if at least one byte can be written into the dictionary. */ +static __always_inline bool XZ_FUNC dict_has_space(const struct dictionary *dict) +{ + return dict->pos < dict->limit; +} + +/* + * Get a byte from the dictionary at the given distance. The distance is + * assumed to valid, or as a special case, zero when the dictionary is + * still empty. This special case is needed for single-call decoding to + * avoid writing a '\0' to the end of the destination buffer. + */ +static __always_inline uint32_t XZ_FUNC dict_get( + const struct dictionary *dict, uint32_t dist) +{ + size_t offset = dict->pos - dist - 1; + + if (dist >= dict->pos) + offset += dict->end; + + return dict->full > 0 ? dict->buf[offset] : 0; +} + +/* + * Put one byte into the dictionary. It is assumed that there is space for it. + */ +static inline void XZ_FUNC dict_put(struct dictionary *dict, uint8_t byte) +{ + dict->buf[dict->pos++] = byte; + + if (dict->full < dict->pos) + dict->full = dict->pos; +} + +/* + * Repeat given number of bytes from the given distance. If the distance is + * invalid, false is returned. On success, true is returned and *len is + * updated to indicate how many bytes were left to be repeated. + */ +static bool XZ_FUNC dict_repeat( + struct dictionary *dict, uint32_t *len, uint32_t dist) +{ + size_t back; + uint32_t left; + + if (dist >= dict->full || dist >= dict->size) + return false; + + left = min_t(size_t, dict->limit - dict->pos, *len); + *len -= left; + + back = dict->pos - dist - 1; + if (dist >= dict->pos) + back += dict->end; + + do { + dict->buf[dict->pos++] = dict->buf[back++]; + if (back == dict->end) + back = 0; + } while (--left > 0); + + if (dict->full < dict->pos) + dict->full = dict->pos; + + return true; +} + +/* Copy uncompressed data as is from input to dictionary and output buffers. */ +static void XZ_FUNC dict_uncompressed( + struct dictionary *dict, struct xz_buf *b, uint32_t *left) +{ + size_t copy_size; + + while (*left > 0 && b->in_pos < b->in_size + && b->out_pos < b->out_size) { + copy_size = min(b->in_size - b->in_pos, + b->out_size - b->out_pos); + if (copy_size > dict->end - dict->pos) + copy_size = dict->end - dict->pos; + if (copy_size > *left) + copy_size = *left; + + *left -= copy_size; + + memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + dict->pos += copy_size; + + if (dict->full < dict->pos) + dict->full = dict->pos; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + memcpy(b->out + b->out_pos, b->in + b->in_pos, + copy_size); + } + + dict->start = dict->pos; + + b->out_pos += copy_size; + b->in_pos += copy_size; + } +} + +/* + * Flush pending data from dictionary to b->out. It is assumed that there is + * enough space in b->out. This is guaranteed because caller uses dict_limit() + * before decoding data into the dictionary. 
+ */ +static uint32_t XZ_FUNC dict_flush(struct dictionary *dict, struct xz_buf *b) +{ + size_t copy_size = dict->pos - dict->start; + + if (DEC_IS_MULTI(dict->mode)) { + if (dict->pos == dict->end) + dict->pos = 0; + + memcpy(b->out + b->out_pos, dict->buf + dict->start, + copy_size); + } + + dict->start = dict->pos; + b->out_pos += copy_size; + return copy_size; +} + +/***************** + * Range decoder * + *****************/ + +/* Reset the range decoder. */ +static void XZ_FUNC rc_reset(struct rc_dec *rc) +{ + rc->range = (uint32_t)-1; + rc->code = 0; + rc->init_bytes_left = RC_INIT_BYTES; +} + +/* + * Read the first five initial bytes into rc->code if they haven't been + * read already. (Yes, the first byte gets completely ignored.) + */ +static bool XZ_FUNC rc_read_init(struct rc_dec *rc, struct xz_buf *b) +{ + while (rc->init_bytes_left > 0) { + if (b->in_pos == b->in_size) + return false; + + rc->code = (rc->code << 8) + b->in[b->in_pos++]; + --rc->init_bytes_left; + } + + return true; +} + +/* Return true if there may not be enough input for the next decoding loop. */ +static inline bool XZ_FUNC rc_limit_exceeded(const struct rc_dec *rc) +{ + return rc->in_pos > rc->in_limit; +} + +/* + * Return true if it is possible (from point of view of range decoder) that + * we have reached the end of the LZMA chunk. + */ +static inline bool XZ_FUNC rc_is_finished(const struct rc_dec *rc) +{ + return rc->code == 0; +} + +/* Read the next input byte if needed. */ +static __always_inline void XZ_FUNC rc_normalize(struct rc_dec *rc) +{ + if (rc->range < RC_TOP_VALUE) { + rc->range <<= RC_SHIFT_BITS; + rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; + } +} + +/* + * Decode one bit. In some versions, this function has been splitted in three + * functions so that the compiler is supposed to be able to more easily avoid + * an extra branch. In this particular version of the LZMA decoder, this + * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3 + * on x86). Using a non-splitted version results in nicer looking code too. + * + * NOTE: This must return an int. Do not make it return a bool or the speed + * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care, + * and it generates 10-20 % faster code than GCC 3.x from this file anyway.) + */ +static __always_inline int XZ_FUNC rc_bit(struct rc_dec *rc, uint16_t *prob) +{ + uint32_t bound; + int bit; + + rc_normalize(rc); + bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; + if (rc->code < bound) { + rc->range = bound; + *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; + bit = 0; + } else { + rc->range -= bound; + rc->code -= bound; + *prob -= *prob >> RC_MOVE_BITS; + bit = 1; + } + + return bit; +} + +/* Decode a bittree starting from the most significant bit. */ +static __always_inline uint32_t XZ_FUNC rc_bittree( + struct rc_dec *rc, uint16_t *probs, uint32_t limit) +{ + uint32_t symbol = 1; + + do { + if (rc_bit(rc, &probs[symbol])) + symbol = (symbol << 1) + 1; + else + symbol <<= 1; + } while (symbol < limit); + + return symbol; +} + +/* Decode a bittree starting from the least significant bit. 
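+ * Unlike rc_bittree(), the decoded bits are accumulated into *dest
+ * instead of being returned.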
*/ +static __always_inline void XZ_FUNC rc_bittree_reverse(struct rc_dec *rc, + uint16_t *probs, uint32_t *dest, uint32_t limit) +{ + uint32_t symbol = 1; + uint32_t i = 0; + + do { + if (rc_bit(rc, &probs[symbol])) { + symbol = (symbol << 1) + 1; + *dest += 1 << i; + } else { + symbol <<= 1; + } + } while (++i < limit); +} + +/* Decode direct bits (fixed fifty-fifty probability) */ +static inline void XZ_FUNC rc_direct( + struct rc_dec *rc, uint32_t *dest, uint32_t limit) +{ + uint32_t mask; + + do { + rc_normalize(rc); + rc->range >>= 1; + rc->code -= rc->range; + mask = (uint32_t)0 - (rc->code >> 31); + rc->code += rc->range & mask; + *dest = (*dest << 1) + (mask + 1); + } while (--limit > 0); +} + +/******** + * LZMA * + ********/ + +/* Get pointer to literal coder probability array. */ +static uint16_t * XZ_FUNC lzma_literal_probs(struct xz_dec_lzma2 *s) +{ + uint32_t prev_byte = dict_get(&s->dict, 0); + uint32_t low = prev_byte >> (8 - s->lzma.lc); + uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; + return s->lzma.literal[low + high]; +} + +/* Decode a literal (one 8-bit byte) */ +static void XZ_FUNC lzma_literal(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + uint32_t symbol; + uint32_t match_byte; + uint32_t match_bit; + uint32_t offset; + uint32_t i; + + probs = lzma_literal_probs(s); + + if (lzma_state_is_literal(s->lzma.state)) { + symbol = rc_bittree(&s->rc, probs, 0x100); + } else { + symbol = 1; + match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; + offset = 0x100; + + do { + match_bit = match_byte & offset; + match_byte <<= 1; + i = offset + match_bit + symbol; + + if (rc_bit(&s->rc, &probs[i])) { + symbol = (symbol << 1) + 1; + offset &= match_bit; + } else { + symbol <<= 1; + offset &= ~match_bit; + } + } while (symbol < 0x100); + } + + dict_put(&s->dict, (uint8_t)symbol); + lzma_state_literal(&s->lzma.state); +} + +/* Decode the length of the match into s->lzma.len. */ +static void XZ_FUNC lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, + uint32_t pos_state) +{ + uint16_t *probs; + uint32_t limit; + + if (!rc_bit(&s->rc, &l->choice)) { + probs = l->low[pos_state]; + limit = LEN_LOW_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN; + } else { + if (!rc_bit(&s->rc, &l->choice2)) { + probs = l->mid[pos_state]; + limit = LEN_MID_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; + } else { + probs = l->high; + limit = LEN_HIGH_SYMBOLS; + s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS + + LEN_MID_SYMBOLS; + } + } + + s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; +} + +/* Decode a match. The distance will be stored in s->lzma.rep0. 
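+ * The match length is decoded first, into s->lzma.len, because the
+ * distance slot probabilities are chosen based on it
+ * (lzma_get_dist_state()).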
*/ +static void XZ_FUNC lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint16_t *probs; + uint32_t dist_slot; + uint32_t limit; + + lzma_state_match(&s->lzma.state); + + s->lzma.rep3 = s->lzma.rep2; + s->lzma.rep2 = s->lzma.rep1; + s->lzma.rep1 = s->lzma.rep0; + + lzma_len(s, &s->lzma.match_len_dec, pos_state); + + probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; + dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; + + if (dist_slot < DIST_MODEL_START) { + s->lzma.rep0 = dist_slot; + } else { + limit = (dist_slot >> 1) - 1; + s->lzma.rep0 = 2 + (dist_slot & 1); + + if (dist_slot < DIST_MODEL_END) { + s->lzma.rep0 <<= limit; + probs = s->lzma.dist_special + s->lzma.rep0 + - dist_slot - 1; + rc_bittree_reverse(&s->rc, probs, + &s->lzma.rep0, limit); + } else { + rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); + s->lzma.rep0 <<= ALIGN_BITS; + rc_bittree_reverse(&s->rc, s->lzma.dist_align, + &s->lzma.rep0, ALIGN_BITS); + } + } +} + +/* + * Decode a repeated match. The distance is one of the four most recently + * seen matches. The distance will be stored in s->lzma.rep0. + */ +static void XZ_FUNC lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) +{ + uint32_t tmp; + + if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { + if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ + s->lzma.state][pos_state])) { + lzma_state_short_rep(&s->lzma.state); + s->lzma.len = 1; + return; + } + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) { + tmp = s->lzma.rep1; + } else { + if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { + tmp = s->lzma.rep2; + } else { + tmp = s->lzma.rep3; + s->lzma.rep3 = s->lzma.rep2; + } + + s->lzma.rep2 = s->lzma.rep1; + } + + s->lzma.rep1 = s->lzma.rep0; + s->lzma.rep0 = tmp; + } + + lzma_state_long_rep(&s->lzma.state); + lzma_len(s, &s->lzma.rep_len_dec, pos_state); +} + +/* LZMA decoder core */ +static bool XZ_FUNC lzma_main(struct xz_dec_lzma2 *s) +{ + uint32_t pos_state; + + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if (dict_has_space(&s->dict) && s->lzma.len > 0) + dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); + + /* + * Decode more LZMA symbols. One iteration may consume up to + * LZMA_IN_REQUIRED - 1 bytes. + */ + while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) { + pos_state = s->dict.pos & s->lzma.pos_mask; + + if (!rc_bit(&s->rc, &s->lzma.is_match[ + s->lzma.state][pos_state])) { + lzma_literal(s); + } else { + if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) + lzma_rep_match(s, pos_state); + else + lzma_match(s, pos_state); + + if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) + return false; + } + } + + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rc_normalize(&s->rc); + + return true; +} + +/* + * Reset the LZMA decoder and range decoder state. Dictionary is nore reset + * here, because LZMA state may be reset without resetting the dictionary. + */ +static void XZ_FUNC lzma_reset(struct xz_dec_lzma2 *s) +{ + uint16_t *probs; + size_t i; + + s->lzma.state = STATE_LIT_LIT; + s->lzma.rep0 = 0; + s->lzma.rep1 = 0; + s->lzma.rep2 = 0; + s->lzma.rep3 = 0; + + /* + * All probabilities are initialized to the same value. This hack + * makes the code smaller by avoiding a separate loop for each + * probability array. 
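+	 * The initial value RC_BIT_MODEL_TOTAL / 2 simply means that both
+	 * bit values start out as equally likely.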
+ * + * This could be optimized so that only that part of literal + * probabilities that are actually required. In the common case + * we would write 12 KiB less. + */ + probs = s->lzma.is_match[0]; + for (i = 0; i < PROBS_TOTAL; ++i) + probs[i] = RC_BIT_MODEL_TOTAL / 2; + + rc_reset(&s->rc); +} + +/* + * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks + * from the decoded lp and pb values. On success, the LZMA decoder state is + * reset and true is returned. + */ +static bool XZ_FUNC lzma_props(struct xz_dec_lzma2 *s, uint8_t props) +{ + if (props > (4 * 5 + 4) * 9 + 8) + return false; + + s->lzma.pos_mask = 0; + while (props >= 9 * 5) { + props -= 9 * 5; + ++s->lzma.pos_mask; + } + + s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; + + s->lzma.literal_pos_mask = 0; + while (props >= 9) { + props -= 9; + ++s->lzma.literal_pos_mask; + } + + s->lzma.lc = props; + + if (s->lzma.lc + s->lzma.literal_pos_mask > 4) + return false; + + s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; + + lzma_reset(s); + + return true; +} + +/********* + * LZMA2 * + *********/ + +/* + * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't + * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This + * wrapper function takes care of making the LZMA decoder's assumption safe. + * + * As long as there is plenty of input left to be decoded in the current LZMA + * chunk, we decode directly from the caller-supplied input buffer until + * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into + * s->temp.buf, which (hopefully) gets filled on the next call to this + * function. We decode a few bytes from the temporary buffer so that we can + * continue decoding from the caller-supplied input buffer again. 
+ */ +static bool XZ_FUNC lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) +{ + size_t in_avail; + uint32_t tmp; + + in_avail = b->in_size - b->in_pos; + if (s->temp.size > 0 || s->lzma2.compressed == 0) { + tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; + if (tmp > s->lzma2.compressed - s->temp.size) + tmp = s->lzma2.compressed - s->temp.size; + if (tmp > in_avail) + tmp = in_avail; + + memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); + + if (s->temp.size + tmp == s->lzma2.compressed) { + memzero(s->temp.buf + s->temp.size + tmp, + sizeof(s->temp.buf) + - s->temp.size - tmp); + s->rc.in_limit = s->temp.size + tmp; + } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) { + s->temp.size += tmp; + b->in_pos += tmp; + return true; + } else { + s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; + } + + s->rc.in = s->temp.buf; + s->rc.in_pos = 0; + + if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) + return false; + + s->lzma2.compressed -= s->rc.in_pos; + + if (s->rc.in_pos < s->temp.size) { + s->temp.size -= s->rc.in_pos; + memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, + s->temp.size); + return true; + } + + b->in_pos += s->rc.in_pos - s->temp.size; + s->temp.size = 0; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail >= LZMA_IN_REQUIRED) { + s->rc.in = b->in; + s->rc.in_pos = b->in_pos; + + if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) + s->rc.in_limit = b->in_pos + s->lzma2.compressed; + else + s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; + + if (!lzma_main(s)) + return false; + + in_avail = s->rc.in_pos - b->in_pos; + if (in_avail > s->lzma2.compressed) + return false; + + s->lzma2.compressed -= in_avail; + b->in_pos = s->rc.in_pos; + } + + in_avail = b->in_size - b->in_pos; + if (in_avail < LZMA_IN_REQUIRED) { + if (in_avail > s->lzma2.compressed) + in_avail = s->lzma2.compressed; + + memcpy(s->temp.buf, b->in + b->in_pos, in_avail); + s->temp.size = in_avail; + b->in_pos += in_avail; + } + + return true; +} + +/* + * Take care of the LZMA2 control layer, and forward the job of actual LZMA + * decoding or copying of uncompressed chunks to other functions. + */ +XZ_EXTERN NOINLINE enum xz_ret XZ_FUNC xz_dec_lzma2_run( + struct xz_dec_lzma2 *s, struct xz_buf *b) +{ + uint32_t tmp; + + while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) { + switch (s->lzma2.sequence) { + case SEQ_CONTROL: + /* + * LZMA2 control byte + * + * Exact values: + * 0x00 End marker + * 0x01 Dictionary reset followed by + * an uncompressed chunk + * 0x02 Uncompressed chunk (no dictionary reset) + * + * Highest three bits (s->control & 0xE0): + * 0xE0 Dictionary reset, new properties and state + * reset, followed by LZMA compressed chunk + * 0xC0 New properties and state reset, followed + * by LZMA compressed chunk (no dictionary + * reset) + * 0xA0 State reset using old properties, + * followed by LZMA compressed chunk (no + * dictionary reset) + * 0x80 LZMA chunk (no dictionary or state reset) + * + * For LZMA compressed chunks, the lowest five bits + * (s->control & 1F) are the highest bits of the + * uncompressed size (bits 16-20). + * + * A new LZMA2 stream must begin with a dictionary + * reset. The first LZMA chunk must set new + * properties and reset the LZMA state. + * + * Values that don't match anything described above + * are invalid and we return XZ_DATA_ERROR. 
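+			 *
+			 * Worked example: control byte 0xE4 followed by the
+			 * size bytes 0x12 0x34 describes an LZMA chunk whose
+			 * uncompressed size is
+			 * ((0xE4 & 0x1F) << 16) + (0x12 << 8) + 0x34 + 1
+			 * = 0x041235 bytes; the next two bytes encode the
+			 * compressed size the same way (high byte first,
+			 * plus one).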
+ */ + tmp = b->in[b->in_pos++]; + + if (tmp == 0x00) + return XZ_STREAM_END; + + if (tmp >= 0xE0 || tmp == 0x01) { + s->lzma2.need_props = true; + s->lzma2.need_dict_reset = false; + dict_reset(&s->dict, b); + } else if (s->lzma2.need_dict_reset) { + return XZ_DATA_ERROR; + } + + if (tmp >= 0x80) { + s->lzma2.uncompressed = (tmp & 0x1F) << 16; + s->lzma2.sequence = SEQ_UNCOMPRESSED_1; + + if (tmp >= 0xC0) { + /* + * When there are new properties, + * state reset is done at + * SEQ_PROPERTIES. + */ + s->lzma2.need_props = false; + s->lzma2.next_sequence + = SEQ_PROPERTIES; + } else if (s->lzma2.need_props) { + return XZ_DATA_ERROR; + } else { + s->lzma2.next_sequence + = SEQ_LZMA_PREPARE; + if (tmp >= 0xA0) + lzma_reset(s); + } + } else { + if (tmp > 0x02) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_COMPRESSED_0; + s->lzma2.next_sequence = SEQ_COPY; + } + + break; + + case SEQ_UNCOMPRESSED_1: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_UNCOMPRESSED_2; + break; + + case SEQ_UNCOMPRESSED_2: + s->lzma2.uncompressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = SEQ_COMPRESSED_0; + break; + + case SEQ_COMPRESSED_0: + s->lzma2.compressed + = (uint32_t)b->in[b->in_pos++] << 8; + s->lzma2.sequence = SEQ_COMPRESSED_1; + break; + + case SEQ_COMPRESSED_1: + s->lzma2.compressed + += (uint32_t)b->in[b->in_pos++] + 1; + s->lzma2.sequence = s->lzma2.next_sequence; + break; + + case SEQ_PROPERTIES: + if (!lzma_props(s, b->in[b->in_pos++])) + return XZ_DATA_ERROR; + + s->lzma2.sequence = SEQ_LZMA_PREPARE; + + case SEQ_LZMA_PREPARE: + if (s->lzma2.compressed < RC_INIT_BYTES) + return XZ_DATA_ERROR; + + if (!rc_read_init(&s->rc, b)) + return XZ_OK; + + s->lzma2.compressed -= RC_INIT_BYTES; + s->lzma2.sequence = SEQ_LZMA_RUN; + + case SEQ_LZMA_RUN: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b->out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s->lzma2.sequence. 
+ */ + dict_limit(&s->dict, min_t(size_t, + b->out_size - b->out_pos, + s->lzma2.uncompressed)); + if (!lzma2_lzma(s, b)) + return XZ_DATA_ERROR; + + s->lzma2.uncompressed -= dict_flush(&s->dict, b); + + if (s->lzma2.uncompressed == 0) { + if (s->lzma2.compressed > 0 || s->lzma.len > 0 + || !rc_is_finished(&s->rc)) + return XZ_DATA_ERROR; + + rc_reset(&s->rc); + s->lzma2.sequence = SEQ_CONTROL; + } else if (b->out_pos == b->out_size + || (b->in_pos == b->in_size + && s->temp.size + < s->lzma2.compressed)) { + return XZ_OK; + } + + break; + + case SEQ_COPY: + dict_uncompressed(&s->dict, b, &s->lzma2.compressed); + if (s->lzma2.compressed > 0) + return XZ_OK; + + s->lzma2.sequence = SEQ_CONTROL; + break; + } + } + + return XZ_OK; +} + +XZ_EXTERN struct xz_dec_lzma2 * XZ_FUNC xz_dec_lzma2_create( + enum xz_mode mode, uint32_t dict_max) +{ + struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dict.mode = mode; + s->dict.size_max = dict_max; + + if (DEC_IS_PREALLOC(mode)) { + s->dict.buf = vmalloc(dict_max); + if (s->dict.buf == NULL) { + kfree(s); + return NULL; + } + } else if (DEC_IS_DYNALLOC(mode)) { + s->dict.buf = NULL; + s->dict.allocated = 0; + } + + return s; +} + +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_lzma2_reset( + struct xz_dec_lzma2 *s, uint8_t props) +{ + /* This limits dictionary size to 3 GiB to keep parsing simpler. */ + if (props > 39) + return XZ_OPTIONS_ERROR; + + s->dict.size = 2 + (props & 1); + s->dict.size <<= (props >> 1) + 11; + + if (DEC_IS_MULTI(s->dict.mode)) { + if (s->dict.size > s->dict.size_max) + return XZ_MEMLIMIT_ERROR; + + s->dict.end = s->dict.size; + + if (DEC_IS_DYNALLOC(s->dict.mode)) { + if (s->dict.allocated < s->dict.size) { + vfree(s->dict.buf); + s->dict.buf = vmalloc(s->dict.size); + if (s->dict.buf == NULL) { + s->dict.allocated = 0; + return XZ_MEM_ERROR; + } + } + } + } + + s->lzma.len = 0; + + s->lzma2.sequence = SEQ_CONTROL; + s->lzma2.need_dict_reset = true; + + s->temp.size = 0; + + return XZ_OK; +} + +XZ_EXTERN void XZ_FUNC xz_dec_lzma2_end(struct xz_dec_lzma2 *s) +{ + if (DEC_IS_MULTI(s->dict.mode)) + vfree(s->dict.buf); + + kfree(s); +} diff --git a/probe-busybox/archival/libarchive/unxz/xz_dec_stream.c b/probe-busybox/archival/libarchive/unxz/xz_dec_stream.c new file mode 100644 index 00000000..bf791055 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_dec_stream.c @@ -0,0 +1,820 @@ +/* + * .xz Stream decoder + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. 
+ */ + +#include "xz_private.h" +#include "xz_stream.h" + +/* Hash used to validate the Index field */ +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec { + /* Position in dec_main() */ + enum { + SEQ_STREAM_HEADER, + SEQ_BLOCK_START, + SEQ_BLOCK_HEADER, + SEQ_BLOCK_UNCOMPRESS, + SEQ_BLOCK_PADDING, + SEQ_BLOCK_CHECK, + SEQ_INDEX, + SEQ_INDEX_PADDING, + SEQ_INDEX_CRC32, + SEQ_STREAM_FOOTER + } sequence; + + /* Position in variable-length integers and Check fields */ + uint32_t pos; + + /* Variable-length integer decoded by dec_vli() */ + vli_type vli; + + /* Saved in_pos and out_pos */ + size_t in_start; + size_t out_start; + + /* CRC32 value in Block or Index */ + uint32_t crc32; + + /* Type of the integrity check calculated from uncompressed data */ + enum xz_check check_type; + + /* Operation mode */ + enum xz_mode mode; + + /* + * True if the next call to xz_dec_run() is allowed to return + * XZ_BUF_ERROR. + */ + bool allow_buf_error; + + /* Information stored in Block Header */ + struct { + /* + * Value stored in the Compressed Size field, or + * VLI_UNKNOWN if Compressed Size is not present. + */ + vli_type compressed; + + /* + * Value stored in the Uncompressed Size field, or + * VLI_UNKNOWN if Uncompressed Size is not present. + */ + vli_type uncompressed; + + /* Size of the Block Header field */ + uint32_t size; + } block_header; + + /* Information collected when decoding Blocks */ + struct { + /* Observed compressed size of the current Block */ + vli_type compressed; + + /* Observed uncompressed size of the current Block */ + vli_type uncompressed; + + /* Number of Blocks decoded so far */ + vli_type count; + + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + struct xz_dec_hash hash; + } block; + + /* Variables needed when verifying the Index field */ + struct { + /* Position in dec_index() */ + enum { + SEQ_INDEX_COUNT, + SEQ_INDEX_UNPADDED, + SEQ_INDEX_UNCOMPRESSED + } sequence; + + /* Size of the Index in bytes */ + vli_type size; + + /* Number of Records (matches block.count in valid files) */ + vli_type count; + + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + struct xz_dec_hash hash; + } index; + + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. buf[] has to be aligned + * to a multiple of four bytes; the size_t variables before it + * should guarantee this. + */ + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + + struct xz_dec_lzma2 *lzma2; + +#ifdef XZ_DEC_BCJ + struct xz_dec_bcj *bcj; + bool bcj_active; +#endif +}; + +#ifdef XZ_DEC_ANY_CHECK +/* Sizes of the Check field with different Check IDs */ +static const uint8_t check_sizes[16] = { + 0, + 4, 4, 4, + 8, 8, 8, + 16, 16, 16, + 32, 32, 32, + 64, 64, 64 +}; +#endif + +/* + * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller + * must have set s->temp.pos to indicate how much data we are supposed + * to copy into s->temp.buf. Return true once s->temp.pos has reached + * s->temp.size. 
+ */ +static bool XZ_FUNC fill_temp(struct xz_dec *s, struct xz_buf *b) +{ + size_t copy_size = min_t(size_t, + b->in_size - b->in_pos, s->temp.size - s->temp.pos); + + memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); + b->in_pos += copy_size; + s->temp.pos += copy_size; + + if (s->temp.pos == s->temp.size) { + s->temp.pos = 0; + return true; + } + + return false; +} + +/* Decode a variable-length integer (little-endian base-128 encoding) */ +static enum xz_ret XZ_FUNC dec_vli(struct xz_dec *s, + const uint8_t *in, size_t *in_pos, size_t in_size) +{ + uint8_t byte; + + if (s->pos == 0) + s->vli = 0; + + while (*in_pos < in_size) { + byte = in[*in_pos]; + ++*in_pos; + + s->vli |= (vli_type)(byte & 0x7F) << s->pos; + + if ((byte & 0x80) == 0) { + /* Don't allow non-minimal encodings. */ + if (byte == 0 && s->pos != 0) + return XZ_DATA_ERROR; + + s->pos = 0; + return XZ_STREAM_END; + } + + s->pos += 7; + if (s->pos == 7 * VLI_BYTES_MAX) + return XZ_DATA_ERROR; + } + + return XZ_OK; +} + +/* + * Decode the Compressed Data field from a Block. Update and validate + * the observed compressed and uncompressed sizes of the Block so that + * they don't exceed the values possibly stored in the Block Header + * (validation assumes that no integer overflow occurs, since vli_type + * is normally uint64_t). Update the CRC32 if presence of the CRC32 + * field was indicated in Stream Header. + * + * Once the decoding is finished, validate that the observed sizes match + * the sizes possibly stored in the Block Header. Update the hash and + * Block count, which are later used to validate the Index field. + */ +static enum xz_ret XZ_FUNC dec_block(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + s->in_start = b->in_pos; + s->out_start = b->out_pos; + +#ifdef XZ_DEC_BCJ + if (s->bcj_active) + ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); + else +#endif + ret = xz_dec_lzma2_run(s->lzma2, b); + + s->block.compressed += b->in_pos - s->in_start; + s->block.uncompressed += b->out_pos - s->out_start; + + /* + * There is no need to separately check for VLI_UNKNOWN, since + * the observed sizes are always smaller than VLI_UNKNOWN. + */ + if (s->block.compressed > s->block_header.compressed + || s->block.uncompressed + > s->block_header.uncompressed) + return XZ_DATA_ERROR; + + if (s->check_type == XZ_CHECK_CRC32) + s->crc32 = xz_crc32(b->out + s->out_start, + b->out_pos - s->out_start, s->crc32); + + if (ret == XZ_STREAM_END) { + if (s->block_header.compressed != VLI_UNKNOWN + && s->block_header.compressed + != s->block.compressed) + return XZ_DATA_ERROR; + + if (s->block_header.uncompressed != VLI_UNKNOWN + && s->block_header.uncompressed + != s->block.uncompressed) + return XZ_DATA_ERROR; + + s->block.hash.unpadded += s->block_header.size + + s->block.compressed; + +#ifdef XZ_DEC_ANY_CHECK + s->block.hash.unpadded += check_sizes[s->check_type]; +#else + if (s->check_type == XZ_CHECK_CRC32) + s->block.hash.unpadded += 4; +#endif + + s->block.hash.uncompressed += s->block.uncompressed; + s->block.hash.crc32 = xz_crc32( + (const uint8_t *)&s->block.hash, + sizeof(s->block.hash), s->block.hash.crc32); + + ++s->block.count; + } + + return ret; +} + +/* Update the Index size and the CRC32 value. 
*/ +static void XZ_FUNC index_update(struct xz_dec *s, const struct xz_buf *b) +{ + size_t in_used = b->in_pos - s->in_start; + s->index.size += in_used; + s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); +} + +/* + * Decode the Number of Records, Unpadded Size, and Uncompressed Size + * fields from the Index field. That is, Index Padding and CRC32 are not + * decoded by this function. + * + * This can return XZ_OK (more input needed), XZ_STREAM_END (everything + * successfully decoded), or XZ_DATA_ERROR (input is corrupt). + */ +static enum xz_ret XZ_FUNC dec_index(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + do { + ret = dec_vli(s, b->in, &b->in_pos, b->in_size); + if (ret != XZ_STREAM_END) { + index_update(s, b); + return ret; + } + + switch (s->index.sequence) { + case SEQ_INDEX_COUNT: + s->index.count = s->vli; + + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if (s->index.count != s->block.count) + return XZ_DATA_ERROR; + + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + + case SEQ_INDEX_UNPADDED: + s->index.hash.unpadded += s->vli; + s->index.sequence = SEQ_INDEX_UNCOMPRESSED; + break; + + case SEQ_INDEX_UNCOMPRESSED: + s->index.hash.uncompressed += s->vli; + s->index.hash.crc32 = xz_crc32( + (const uint8_t *)&s->index.hash, + sizeof(s->index.hash), + s->index.hash.crc32); + --s->index.count; + s->index.sequence = SEQ_INDEX_UNPADDED; + break; + } + } while (s->index.count > 0); + + return XZ_STREAM_END; +} + +/* + * Validate that the next four input bytes match the value of s->crc32. + * s->pos must be zero when starting to validate the first byte. + */ +static enum xz_ret XZ_FUNC crc32_validate(struct xz_dec *s, struct xz_buf *b) +{ + do { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) + return XZ_DATA_ERROR; + + s->pos += 8; + } while (s->pos < 32); + + s->crc32 = 0; + s->pos = 0; + + return XZ_STREAM_END; +} + +#ifdef XZ_DEC_ANY_CHECK +/* + * Skip over the Check field when the Check ID is not supported. + * Returns true once the whole Check field has been skipped over. + */ +static bool XZ_FUNC check_skip(struct xz_dec *s, struct xz_buf *b) +{ + while (s->pos < check_sizes[s->check_type]) { + if (b->in_pos == b->in_size) + return false; + + ++b->in_pos; + ++s->pos; + } + + s->pos = 0; + + return true; +} +#endif + +/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ +static enum xz_ret XZ_FUNC dec_stream_header(struct xz_dec *s) +{ + if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) + return XZ_FORMAT_ERROR; + + if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) + != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) + return XZ_DATA_ERROR; + + if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) + return XZ_OPTIONS_ERROR; + + /* + * Of integrity checks, we support only none (Check ID = 0) and + * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, + * we will accept other check types too, but then the check won't + * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. 
+ */ + s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; + +#ifdef XZ_DEC_ANY_CHECK + if (s->check_type > XZ_CHECK_MAX) + return XZ_OPTIONS_ERROR; + + if (s->check_type > XZ_CHECK_CRC32) + return XZ_UNSUPPORTED_CHECK; +#else + if (s->check_type > XZ_CHECK_CRC32) + return XZ_OPTIONS_ERROR; +#endif + + return XZ_OK; +} + +/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ +static enum xz_ret XZ_FUNC dec_stream_footer(struct xz_dec *s) +{ + if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) + return XZ_DATA_ERROR; + + if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) + return XZ_DATA_ERROR; + + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. + */ + if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) + return XZ_DATA_ERROR; + + if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) + return XZ_DATA_ERROR; + + /* + * Use XZ_STREAM_END instead of XZ_OK to be more convenient + * for the caller. + */ + return XZ_STREAM_END; +} + +/* Decode the Block Header and initialize the filter chain. */ +static enum xz_ret XZ_FUNC dec_block_header(struct xz_dec *s) +{ + enum xz_ret ret; + + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + s->temp.size -= 4; + if (xz_crc32(s->temp.buf, s->temp.size, 0) + != get_le32(s->temp.buf + s->temp.size)) + return XZ_DATA_ERROR; + + s->temp.pos = 2; + + /* + * Catch unsupported Block Flags. We support only one or two filters + * in the chain, so we catch that with the same test. + */ +#ifdef XZ_DEC_BCJ + if (s->temp.buf[1] & 0x3E) +#else + if (s->temp.buf[1] & 0x3F) +#endif + return XZ_OPTIONS_ERROR; + + /* Compressed Size */ + if (s->temp.buf[1] & 0x40) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.compressed = s->vli; + } else { + s->block_header.compressed = VLI_UNKNOWN; + } + + /* Uncompressed Size */ + if (s->temp.buf[1] & 0x80) { + if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) + != XZ_STREAM_END) + return XZ_DATA_ERROR; + + s->block_header.uncompressed = s->vli; + } else { + s->block_header.uncompressed = VLI_UNKNOWN; + } + +#ifdef XZ_DEC_BCJ + /* If there are two filters, the first one must be a BCJ filter. */ + s->bcj_active = s->temp.buf[1] & 0x01; + if (s->bcj_active) { + if (s->temp.size - s->temp.pos < 2) + return XZ_OPTIONS_ERROR; + + ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* + * We don't support custom start offset, + * so Size of Properties must be zero. + */ + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + } +#endif + + /* Valid Filter Flags always take at least two bytes. */ + if (s->temp.size - s->temp.pos < 2) + return XZ_DATA_ERROR; + + /* Filter ID = LZMA2 */ + if (s->temp.buf[s->temp.pos++] != 0x21) + return XZ_OPTIONS_ERROR; + + /* Size of Properties = 1-byte Filter Properties */ + if (s->temp.buf[s->temp.pos++] != 0x01) + return XZ_OPTIONS_ERROR; + + /* Filter Properties contains LZMA2 dictionary size. */ + if (s->temp.size - s->temp.pos < 1) + return XZ_DATA_ERROR; + + ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); + if (ret != XZ_OK) + return ret; + + /* The rest must be Header Padding. 
*/ + while (s->temp.pos < s->temp.size) + if (s->temp.buf[s->temp.pos++] != 0x00) + return XZ_OPTIONS_ERROR; + + s->temp.pos = 0; + s->block.compressed = 0; + s->block.uncompressed = 0; + + return XZ_OK; +} + +static enum xz_ret XZ_FUNC dec_main(struct xz_dec *s, struct xz_buf *b) +{ + enum xz_ret ret; + + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s->in_start = b->in_pos; + + while (true) { + switch (s->sequence) { + case SEQ_STREAM_HEADER: + /* + * Stream Header is copied to s->temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if (!fill_temp(s, b)) + return XZ_OK; + + /* + * If dec_stream_header() returns + * XZ_UNSUPPORTED_CHECK, it is still possible + * to continue decoding if working in multi-call + * mode. Thus, update s->sequence before calling + * dec_stream_header(). + */ + s->sequence = SEQ_BLOCK_START; + + ret = dec_stream_header(s); + if (ret != XZ_OK) + return ret; + + case SEQ_BLOCK_START: + /* We need one byte of input to continue. */ + if (b->in_pos == b->in_size) + return XZ_OK; + + /* See if this is the beginning of the Index field. */ + if (b->in[b->in_pos] == 0) { + s->in_start = b->in_pos++; + s->sequence = SEQ_INDEX; + break; + } + + /* + * Calculate the size of the Block Header and + * prepare to decode it. + */ + s->block_header.size + = ((uint32_t)b->in[b->in_pos] + 1) * 4; + + s->temp.size = s->block_header.size; + s->temp.pos = 0; + s->sequence = SEQ_BLOCK_HEADER; + + case SEQ_BLOCK_HEADER: + if (!fill_temp(s, b)) + return XZ_OK; + + ret = dec_block_header(s); + if (ret != XZ_OK) + return ret; + + s->sequence = SEQ_BLOCK_UNCOMPRESS; + + case SEQ_BLOCK_UNCOMPRESS: + ret = dec_block(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_BLOCK_PADDING; + + case SEQ_BLOCK_PADDING: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + while (s->block.compressed & 3) { + if (b->in_pos == b->in_size) + return XZ_OK; + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + + ++s->block.compressed; + } + + s->sequence = SEQ_BLOCK_CHECK; + + case SEQ_BLOCK_CHECK: + if (s->check_type == XZ_CHECK_CRC32) { + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + } +#ifdef XZ_DEC_ANY_CHECK + else if (!check_skip(s, b)) { + return XZ_OK; + } +#endif + + s->sequence = SEQ_BLOCK_START; + break; + + case SEQ_INDEX: + ret = dec_index(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->sequence = SEQ_INDEX_PADDING; + + case SEQ_INDEX_PADDING: + while ((s->index.size + (b->in_pos - s->in_start)) + & 3) { + if (b->in_pos == b->in_size) { + index_update(s, b); + return XZ_OK; + } + + if (b->in[b->in_pos++] != 0) + return XZ_DATA_ERROR; + } + + /* Finish the CRC32 value and Index size. */ + index_update(s, b); + + /* Compare the hashes to validate the Index field. 
*/ + if (!memeq(&s->block.hash, &s->index.hash, + sizeof(s->block.hash))) + return XZ_DATA_ERROR; + + s->sequence = SEQ_INDEX_CRC32; + + case SEQ_INDEX_CRC32: + ret = crc32_validate(s, b); + if (ret != XZ_STREAM_END) + return ret; + + s->temp.size = STREAM_HEADER_SIZE; + s->sequence = SEQ_STREAM_FOOTER; + + case SEQ_STREAM_FOOTER: + if (!fill_temp(s, b)) + return XZ_OK; + + return dec_stream_footer(s); + } + } + + /* Never reached */ +} + +/* + * xz_dec_run() is a wrapper for dec_main() to handle some special cases in + * multi-call and single-call decoding. + * + * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we + * are not going to make any progress anymore. This is to prevent the caller + * from calling us infinitely when the input file is truncated or otherwise + * corrupt. Since zlib-style API allows that the caller fills the input buffer + * only when the decoder doesn't produce any new output, we have to be careful + * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only + * after the second consecutive call to xz_dec_run() that makes no progress. + * + * In single-call mode, if we couldn't decode everything and no error + * occurred, either the input is truncated or the output buffer is too small. + * Since we know that the last input byte never produces any output, we know + * that if all the input was consumed and decoding wasn't finished, the file + * must be corrupt. Otherwise the output buffer has to be too small or the + * file is corrupt in a way that decoding it produces too big output. + * + * If single-call decoding fails, we reset b->in_pos and b->out_pos back to + * their original values. This is because with some filter chains there won't + * be any valid uncompressed data in the output buffer unless the decoding + * actually succeeds (that's the price to pay of using the output buffer as + * the workspace). + */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_run(struct xz_dec *s, struct xz_buf *b) +{ + size_t in_start; + size_t out_start; + enum xz_ret ret; + + if (DEC_IS_SINGLE(s->mode)) + xz_dec_reset(s); + + in_start = b->in_pos; + out_start = b->out_pos; + ret = dec_main(s, b); + + if (DEC_IS_SINGLE(s->mode)) { + if (ret == XZ_OK) + ret = b->in_pos == b->in_size + ? 
XZ_DATA_ERROR : XZ_BUF_ERROR; + + if (ret != XZ_STREAM_END) { + b->in_pos = in_start; + b->out_pos = out_start; + } + } else if (ret == XZ_OK && in_start == b->in_pos + && out_start == b->out_pos) { + if (s->allow_buf_error) + ret = XZ_BUF_ERROR; + + s->allow_buf_error = true; + } else { + s->allow_buf_error = false; + } + + return ret; +} + +XZ_EXTERN struct xz_dec * XZ_FUNC xz_dec_init( + enum xz_mode mode, uint32_t dict_max) +{ + struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->mode = mode; + +#ifdef XZ_DEC_BCJ + s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); + if (s->bcj == NULL) + goto error_bcj; +#endif + + s->lzma2 = xz_dec_lzma2_create(mode, dict_max); + if (s->lzma2 == NULL) + goto error_lzma2; + + xz_dec_reset(s); + return s; + +error_lzma2: +#ifdef XZ_DEC_BCJ + xz_dec_bcj_end(s->bcj); +error_bcj: +#endif + kfree(s); + return NULL; +} + +XZ_EXTERN void XZ_FUNC xz_dec_reset(struct xz_dec *s) +{ + s->sequence = SEQ_STREAM_HEADER; + s->allow_buf_error = false; + s->pos = 0; + s->crc32 = 0; + memzero(&s->block, sizeof(s->block)); + memzero(&s->index, sizeof(s->index)); + s->temp.pos = 0; + s->temp.size = STREAM_HEADER_SIZE; +} + +XZ_EXTERN void XZ_FUNC xz_dec_end(struct xz_dec *s) +{ + if (s != NULL) { + xz_dec_lzma2_end(s->lzma2); +#ifdef XZ_DEC_BCJ + xz_dec_bcj_end(s->bcj); +#endif + kfree(s); + } +} diff --git a/probe-busybox/archival/libarchive/unxz/xz_lzma2.h b/probe-busybox/archival/libarchive/unxz/xz_lzma2.h new file mode 100644 index 00000000..47f21afb --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_lzma2.h @@ -0,0 +1,204 @@ +/* + * LZMA2 definitions + * + * Authors: Lasse Collin + * Igor Pavlov + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_LZMA2_H +#define XZ_LZMA2_H + +/* Range coder constants */ +#define RC_SHIFT_BITS 8 +#define RC_TOP_BITS 24 +#define RC_TOP_VALUE (1 << RC_TOP_BITS) +#define RC_BIT_MODEL_TOTAL_BITS 11 +#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS) +#define RC_MOVE_BITS 5 + +/* + * Maximum number of position states. A position state is the lowest pb + * number of bits of the current uncompressed offset. In some places there + * are different sets of probabilities for different position states. + */ +#define POS_STATES_MAX (1 << 4) + +/* + * This enum is used to track which LZMA symbols have occurred most recently + * and in which order. This information is used to predict the next symbol. + * + * Symbols: + * - Literal: One 8-bit byte + * - Match: Repeat a chunk of data at some distance + * - Long repeat: Multi-byte match at a recently seen distance + * - Short repeat: One-byte repeat at a recently seen distance + * + * The symbol names are in from STATE_oldest_older_previous. REP means + * either short or long repeated match, and NONLIT means any non-literal. + */ +enum lzma_state { + STATE_LIT_LIT, + STATE_MATCH_LIT_LIT, + STATE_REP_LIT_LIT, + STATE_SHORTREP_LIT_LIT, + STATE_MATCH_LIT, + STATE_REP_LIT, + STATE_SHORTREP_LIT, + STATE_LIT_MATCH, + STATE_LIT_LONGREP, + STATE_LIT_SHORTREP, + STATE_NONLIT_MATCH, + STATE_NONLIT_REP +}; + +/* Total number of states */ +#define STATES 12 + +/* The lowest 7 states indicate that the previous state was a literal. */ +#define LIT_STATES 7 + +/* Indicate that the latest symbol was a literal. 
*/ +static inline void XZ_FUNC lzma_state_literal(enum lzma_state *state) +{ + if (*state <= STATE_SHORTREP_LIT_LIT) + *state = STATE_LIT_LIT; + else if (*state <= STATE_LIT_SHORTREP) + *state -= 3; + else + *state -= 6; +} + +/* Indicate that the latest symbol was a match. */ +static inline void XZ_FUNC lzma_state_match(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH; +} + +/* Indicate that the latest state was a long repeated match. */ +static inline void XZ_FUNC lzma_state_long_rep(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP; +} + +/* Indicate that the latest symbol was a short match. */ +static inline void XZ_FUNC lzma_state_short_rep(enum lzma_state *state) +{ + *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP; +} + +/* Test if the previous symbol was a literal. */ +static inline bool XZ_FUNC lzma_state_is_literal(enum lzma_state state) +{ + return state < LIT_STATES; +} + +/* Each literal coder is divided in three sections: + * - 0x001-0x0FF: Without match byte + * - 0x101-0x1FF: With match byte; match bit is 0 + * - 0x201-0x2FF: With match byte; match bit is 1 + * + * Match byte is used when the previous LZMA symbol was something else than + * a literal (that is, it was some kind of match). + */ +#define LITERAL_CODER_SIZE 0x300 + +/* Maximum number of literal coders */ +#define LITERAL_CODERS_MAX (1 << 4) + +/* Minimum length of a match is two bytes. */ +#define MATCH_LEN_MIN 2 + +/* Match length is encoded with 4, 5, or 10 bits. + * + * Length Bits + * 2-9 4 = Choice=0 + 3 bits + * 10-17 5 = Choice=1 + Choice2=0 + 3 bits + * 18-273 10 = Choice=1 + Choice2=1 + 8 bits + */ +#define LEN_LOW_BITS 3 +#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS) +#define LEN_MID_BITS 3 +#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS) +#define LEN_HIGH_BITS 8 +#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS) +#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS) + +/* + * Maximum length of a match is 273 which is a result of the encoding + * described above. + */ +#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1) + +/* + * Different sets of probabilities are used for match distances that have + * very short match length: Lengths of 2, 3, and 4 bytes have a separate + * set of probabilities for each length. The matches with longer length + * use a shared set of probabilities. + */ +#define DIST_STATES 4 + +/* + * Get the index of the appropriate probability array for decoding + * the distance slot. + */ +static inline uint32_t XZ_FUNC lzma_get_dist_state(uint32_t len) +{ + return len < DIST_STATES + MATCH_LEN_MIN + ? len - MATCH_LEN_MIN : DIST_STATES - 1; +} + +/* + * The highest two bits of a 32-bit match distance are encoded using six bits. + * This six-bit value is called a distance slot. This way encoding a 32-bit + * value takes 6-36 bits, larger values taking more bits. + */ +#define DIST_SLOT_BITS 6 +#define DIST_SLOTS (1 << DIST_SLOT_BITS) + +/* Match distances up to 127 are fully encoded using probabilities. Since + * the highest two bits (distance slot) are always encoded using six bits, + * the distances 0-3 don't need any additional bits to encode, since the + * distance slot itself is the same as the actual distance. DIST_MODEL_START + * indicates the first distance slot where at least one additional bit is + * needed. 
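+ * For example, distance slot 4 covers distances 4-5 (one extra bit),
+ * slot 5 covers 6-7, and slot 6 covers 8-11 (two extra bits).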
+ */ +#define DIST_MODEL_START 4 + +/* + * Match distances greater than 127 are encoded in three pieces: + * - distance slot: the highest two bits + * - direct bits: 2-26 bits below the highest two bits + * - alignment bits: four lowest bits + * + * Direct bits don't use any probabilities. + * + * The distance slot value of 14 is for distances 128-191. + */ +#define DIST_MODEL_END 14 + +/* Distance slots that indicate a distance <= 127. */ +#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2) +#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS) + +/* + * For match distances greater than 127, only the highest two bits and the + * lowest four bits (alignment) is encoded using probabilities. + */ +#define ALIGN_BITS 4 +#define ALIGN_SIZE (1 << ALIGN_BITS) +#define ALIGN_MASK (ALIGN_SIZE - 1) + +/* Total number of all probability variables */ +#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE) + +/* + * LZMA remembers the four most recent match distances. Reusing these + * distances tends to take less space than re-encoding the actual + * distance value. + */ +#define REPS 4 + +#endif diff --git a/probe-busybox/archival/libarchive/unxz/xz_private.h b/probe-busybox/archival/libarchive/unxz/xz_private.h new file mode 100644 index 00000000..145649a8 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_private.h @@ -0,0 +1,159 @@ +/* + * Private includes and definitions + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_PRIVATE_H +#define XZ_PRIVATE_H + +#ifdef __KERNEL__ + /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ +# ifndef XZ_PREBOOT +# include +# include +# include +# define memeq(a, b, size) (memcmp(a, b, size) == 0) +# define memzero(buf, size) memset(buf, 0, size) +# endif +# include +# include +# define get_le32(p) le32_to_cpup((const uint32_t *)(p)) + /* XZ_IGNORE_KCONFIG may be defined only via decompress_unxz.c. */ +# ifndef XZ_IGNORE_KCONFIG +# ifdef CONFIG_XZ_DEC_X86 +# define XZ_DEC_X86 +# endif +# ifdef CONFIG_XZ_DEC_POWERPC +# define XZ_DEC_POWERPC +# endif +# ifdef CONFIG_XZ_DEC_IA64 +# define XZ_DEC_IA64 +# endif +# ifdef CONFIG_XZ_DEC_ARM +# define XZ_DEC_ARM +# endif +# ifdef CONFIG_XZ_DEC_ARMTHUMB +# define XZ_DEC_ARMTHUMB +# endif +# ifdef CONFIG_XZ_DEC_SPARC +# define XZ_DEC_SPARC +# endif +# endif +# include +#else + /* + * For userspace builds, use a separate header to define the required + * macros and functions. This makes it easier to adapt the code into + * different environments and avoids clutter in the Linux kernel tree. + */ +# include "xz_config.h" +#endif + +/* If no specific decoding mode is requested, enable support for all modes. */ +#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \ + && !defined(XZ_DEC_DYNALLOC) +# define XZ_DEC_SINGLE +# define XZ_DEC_PREALLOC +# define XZ_DEC_DYNALLOC +#endif + +/* + * The DEC_IS_foo(mode) macros are used in "if" statements. If only some + * of the supported modes are enabled, these macros will evaluate to true or + * false at compile time and thus allow the compiler to omit unneeded code. 
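+ * For example, if only XZ_DEC_SINGLE is enabled, DEC_IS_MULTI(mode)
+ * expands to (false) and the multi-call paths can be optimized away.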
+ */ +#ifdef XZ_DEC_SINGLE +# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE) +#else +# define DEC_IS_SINGLE(mode) (false) +#endif + +#ifdef XZ_DEC_PREALLOC +# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC) +#else +# define DEC_IS_PREALLOC(mode) (false) +#endif + +#ifdef XZ_DEC_DYNALLOC +# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC) +#else +# define DEC_IS_DYNALLOC(mode) (false) +#endif + +#if !defined(XZ_DEC_SINGLE) +# define DEC_IS_MULTI(mode) (true) +#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC) +# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE) +#else +# define DEC_IS_MULTI(mode) (false) +#endif + +/* + * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ. + * XZ_DEC_BCJ is used to enable generic support for BCJ decoders. + */ +#ifndef XZ_DEC_BCJ +# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \ + || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \ + || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \ + || defined(XZ_DEC_SPARC) +# define XZ_DEC_BCJ +# endif +#endif + +/* + * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used + * before calling xz_dec_lzma2_run(). + */ +XZ_EXTERN struct xz_dec_lzma2 * XZ_FUNC xz_dec_lzma2_create( + enum xz_mode mode, uint32_t dict_max); + +/* + * Decode the LZMA2 properties (one byte) and reset the decoder. Return + * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not + * big enough, and XZ_OPTIONS_ERROR if props indicates something that this + * decoder doesn't support. + */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_lzma2_reset( + struct xz_dec_lzma2 *s, uint8_t props); + +/* Decode raw LZMA2 stream from b->in to b->out. */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_lzma2_run( + struct xz_dec_lzma2 *s, struct xz_buf *b); + +/* Free the memory allocated for the LZMA2 decoder. */ +XZ_EXTERN void XZ_FUNC xz_dec_lzma2_end(struct xz_dec_lzma2 *s); + +#ifdef XZ_DEC_BCJ +/* + * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before + * calling xz_dec_bcj_run(). + */ +XZ_EXTERN struct xz_dec_bcj * XZ_FUNC xz_dec_bcj_create(bool single_call); + +/* + * Decode the Filter ID of a BCJ filter. This implementation doesn't + * support custom start offsets, so no decoding of Filter Properties + * is needed. Returns XZ_OK if the given Filter ID is supported. + * Otherwise XZ_OPTIONS_ERROR is returned. + */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_reset( + struct xz_dec_bcj *s, uint8_t id); + +/* + * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is + * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run() + * must be called directly. + */ +XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_run(struct xz_dec_bcj *s, + struct xz_dec_lzma2 *lzma2, struct xz_buf *b); + +/* Free the memory allocated for the BCJ filters. */ +#define xz_dec_bcj_end(s) kfree(s) +#endif + +#endif diff --git a/probe-busybox/archival/libarchive/unxz/xz_stream.h b/probe-busybox/archival/libarchive/unxz/xz_stream.h new file mode 100644 index 00000000..66cb5a70 --- /dev/null +++ b/probe-busybox/archival/libarchive/unxz/xz_stream.h @@ -0,0 +1,62 @@ +/* + * Definitions for handling the .xz file format + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. 
+ */ + +#ifndef XZ_STREAM_H +#define XZ_STREAM_H + +#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32 +# include +# undef crc32 +# define xz_crc32(buf, size, crc) \ + (~crc32_le(~(uint32_t)(crc), buf, size)) +#endif + +/* + * See the .xz file format specification at + * http://tukaani.org/xz/xz-file-format.txt + * to understand the container format. + */ + +#define STREAM_HEADER_SIZE 12 + +#define HEADER_MAGIC "\3757zXZ" +#define HEADER_MAGIC_SIZE 6 + +#define FOOTER_MAGIC "YZ" +#define FOOTER_MAGIC_SIZE 2 + +/* + * Variable-length integer can hold a 63-bit unsigned integer or a special + * value indicating that the value is unknown. + * + * Experimental: vli_type can be defined to uint32_t to save a few bytes + * in code size (no effect on speed). Doing so limits the uncompressed and + * compressed size of the file to less than 256 MiB and may also weaken + * error detection slightly. + */ +typedef uint64_t vli_type; + +#define VLI_MAX ((vli_type)-1 / 2) +#define VLI_UNKNOWN ((vli_type)-1) + +/* Maximum encoded size of a VLI */ +#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7) + +/* Integrity Check types */ +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10 +}; + +/* Maximum possible Check ID */ +#define XZ_CHECK_MAX 15 + +#endif diff --git a/probe-busybox/build-config/gitlab-docker/Dockerfile b/probe-busybox/build-config/gitlab-docker/Dockerfile new file mode 100644 index 00000000..9faacb6c --- /dev/null +++ b/probe-busybox/build-config/gitlab-docker/Dockerfile @@ -0,0 +1,11 @@ +FROM centos:7.9.2009 + +MAINTAINER phomburg@ripe.net + +RUN yum -y update +RUN yum -y install autoconf +RUN yum -y install automake +RUN yum -y install libtool +RUN yum -y install make +RUN yum -y install ncurses-devel +RUN yum -y install openssl-devel diff --git a/probe-busybox/coreutils/Config.src b/probe-busybox/coreutils/Config.src new file mode 100644 index 00000000..c056320f --- /dev/null +++ b/probe-busybox/coreutils/Config.src @@ -0,0 +1,54 @@ +# +# For a description of the syntax of this configuration file, +# see scripts/kbuild/config-language.txt. +# + +menu "Coreutils" + +INSERT + +comment "Common options" + +config FEATURE_VERBOSE + bool "Support verbose options (usually -v) for various applets" + default y + help + Enable cp -v, rm -v and similar messages. + Also enables long option (--verbose) if it exists. + Without this option, -v is accepted but ignored. + +comment "Common options for cp and mv" + depends on CP || MV + +config FEATURE_PRESERVE_HARDLINKS + bool "Preserve hard links" + default y + depends on CP || MV + help + Allow cp and mv to preserve hard links. + +comment "Common options for ls, more and telnet" + depends on LS || MORE || TELNET + +config FEATURE_AUTOWIDTH + bool "Calculate terminal & column widths" + default y + depends on LS || MORE || TELNET + help + This option allows utilities such as 'ls', 'more' and 'telnet' + to determine the width of the screen, which can allow them to + display additional text or avoid wrapping text onto the next line. + If you leave this disabled, your utilities will be especially + primitive and will be unable to determine the current screen width. + +comment "Common options for df, du, ls" + depends on DF || DU || LS + +config FEATURE_HUMAN_READABLE + bool "Support for human readable output (example 13k, 23M, 235G)" + default y + depends on DF || DU || LS + help + Allow df, du, and ls to have human readable output. 
+ +endmenu diff --git a/probe-busybox/coreutils/Kbuild.src b/probe-busybox/coreutils/Kbuild.src new file mode 100644 index 00000000..d9a44878 --- /dev/null +++ b/probe-busybox/coreutils/Kbuild.src @@ -0,0 +1,25 @@ +# Makefile for busybox +# +# Copyright (C) 1999-2005 by Erik Andersen +# +# Licensed under GPLv2, see file LICENSE in this source tree. + +libs-y += libcoreutils/ + +lib-y:= + +INSERT + +lib-$(CONFIG_MORE) += cat.o # more uses it if stdout isn't a tty +lib-$(CONFIG_LESS) += cat.o # less too +lib-$(CONFIG_CRONTAB) += cat.o # crontab -l +lib-$(CONFIG_ADDUSER) += chown.o # used by adduser +lib-$(CONFIG_ADDGROUP) += chown.o # used by adduser +lib-$(CONFIG_ASH) += echo.o # used by ash +lib-$(CONFIG_SH_IS_ASH) += echo.o # used by ash +lib-$(CONFIG_BASH_IS_ASH) += echo.o # used by ash +lib-$(CONFIG_HUSH) += echo.o # used by hush +lib-$(CONFIG_SH_IS_HUSH) += echo.o # used by hush +lib-$(CONFIG_BASH_IS_HUSH) += echo.o # used by hush +lib-$(CONFIG_FTPD) += ls.o # used by ftpd +lib-$(CONFIG_ASH_BUILTIN_PRINTF) += printf.o diff --git a/probe-busybox/coreutils/buddyinfo.c b/probe-busybox/coreutils/buddyinfo.c new file mode 100644 index 00000000..863e473a --- /dev/null +++ b/probe-busybox/coreutils/buddyinfo.c @@ -0,0 +1,101 @@ +/* vi: set sw=2 ts=2: + * + * 2010-2013 Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * read /cat/proc/buddyinfo and print out. + * if env variable LOWMEM_REBOOT is set KBytes same as buddyinfo reboot + * + */ +//config:config BUDDYINFO +//config: bool "buddyinfo" +//config: default n +//config: help +//config: buddyinfo reports on the amount of free memory + +//applet:IF_CONDMV(APPLET(buddyinfo, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_BUDDYINFO) += buddyinfo.o + +//usage:#define buddyinfo_trivial_usage +//usage: "" +//usage:#define buddyinfo_full_usage "\n\n" +//usage: "" + +#include "libbb.h" + +#include + +#define DBQ(str) "\"" #str "\"" + +/* This is a NOFORK applet. Be very careful! */ + +int buddyinfo_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int buddyinfo_main(int argc UNUSED_PARAM, char **argv) +{ + char *lowmemChar; + unsigned lowmem = 0; + FILE *fp = xfopen_for_read("/proc/buddyinfo"); + char aa[10]; + char *my_mac ; + int i = 0; + int j = 0; + int memBlock = 4; + int need_reboot = 0; // don't reboot + int freeMem = 0; + int jMax = 64; // enough + struct sysinfo info; + + lowmemChar = argv[1]; + + if(lowmemChar) + lowmem = xatou(lowmemChar); + fscanf(fp, "%s", aa); + fscanf(fp, "%s", aa); + fscanf(fp, "%s", aa); + fscanf(fp, "%s", aa); + + my_mac = getenv("ETHER_SCANNED"); + + if (lowmem >= 4 ) + { + /* We need to reboot unless we find a big enough chunk + * of memory. + */ + need_reboot = 1; + } + printf ("RESULT { " DBQ(id) ": " DBQ(9001) ", " DBQ(time) ": %lld", + (long long)time(0)); + if (my_mac != NULL) + printf(", " DBQ(macaddr) ": " DBQ(%s), my_mac); + + /* get uptime and print it */ + sysinfo(&info); + printf (", " DBQ(uptime) ": %ld", info.uptime ); + + printf(", " DBQ(buddyinfo) ": [ "); + for (j=0; j < jMax; j++) + { + if (fscanf(fp, "%d", &i) != 1) + break; + printf("%s%d", j == 0 ? 
"" : ", ", i); + freeMem += ( memBlock * i); + if (i > 0 && lowmem >= 4 && memBlock >= lowmem) + { + /* Found a big enough chunk */ + need_reboot = 0; + } + memBlock *= 2; + } + + /* now print it */ + printf (" ], " DBQ(freemem) ": %d }\n" , freeMem); + + fclose (fp); + + if(need_reboot) + { + fprintf(stderr, "buddyinfo: nothing found for size %d\n", lowmem); + return (EXIT_FAILURE); + } + return 0; +} diff --git a/probe-busybox/coreutils/condmv.c b/probe-busybox/coreutils/condmv.c new file mode 100644 index 00000000..d792b35c --- /dev/null +++ b/probe-busybox/coreutils/condmv.c @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * condmv.c -- move a file only if the destination doesn't exist + */ +//config:config CONDMV +//config: bool "condmv" +//config: default n +//config: help +//config: condmv is used to rename a file if the destination does not exists + +//applet:IF_CONDMV(APPLET(condmv, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_CONDMV) += condmv.o + +//usage:#define condmv_trivial_usage +//usage: "[-A ][-f] FILE1 FILE2" +//usage:#define condmv_full_usage "\n\n" +//usage: "Rename FILE1 to FILE2 if FILE2 does not exist\n" +//usage: "\nOptions:" +//usage: "\n -A Append before renaming FILE1" +//usage: "\n -f Force. Move even if FILE2 does exist" + +#include "libbb.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_FROM1_REL ATLAS_DATA_NEW_REL +#define SAFE_PREFIX_FROM2_REL ATLAS_DATA_OUT_REL +#define SAFE_PREFIX_TO1_REL ATLAS_DATA_OUT_REL +#define SAFE_PREFIX_TO2_REL ATLAS_DATA_STORAGE_REL + +#define A_FLAG (1 << 0) +#define a_FLAG (1 << 1) +#define D_FLAG (1 << 2) +#define f_FLAG (1 << 3) +#define t_FLAG (1 << 4) +#define x_FLAG (1 << 5) + +static time_t age_value; +static int cross_filesystems, append_timestamp; + +static int do_dir(char *from_dir, char *to_dir); +static int do_cprm(char *from_file, char *to_file); + +int condmv_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int condmv_main(int argc, char *argv[]) +{ + int r; + char *opt_add, *opt_age, *from, *to, *check; + char *rebased_from= NULL; + char *rebased_to= NULL; + uint32_t opt; + struct stat sb; + FILE *file; + time_t mytime; + + opt_add= NULL; + opt_age= NULL; + opt_complementary= NULL; /* For when we are called by crond */ + opt= getopt32(argv, "!A:a:Dftx", &opt_add, &opt_age); + + if (opt == (uint32_t)-1) + { + fprintf(stderr, "condmv: bad options\n"); + return 1; + } + + if (argc != optind + 2) + { + fprintf(stderr, "condmv: two arguments expected\n"); + return 1; + } + + from= argv[optind]; + to= argv[optind+1]; + + rebased_from= rebased_validated_filename(ATLAS_SPOOLDIR, + from, SAFE_PREFIX_FROM1_REL); + if (rebased_from == NULL) + { + rebased_from= rebased_validated_filename(ATLAS_SPOOLDIR, + from, SAFE_PREFIX_FROM2_REL); + } + if (rebased_from == NULL) + { + fprintf(stderr, "insecure from file '%s'\n", from); + goto err; + } + rebased_to= rebased_validated_filename(ATLAS_SPOOLDIR, + to, SAFE_PREFIX_TO1_REL); + if (rebased_to == NULL) + { + rebased_to= rebased_validated_filename(ATLAS_SPOOLDIR, + to, SAFE_PREFIX_TO2_REL); + } + if (rebased_to == NULL) + { + rebased_to= rebased_validated_filename(ATLAS_SPOOLDIR, + to, SAFE_PREFIX_FROM1_REL); + } + if (rebased_to == NULL) + { + fprintf(stderr, "insecure to file '%s'\n", to); + goto err; + } + + if (opt_age) + { + age_value= strtol(opt_age, &check, 0); + if (check[0] != '\0' || age_value <= 0) + { + fprintf(stderr, "bad age value '%s'\n", opt_age); + goto err; + } 
+ } + else + age_value= 0; + + cross_filesystems= !!(opt & x_FLAG); + append_timestamp= !!(opt & t_FLAG); + + if (opt & D_FLAG) + { + r= do_dir(rebased_from, rebased_to); + free(rebased_from); rebased_from= NULL; + free(rebased_to); rebased_to= NULL; + return r; + } + + if (stat(rebased_to, &sb) == 0 && !(opt & f_FLAG)) + { + /* Destination exists */ + fprintf(stderr, + "condmv: not moving, destination '%s' exists\n", + rebased_to); + goto err; + } + + if (opt_add) + { + mytime = time(NULL); + /* We have to add something to the existing file before moving + * to. + */ + file= fopen(rebased_from, "a"); + if (file == NULL) + { + fprintf(stderr, + "condmv: unable to append to '%s': %s\n", + rebased_from, strerror(errno)); + goto err; + } + if (fprintf(file, "%s %llu %s\n", opt_add, + (unsigned long long)mytime, from) < 0) + { + fprintf(stderr, + "condmv: unable to append to '%s': %s\n", + rebased_from, strerror(errno)); + fclose(file); + goto err; + } + if (fclose(file) != 0) + { + fprintf(stderr, + "condmv: unable to close '%s': %s\n", + rebased_from, strerror(errno)); + goto err; + } + } + + + /* Make sure that rebased_from exists before trying to rename */ + if (stat(rebased_from, &sb) == -1 && errno == ENOENT) + { + /* Leave now to avoid a diagnostic from the rename code */ + goto err; + } + + if (rename(rebased_from, rebased_to) != 0) + { + fprintf(stderr, "condmv: unable to rename '%s' to '%s': %s\n", + rebased_from, rebased_to, strerror(errno)); + goto err; + } + + free(rebased_from); rebased_from= NULL; + free(rebased_to); rebased_to= NULL; + return 0; + +err: + if (rebased_from) free(rebased_from); + if (rebased_to) free(rebased_to); + return 1; +} + +static int do_dir(char *from_dir, char *to_dir) +{ + int r, error; + size_t len, extra_len; + time_t now; + DIR *dir; + struct dirent *de; + char *from_file, *new_from_file; + char *to_file, *new_to_file; + size_t from_file_len, to_file_len; + struct stat sb; + + from_file= NULL; + from_file_len= 0; + to_file= NULL; + to_file_len= 0; + + dir= opendir(from_dir); + if (dir == NULL) + { + fprintf(stderr, "condmv: unable to open dir '%s': %s\n", + from_dir, strerror(errno)); + return 1; + } + + now= time (NULL); /* For age_value */ + + error= 0; /* Assume no failures */ + while (de= readdir(dir), de != NULL) + { + len= strlen(from_dir) + 1 + strlen(de->d_name) + 1; + if (len > from_file_len) + { + new_from_file= realloc(from_file, len); + if (new_from_file == NULL) + { + fprintf(stderr, + "condmv: out of memory (from_file)\n"); + error= 1; + break; + } + from_file= new_from_file; new_from_file= NULL; + from_file_len= len; + } + snprintf(from_file, from_file_len, "%s/%s", + from_dir, de->d_name); + r= stat(from_file, &sb); + if (r == -1) + { + fprintf(stderr, "condmv: stat %s failed: %s\n", + from_file, strerror(errno)); + error= 1; + break; + } + if (!S_ISREG(sb.st_mode)) + { + /* Skip non-regular objects */ + continue; + } + + if (age_value) + { + if (sb.st_mtime + age_value > now) + continue; + } + + if (append_timestamp) + { + /* A unix timestamp is currently 10 characters. + * Allocate an extra 16 characters to have enough + * space, also for the separator. 
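+ * (The timestamped name is built with "%s/%s.%lu", i.e.
+ * "<to_dir>/<name>.<unix time>".)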
+ */ + extra_len= 16; + } + else + extra_len= 0; + len= strlen(to_dir) + 1 + strlen(de->d_name) + extra_len + 1; + if (len > to_file_len) + { + new_to_file= realloc(to_file, len); + if (new_to_file == NULL) + { + fprintf(stderr, + "condmv: out of memory (to_file)\n"); + error= 1; + break; + } + to_file= new_to_file; new_to_file= NULL; + to_file_len= len; + } + if (append_timestamp) + { + snprintf(to_file, to_file_len, "%s/%s.%lu", + to_dir, de->d_name, (unsigned long)now); + } + else + { + snprintf(to_file, to_file_len, "%s/%s", + to_dir, de->d_name); + } + + /* Make sure to_file doesn't exist */ + r= stat(to_file, &sb); + if (r == 0 || (r == -1 && errno != ENOENT)) + { + /* Something wrong with to_file */ + continue; + } + + if (cross_filesystems) + { + r= do_cprm(from_file, to_file); + if (r == 0) + { + /* Okay, next one */ + continue; + } + error= 1; + break; + } + + r= rename(from_file, to_file); + if (r == -1) + { + fprintf(stderr, + "condmv: rename %s to %s failed: %s\n", + from_file, to_file, strerror(errno)); + error= 1; + break; + } + } + + closedir(dir); + + if (from_file) + { + free(from_file); + from_file= NULL; + } + if (to_file) + { + free(to_file); + to_file= NULL; + } + + return error; +} + +static int do_cprm(char *from_file, char *to_file) +{ + FILE *fp_in, *fp_out; + size_t len_in, len_out; + char buf[1024]; + + fp_in= fopen(from_file, "rb"); + if (fp_in == NULL) + { + fprintf(stderr, "condmv: cannot open '%s' for reading: %s\n", + from_file, strerror(errno)); + return 1; + } + + fp_out= fopen(to_file, "wb"); + if (fp_out == NULL) + { + fprintf(stderr, "condmv: cannot open '%s' for writing: %s\n", + to_file, strerror(errno)); + fclose(fp_in); fp_in= NULL; + return 1; + } + + for (;;) + { + len_in= fread(buf, 1, sizeof(buf), fp_in); + if (len_in == 0) + break; /* EOF or error */ + + len_out= fwrite(buf, 1, len_in, fp_out); + if (len_out != len_in) + { + fprintf(stderr, + "condmv: error writing to '%s': %s\n", + to_file, strerror(errno)); + fclose(fp_in); fp_in= NULL; + fclose(fp_out); fp_out= NULL; + unlink(to_file); + return 1; + } + } + + if (ferror(fp_in)) + { + fprintf(stderr, + "condmv: error reading from '%s': %s\n", + from_file, strerror(errno)); + fclose(fp_in); fp_in= NULL; + fclose(fp_out); fp_out= NULL; + unlink(to_file); + return 1; + } + + fclose(fp_in); fp_in= NULL; + fclose(fp_out); fp_out= NULL; + unlink(from_file); + + return 0; +} diff --git a/probe-busybox/coreutils/date.c b/probe-busybox/coreutils/date.c new file mode 100644 index 00000000..03094bd7 --- /dev/null +++ b/probe-busybox/coreutils/date.c @@ -0,0 +1,404 @@ +/* vi: set sw=4 ts=4: */ +/* + * Mini date implementation for busybox + * + * by Matthew Grant + * + * iso-format handling added by Robert Griebl + * bugfixes and cleanup by Bernhard Reutner-Fischer + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +/* This 'date' command supports only 2 time setting formats, + all the GNU strftime stuff (its in libc, lets use it), + setting time using UTC and displaying it, as well as + an RFC 2822 compliant date output for shell scripting + mail commands */ + +/* Input parsing code is always bulky - used heavy duty libc stuff as + much as possible, missed out a lot of bounds checking */ + +//config:config DATE +//config: bool "date" +//config: default y +//config: help +//config: date is used to set the system date or display the +//config: current time in the given format. 
+//config: +//config:config FEATURE_DATE_ISOFMT +//config: bool "Enable ISO date format output (-I)" +//config: default y +//config: depends on DATE +//config: help +//config: Enable option (-I) to output an ISO-8601 compliant +//config: date/time string. +//config: +//config:# defaults to "no": stat's nanosecond field is a bit non-portable +//config:config FEATURE_DATE_NANO +//config: bool "Support %[num]N nanosecond format specifier" +//config: default n +//config: depends on DATE # clock_gettime() +//config: select PLATFORM_LINUX +//config: help +//config: Support %[num]N format specifier. Adds ~250 bytes of code. +//config: +//config:config FEATURE_DATE_COMPAT +//config: bool "Support weird 'date MMDDhhmm[[YY]YY][.ss]' format" +//config: default y +//config: depends on DATE +//config: help +//config: System time can be set by 'date -s DATE' and simply 'date DATE', +//config: but formats of DATE string are different. 'date DATE' accepts +//config: a rather weird MMDDhhmm[[YY]YY][.ss] format with completely +//config: unnatural placement of year between minutes and seconds. +//config: date -s (and other commands like touch -d) use more sensible +//config: formats (for one, ISO format YYYY-MM-DD hh:mm:ss.ssssss). +//config: +//config: With this option off, 'date DATE' is 'date -s DATE' support +//config: the same format. With it on, 'date DATE' additionally supports +//config: MMDDhhmm[[YY]YY][.ss] format. + +//applet:IF_DATE(APPLET(date, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_DATE) += date.o + +/* GNU coreutils 6.9 man page: + * date [OPTION]... [+FORMAT] + * date [-u|--utc|--universal] [MMDDhhmm[[CC]YY][.ss]] + * -d, --date=STRING + * display time described by STRING, not `now' + * -f, --file=DATEFILE + * like --date once for each line of DATEFILE + * -r, --reference=FILE + * display the last modification time of FILE + * -R, --rfc-2822 + * output date and time in RFC 2822 format. + * Example: Mon, 07 Aug 2006 12:34:56 -0600 + * --rfc-3339=TIMESPEC + * output date and time in RFC 3339 format. 
+ * TIMESPEC='date', 'seconds', or 'ns' + * Date and time components are separated by a single space: + * 2006-08-07 12:34:56-06:00 + * -s, --set=STRING + * set time described by STRING + * -u, --utc, --universal + * print or set Coordinated Universal Time + * + * Busybox: + * long options are not supported + * -f is not supported + * -I seems to roughly match --rfc-3339, but -I has _optional_ param + * (thus "-I seconds" doesn't work, only "-Iseconds"), + * and does not support -Ins + * -D FMT is a bbox extension for _input_ conversion of -d DATE + */ + +//usage:#define date_trivial_usage +//usage: "[OPTIONS] [+FMT] [TIME]" +//usage:#define date_full_usage "\n\n" +//usage: "Display time (using +FMT), or set time\n" +//usage: IF_NOT_LONG_OPTS( +//usage: "\n [-s] TIME Set time to TIME" +//usage: "\n -u Work in UTC (don't convert to local time)" +//usage: "\n -R Output RFC-2822 compliant date string" +//usage: ) IF_LONG_OPTS( +//usage: "\n [-s,--set] TIME Set time to TIME" +//usage: "\n -u,--utc Work in UTC (don't convert to local time)" +//usage: "\n -R,--rfc-2822 Output RFC-2822 compliant date string" +//usage: ) +//usage: IF_FEATURE_DATE_ISOFMT( +//usage: "\n -I[SPEC] Output ISO-8601 compliant date string" +//usage: "\n SPEC='date' (default) for date only," +//usage: "\n 'hours', 'minutes', or 'seconds' for date and" +//usage: "\n time to the indicated precision" +//usage: ) +//usage: IF_NOT_LONG_OPTS( +//usage: "\n -r FILE Display last modification time of FILE" +//usage: "\n -d TIME Display TIME, not 'now'" +//usage: ) IF_LONG_OPTS( +//usage: "\n -r,--reference FILE Display last modification time of FILE" +//usage: "\n -d,--date TIME Display TIME, not 'now'" +//usage: ) +//usage: IF_FEATURE_DATE_ISOFMT( +//usage: "\n -D FMT Use FMT for -d TIME conversion" +//usage: ) +//usage: "\n" +//usage: "\nRecognized TIME formats:" +//usage: "\n hh:mm[:ss]" +//usage: "\n [YYYY.]MM.DD-hh:mm[:ss]" +//usage: "\n YYYY-MM-DD hh:mm[:ss]" +//usage: "\n [[[[[YY]YY]MM]DD]hh]mm[.ss]" +//usage: IF_FEATURE_DATE_COMPAT( +//usage: "\n 'date TIME' form accepts MMDDhhmm[[YY]YY][.ss] instead" +//usage: ) +//usage: +//usage:#define date_example_usage +//usage: "$ date\n" +//usage: "Wed Apr 12 18:52:41 MDT 2000\n" + +#include "libbb.h" +#include "common_bufsiz.h" +#if ENABLE_FEATURE_DATE_NANO +# include +#endif + +enum { + OPT_RFC2822 = (1 << 0), /* R */ + OPT_SET = (1 << 1), /* s */ + OPT_UTC = (1 << 2), /* u */ + OPT_DATE = (1 << 3), /* d */ + OPT_REFERENCE = (1 << 4), /* r */ + OPT_UNIXSECS = (1 << 5), /* S */ + OPT_TIMESPEC = (1 << 6) * ENABLE_FEATURE_DATE_ISOFMT, /* I */ + OPT_HINT = (1 << 7) * ENABLE_FEATURE_DATE_ISOFMT, /* D */ +}; + +static void maybe_set_utc(int opt) +{ + if (opt & OPT_UTC) + putenv((char*)"TZ=UTC0"); +} + +#if ENABLE_LONG_OPTS +static const char date_longopts[] ALIGN1 = + "rfc-822\0" No_argument "R" + "rfc-2822\0" No_argument "R" + "set\0" Required_argument "s" + "utc\0" No_argument "u" + /* "universal\0" No_argument "u" */ + "date\0" Required_argument "d" + "reference\0" Required_argument "r" + ; +#endif + +int date_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int date_main(int argc UNUSED_PARAM, char **argv) +{ + struct timespec ts; + struct tm tm_time; + char buf_fmt_dt2str[64]; + unsigned opt; + int ifmt = -1; + char *date_str; + char *fmt_dt2str; + char *fmt_str2dt; + char *filename; + char *isofmt_arg = NULL; + char *check; + + opt_complementary = "d--s:s--d" + IF_FEATURE_DATE_ISOFMT(":R--I:I--R"); + IF_LONG_OPTS(applet_long_options = date_longopts;) + opt = getopt32(argv, 
"Rs:ud:r:S" + IF_FEATURE_DATE_ISOFMT("I::D:"), + &date_str, &date_str, &filename + IF_FEATURE_DATE_ISOFMT(, &isofmt_arg, &fmt_str2dt)); + argv += optind; + maybe_set_utc(opt); + + if (ENABLE_FEATURE_DATE_ISOFMT && (opt & OPT_TIMESPEC)) { + ifmt = 0; /* default is date */ + if (isofmt_arg) { + static const char isoformats[] ALIGN1 = + "date\0""hours\0""minutes\0""seconds\0"; /* ns? */ + ifmt = index_in_substrings(isoformats, isofmt_arg); + if (ifmt < 0) + bb_show_usage(); + } + } + + fmt_dt2str = NULL; + if (argv[0] && argv[0][0] == '+') { + fmt_dt2str = &argv[0][1]; /* skip over the '+' */ + argv++; + } + if (!(opt & (OPT_SET | OPT_DATE))) { + opt |= OPT_SET; + date_str = argv[0]; /* can be NULL */ + if (date_str) { +#if ENABLE_FEATURE_DATE_COMPAT + int len = strspn(date_str, "0123456789"); + if (date_str[len] == '\0' + || (date_str[len] == '.' + && isdigit(date_str[len+1]) + && isdigit(date_str[len+2]) + && date_str[len+3] == '\0' + ) + ) { + /* Dreaded MMDDhhmm[[CC]YY][.ss] format! + * It does not match -d or -s format. + * Some users actually do use it. + */ + len -= 8; + if (len < 0 || len > 4 || (len & 1)) + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + if (len != 0) { /* move YY or CCYY to front */ + char buf[4]; + memcpy(buf, date_str + 8, len); + memmove(date_str + len, date_str, 8); + memcpy(date_str, buf, len); + } + } +#endif + argv++; + } + } + if (*argv) + bb_show_usage(); + + /* Clear ts.tv_nsec, in case we need to set the time later */ + ts.tv_nsec= 0; + + /* Now we have parsed all the information except the date format + * which depends on whether the clock is being set or read */ + + if (opt & OPT_REFERENCE) { + struct stat statbuf; + xstat(filename, &statbuf); + ts.tv_sec = statbuf.st_mtime; +#if ENABLE_FEATURE_DATE_NANO + ts.tv_nsec = statbuf.st_mtim.tv_nsec; + /* Some toolchains use .st_mtimensec instead of st_mtim.tv_nsec. + * If you need #define _SVID_SOURCE 1 to enable st_mtim.tv_nsec, + * drop a mail to project mailing list please + */ +#endif + } else { +#if ENABLE_FEATURE_DATE_NANO + clock_gettime(CLOCK_REALTIME, &ts); +#else + time(&ts.tv_sec); +#endif + } + localtime_r(&ts.tv_sec, &tm_time); + + /* If date string is given, update tm_time, and maybe set date */ + if (date_str != NULL) { + /* Zero out fields - take her back to midnight! */ + tm_time.tm_sec = 0; + tm_time.tm_min = 0; + tm_time.tm_hour = 0; + + /* Process any date input to UNIX time since 1 Jan 1970 */ + if (opt & OPT_UNIXSECS) + { + ts.tv_sec= strtoul(date_str, &check, 10); + if (check[0] != '\0') + { + bb_error_msg_and_die(bb_msg_invalid_date, + date_str); + } + + /* Fill in tm_time */ + tm_time= *localtime(&ts.tv_sec); + } + else if (ENABLE_FEATURE_DATE_ISOFMT && (opt & OPT_HINT)) { + if (strptime(date_str, fmt_str2dt, &tm_time) == NULL) + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + } else { + parse_datestr(date_str, &tm_time); + } + + if (!(opt & OPT_UNIXSECS)) + { + /* Correct any day of week and day of year etc. 
fields */ + /* Be sure to recheck dst (but not if date is time_t format) */ + if (date_str[0] != '@') + tm_time.tm_isdst = -1; + ts.tv_sec = validate_tm_time(date_str, &tm_time); + + maybe_set_utc(opt); + } + + /* if setting time, set it */ + if ((opt & OPT_SET) && clock_settime(CLOCK_REALTIME, &ts) < 0) { + bb_perror_msg("can't set date"); + } + } + + /* Display output */ + + /* Deal with format string */ + if (fmt_dt2str == NULL) { + int i; + fmt_dt2str = buf_fmt_dt2str; + if (ENABLE_FEATURE_DATE_ISOFMT && ifmt >= 0) { + /* -I[SPEC]: 0:date 1:hours 2:minutes 3:seconds */ + strcpy(fmt_dt2str, "%Y-%m-%dT%H:%M:%S"); + i = 8 + 3 * ifmt; + if (ifmt != 0) { + /* TODO: if (ifmt==4) i += sprintf(&fmt_dt2str[i], ",%09u", nanoseconds); */ + format_utc: + fmt_dt2str[i++] = '%'; + fmt_dt2str[i++] = (opt & OPT_UTC) ? 'Z' : 'z'; + } + fmt_dt2str[i] = '\0'; + } else if (opt & OPT_RFC2822) { + /* -R. undo busybox.c setlocale */ + if (ENABLE_LOCALE_SUPPORT) + setlocale(LC_TIME, "C"); + strcpy(fmt_dt2str, "%a, %d %b %Y %H:%M:%S "); + i = sizeof("%a, %d %b %Y %H:%M:%S ")-1; + goto format_utc; + } else { /* default case */ + fmt_dt2str = (char*)"%a %b %e %H:%M:%S %Z %Y"; + } + } +#if ENABLE_FEATURE_DATE_NANO + else { + /* User-specified fmt_dt2str */ + /* Search for and process "%N" */ + char *p = fmt_dt2str; + while ((p = strchr(p, '%')) != NULL) { + int n, m; + unsigned pres, scale; + + p++; + if (*p == '%') { + p++; + continue; + } + n = strspn(p, "0123456789"); + if (p[n] != 'N') { + p += n; + continue; + } + /* We have "%[nnn]N" */ + p[-1] = '\0'; + p[n] = '\0'; + scale = 1; + pres = 9; + if (n) { + pres = xatoi_positive(p); + if (pres == 0) + pres = 9; + m = 9 - pres; + while (--m >= 0) + scale *= 10; + } + + m = p - fmt_dt2str; + p += n + 1; + fmt_dt2str = xasprintf("%s%0*u%s", fmt_dt2str, pres, (unsigned)ts.tv_nsec / scale, p); + p = fmt_dt2str + m; + } + } +#endif + +#define date_buf bb_common_bufsiz1 + setup_common_bufsiz(); + if (*fmt_dt2str == '\0') { + /* With no format string, just print a blank line */ + date_buf[0] = '\0'; + } else { + /* Handle special conversions */ + if (is_prefixed_with(fmt_dt2str, "%f")) { + fmt_dt2str = (char*)"%Y.%m.%d-%H:%M:%S"; + } + /* Generate output string */ + strftime(date_buf, COMMON_BUFSIZE, fmt_dt2str, &tm_time); + } + puts(date_buf); + + return EXIT_SUCCESS; +} diff --git a/probe-busybox/coreutils/dfrm.c b/probe-busybox/coreutils/dfrm.c new file mode 100644 index 00000000..046120d7 --- /dev/null +++ b/probe-busybox/coreutils/dfrm.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * dfrm.c + * Remove the contents of directories if the amount of free space gets too low + */ +//config:config DFRM +//config: bool "dfrm" +//config: default n +//config: help +//config: dfrm deletes files from directories when the amount of free space is +//config: too low + +//applet:IF_DFRM(APPLET(dfrm, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_DFRM) += dfrm.o + +//usage:#define dfrm_trivial_usage +//usage: " ..." +//usage:#define dfrm_full_usage +//usage: " ..." 
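+/*
+ * dfrm <dev> <limit> <dir>...: always prints the statfs() numbers of <dev>
+ * as a JSON RESULT line; only when the available space, computed as
+ * f_bavail * (f_bsize / 1024) KiB, is at or below <limit> (in KiB) does it
+ * unlink the entries of the listed directories.
+ *
+ * Worked example (illustrative figures only): f_bsize = 4096 and
+ * f_bavail = 2560 give 2560 * (4096 / 1024) = 10240 KiB available, so a
+ * limit of 20480 would trigger the cleanup while a limit of 5120 would not.
+ */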
+ +#include +#include +#include +#include +#include +#include + +#include "libbb.h" + +#define DBQ(str) "\"" #str "\"" + +int dfrm_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int dfrm_main(int argc, char *argv[]) +{ + int i; + size_t len; + uint32_t opt; + unsigned long limit, avail; + char *opt_atlas; + char *dev, *limit_str, *dir_str, *check, *path; + DIR *dir; + struct dirent *de; + struct statfs sb; + + opt_atlas= NULL; + opt_complementary= NULL; /* Just in case */ + opt= getopt32(argv, "A:", &opt_atlas); + + if (argc < optind+3) + { + printf("not enough arguments\n"); + return 1; + } + dev= argv[optind]; + limit_str= argv[optind+1]; + + if (statfs(dev, &sb) != 0) + { + fprintf(stderr, "statfs on %s failed: %s\n", + dev, strerror(errno)); + return 1; + } + + printf("RESULT { "); + if (opt_atlas) + { + printf( + DBQ(id) ":" DBQ(%s) ", %s, " DBQ(time) ": %ld, ", + opt_atlas, atlas_get_version_json_str(), + (long)time(NULL)); + } + printf(DBQ(bsize) ": %ld, " DBQ(blocks) ": %ld, " + DBQ(bfree) ": %ld, " DBQ(free) ": %ld", + (long)sb.f_bsize, (long)sb.f_blocks, (long)sb.f_bfree, + (long)sb.f_bfree*(sb.f_bsize/1024)); + printf(" }\n"); + + avail= sb.f_bavail*(sb.f_bsize/1024); + + limit= strtoul(limit_str, &check, 10); + if (check[0] != '\0') + { + fprintf(stderr, "unable to parse limit '%s'\n", limit_str); + return 1; + } + if (avail > limit) + return 1; + + for (i= optind+2; i < argc; i++) + { + dir_str= argv[i]; + + dir= opendir(dir_str); + if (!dir) + { + fprintf(stderr, "opendir failed for '%s'\n", dir_str); + continue; + } + + path= NULL; + while (de= readdir(dir), de != NULL) + { + if (strcmp(de->d_name, ".") == 0 || + strcmp(de->d_name, "..") == 0) + { + continue; + } + len= strlen(dir_str) + 1 + strlen(de->d_name) + 1; + path= realloc(path, len); /* Avoid leaks */ + if (path == NULL) + { + fprintf(stderr, + "unable to allocate %ld bytes\n", + (long)len); + continue; + } + strlcpy(path, dir_str, len); + strlcat(path, "/", len); + strlcat(path, de->d_name, len); + + if (unlink(path) != 0) + { + fprintf(stderr, "unable to unlink '%s': %s\n", + path, strerror(errno)); + continue; + } + fprintf(stderr, "rm %s\n", path); + } + closedir(dir); + free(path); path= NULL; + + } + + return 0; +} diff --git a/probe-busybox/eperd/Config.src b/probe-busybox/eperd/Config.src new file mode 100644 index 00000000..511e0a6d --- /dev/null +++ b/probe-busybox/eperd/Config.src @@ -0,0 +1,10 @@ +# +# For a description of the syntax of this configuration file, +# see scripts/kbuild/config-language.txt. +# + +menu "Eperd" + +INSERT + +endmenu diff --git a/probe-busybox/eperd/Kbuild.src b/probe-busybox/eperd/Kbuild.src new file mode 100644 index 00000000..8f86de6f --- /dev/null +++ b/probe-busybox/eperd/Kbuild.src @@ -0,0 +1,9 @@ +# Makefile for busybox +# +# Copyright (c) 2013 RIPE NCC +# +# Licensed under the GPL v2, see the file LICENSE in this tarball. + +lib-y:= + +INSERT diff --git a/probe-busybox/eperd/condmv.c b/probe-busybox/eperd/condmv.c new file mode 100644 index 00000000..b8a42be7 --- /dev/null +++ b/probe-busybox/eperd/condmv.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ * condmv.c -- move a file only if the destination doesn't exist + */ + +#include "libbb.h" +#include "eperd.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_FROM_REL ATLAS_DATA_NEW_REL +#define SAFE_PREFIX_TO_REL ATLAS_DATA_OUT_REL + +#define A_FLAG (1 << 0) +#define F_FLAG (1 << 1) + +#define DEFAULT_INTERVAL 60 + +struct condmvstate +{ + char *from; + char *to; + char *atlas; + int force; + int interval; +}; + +static void *condmv_init(int argc, char *argv[], + void (*done)(void *state, int error) UNUSED_PARAM) +{ + char *opt_add, *opt_interval, *from, *to, *check; + char *rebased_from, *rebased_to; + int interval; + uint32_t opt; + struct condmvstate *state; + + opt_add= NULL; + opt_interval= NULL; + opt_complementary= NULL; /* For when we are called by crond */ + opt= getopt32(argv, "!A:fi:", &opt_add, &opt_interval); + if (opt == (uint32_t)-1) + return NULL; + + if (argc != optind + 2) + { + crondlog(LVL8 "too many or too few arguments (required 2)"); + return NULL; + } + + if (opt_interval) + { + interval= strtoul(opt_interval, &check, 0); + if (interval <= 0) + { + crondlog(LVL8 "unable to parse interval '%s'", + opt_interval); + return NULL; + } + } + else + interval= DEFAULT_INTERVAL; + + from= argv[optind]; + to= argv[optind+1]; + + rebased_from= rebased_validated_filename(ATLAS_SPOOLDIR, from, SAFE_PREFIX_FROM_REL); + if (!rebased_from) + { + fprintf(stderr, "insecure from file '%s'\n", from); + return NULL; + } + rebased_to= rebased_validated_filename(ATLAS_SPOOLDIR, to, SAFE_PREFIX_TO_REL); + if (!rebased_to) + { + free(rebased_from); rebased_from= NULL; + fprintf(stderr, "insecure to file '%s'\n", to); + return NULL; + } + + state= malloc(sizeof(*state)); + state->from= rebased_from; rebased_from= NULL; + state->to= rebased_to; rebased_to= NULL; + state->atlas= opt_add ? strdup(opt_add) : NULL; + state->force= !!(opt & F_FLAG); + state->interval= interval; + + return state; +} + +static void condmv_start(void *state) +{ + size_t len; + time_t mytime; + char *to; + FILE *file; + struct condmvstate *condmvstate; + struct stat sb; + + condmvstate= state; + + len= strlen(condmvstate->to) + 20; + to= malloc(len); + snprintf(to, len, "%s.%llu", condmvstate->to, + (unsigned long long)time(NULL)/condmvstate->interval); + + crondlog(LVL7 "condmv_start: destination '%s'\n", to); + + if (stat(to, &sb) == 0 && !condmvstate->force) + { + free(to); + return; + } + + if (condmvstate->atlas) + { + mytime = time(NULL); + /* We have to add something to the existing file before moving + * to. 
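+ * The appended line is "<-A string> <unix time> <source file>", the same
+ * marker that the stand-alone condmv applet writes.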
+ */ + file= fopen(condmvstate->from, "a"); + if (file == NULL) + { + crondlog(LVL9 "condmv: unable to append to '%s': %s\n", + condmvstate->from, strerror(errno)); + free(to); + return; + } + if (fprintf(file, "%s %llu %s\n", condmvstate->atlas, + (unsigned long long)mytime, + condmvstate->from) < 0) + { + crondlog(LVL9 "condmv: unable to append to '%s': %s\n", + condmvstate->from, strerror(errno)); + fclose(file); + free(to); + return; + } + if (fclose(file) != 0) + { + crondlog(LVL9 "condmv: unable to close '%s': %s\n", + condmvstate->from, strerror(errno)); + free(to); + return; + } + } + if (rename(condmvstate->from, to) != 0) + { + crondlog(LVL9 "condmv: unable to rename '%s' to '%s': %s\n", + condmvstate->from, to, strerror(errno)); + } + free(to); +} + +static int condmv_delete(void *state) +{ + struct condmvstate *condmvstate; + + condmvstate= state; + free(condmvstate->from); + condmvstate->from= NULL; + free(condmvstate->to); + condmvstate->to= NULL; + free(condmvstate->atlas); + condmvstate->atlas= NULL; + + free(condmvstate); + + return 1; +} + +struct testops condmv_ops = { condmv_init, condmv_start, condmv_delete }; + diff --git a/probe-busybox/eperd/eooqd.c b/probe-busybox/eperd/eooqd.c new file mode 100644 index 00000000..01c780e6 --- /dev/null +++ b/probe-busybox/eperd/eooqd.c @@ -0,0 +1,912 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * eooqd.c Libevent-based One-off queue daemon + */ +//config:config EOOQD +//config: bool "Eooqd" +//config: default n +//config: select FEATURE_SUID +//config: select FEATURE_SYSLOG +//config: help +//config: Eooqd runs Atlas measurements just once. + +//applet:IF_EOOQD(APPLET(eooqd, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EOOQD) += eooqd.o + +//usage:#define eooqd_trivial_usage +//usage: "" +//usage:#define eooqd_full_usage + +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "eperd.h" +#include "atlas_path.h" + +#define SUFFIX ".curr" +#define OOQD_NEW_PREFIX_REL "data/new/ooq" +#define OOQD_OUT_PREFIX_REL "data/out/ooq" +#define ATLAS_SESSION_FILE_REL "status/con_session_id.txt" +#define REPORT_HEADER_REL "status/p_to_c_report_header" +#define SESSION_ID_REL "status/con_session_id.txt" +#define OOQ_SENT_REL "data/new/ooq_sent.vol" + +#define ATLAS_NARGS 64 /* Max arguments to a built-in command */ +#define ATLAS_ARGSIZE 512 /* Max size of the command line */ + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +#define DBQ(str) "\"" #str "\"" + +#define BARRIER_CMD "barrier" +#define POST_CMD "post" +#define RELOAD_RESOLV_CONF_CMD "reload_resolv_conf" + +#define RESOLV_CONF "/etc/resolv.conf" + +struct slot +{ + void *cmdstate; + struct builtin *bp; +}; + +static struct +{ + char *queue_file; + const char *atlas_id; + char curr_qfile[256]; + FILE *curr_file; + int max_busy; + int curr_busy; + int curr_index; + struct slot *slots; + + int barrier; + char *barrier_file; +} *state; + +static struct builtin +{ + const char *cmd; + struct testops *testops; +} builtin_cmds[]= +{ + { "evhttpget", &httpget_ops }, + { "evntp", &ntp_ops }, + { "evping", &ping_ops }, + { "evtdig", &tdig_ops }, +#if ENABLE_EVSSLGETCERT + { "evsslgetcert", &sslgetcert_ops }, +#endif +#if ENABLE_EVTLSSCAN + { "evtlsscan", &tlsscan_ops }, +#endif + { "evtraceroute", &traceroute_ops }, + { NULL, NULL } +}; + +static const char *atlas_id; +static const char *queue_id; + +static char *resolv_conf; +static char output_filename[80]; + 
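+/*
+ * Rough control flow (a sketch based on the setup in eooqd_main() and the
+ * handlers below): a persistent libevent timer runs checkQueue() every
+ * second; it renames the queue file to its ".curr" counterpart for
+ * at-most-once handling and hands each line to one of the built-in
+ * measurement applets above, keeping at most max_busy of them running in
+ * the slots array. A second timer runs re_post() every 60 seconds to
+ * flush accumulated results.
+ */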
+static void report(const char *fmt, ...); +static void report_err(const char *fmt, ...); + +static void checkQueue(evutil_socket_t fd, short what, void *arg); +static int add_line(void); +static void cmddone(void *cmdstate, int error); +static void re_post(evutil_socket_t fd, short what, void *arg); +static void post_results(int force_post); +static void skip_space(char *cp, char **ncpp); +static void skip_nonspace(char *cp, char **ncpp); +static void find_eos(char *cp, char **ncpp); +static void check_resolv_conf2(const char *out_file, const char *atlasid); +static const char *get_session_id(void); + +extern int httppost_main(int argc, char *argv[]); /* in networking/httppost.c */ + +int eooqd_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int eooqd_main(int argc, char *argv[]) +{ + int r; + size_t len; + char *pid_file_name, *interface_name, *instance_id_str; + char *check; + struct event *checkQueueEvent, *rePostEvent; + struct timeval tv; + struct rlimit limit; + struct stat sb; + + atlas_id= NULL; + interface_name= NULL; + instance_id_str= NULL; + pid_file_name= NULL; + queue_id= ""; + + (void)getopt32(argv, "A:I:i:P:q:", &atlas_id, + &interface_name, &instance_id_str, + &pid_file_name, &queue_id); + + if (argc != optind+1) + { + bb_show_usage(); + return 1; + } + + instance_id= 0; + if (instance_id_str) + { + instance_id= strtoul(instance_id_str, &check, 0); + if (check[0] != '\0') + { + report("unable to parse instance id '%s'", + instance_id_str); + return 1; + } + } + + if (interface_name) + { + len= strlen(RESOLV_CONF) + 1 + + strlen(interface_name) + 1; + resolv_conf= malloc(len); + snprintf(resolv_conf, len, "%s.%s", + RESOLV_CONF, interface_name); + + /* Check if this resolv.conf exists. If it doen't, switch + * to the standard one. 
+ */ + if (stat(resolv_conf, &sb) == -1) + { + free(resolv_conf); + resolv_conf= strdup(RESOLV_CONF); + } + } + else + { + resolv_conf= strdup(RESOLV_CONF); + } + + if(pid_file_name) + { + if (!check_pidfile(pid_file_name)) { + report("a process is still running"); + return 1; + } + + if (write_pidfile(pid_file_name) < 0) { + report("error writing PID file '%s'; %s", pid_file_name, strerror(errno)); + return 1; + } + } + + state = xzalloc(sizeof(*state)); + + state->atlas_id= atlas_id; + state->queue_file= argv[optind]; + + state->max_busy= 10; + + state->slots= xzalloc(sizeof(*state->slots) * state->max_busy); + + if (strlen(state->queue_file) + strlen(SUFFIX) + 1 > + sizeof(state->curr_qfile)) + { + report("filename too long ('%s')", state->queue_file); + return 1; + } + + strlcpy(state->curr_qfile, state->queue_file, + sizeof(state->curr_qfile)); + strlcat(state->curr_qfile, SUFFIX, sizeof(state->curr_qfile)); + + snprintf(output_filename, sizeof(output_filename), + "%s/" OOQD_OUT_PREFIX_REL "%s/ooq.out", + ATLAS_SPOOLDIR, queue_id); + + signal(SIGQUIT, SIG_DFL); + limit.rlim_cur= RLIM_INFINITY; + limit.rlim_max= RLIM_INFINITY; + setrlimit(RLIMIT_CORE, &limit); + + /* Ignore SIGPIPE, broken TCP sessions may trigger them */ + signal(SIGPIPE, SIG_IGN); + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + crondlog(DIE9 "event_base_new failed"); /* exits */ + } + DnsBase= evdns_base_new(EventBase, 0 /*initialize*/); + if (!DnsBase) + { + event_base_free(EventBase); + crondlog(DIE9 "evdns_base_new failed"); /* exits */ + } + + if (interface_name) + { + r= evdns_base_set_interface(DnsBase, interface_name); + if (r == -1) + { + event_base_free(EventBase); + crondlog(DIE9 "evdns_base_set_interface failed"); + /* exits */ + } + } + + r = evdns_base_resolv_conf_parse(DnsBase, DNS_OPTIONS_ALL, + resolv_conf); + if (r == -1) + { + event_base_free(EventBase); + crondlog(DIE9 "evdns_base_resolv_conf_parse failed"); /* exits */ + } + + checkQueueEvent= event_new(EventBase, -1, EV_TIMEOUT|EV_PERSIST, + checkQueue, NULL); + if (!checkQueueEvent) + crondlog(DIE9 "event_new failed"); /* exits */ + tv.tv_sec= 1; + tv.tv_usec= 0; + event_add(checkQueueEvent, &tv); + + rePostEvent= event_new(EventBase, -1, EV_TIMEOUT|EV_PERSIST, + re_post, NULL); + if (!rePostEvent) + crondlog(DIE9 "event_new failed"); /* exits */ + tv.tv_sec= 60; + tv.tv_usec= 0; + event_add(rePostEvent, &tv); + + r= event_base_loop(EventBase, 0); + if (r != 0) + crondlog(LVL9 "event_base_loop failed"); + return 0; +} + +static void checkQueue(evutil_socket_t fd UNUSED_PARAM, + short what UNUSED_PARAM, void *arg UNUSED_PARAM) +{ + int r; + struct stat sb; + + if (!state->curr_file) + { + if (stat(state->queue_file, &sb) == -1) + { + if (errno == ENOENT) + { + return; + } + report_err("stat failed"); + return; + } + + /* Remove curr_qfile. Renaming queue_file to curr_qfile + * will silently fail to delete queue_file if queue_file and + * curr_qfile are hard links. + */ + if (unlink(state->curr_qfile) == -1) + { + /* Expect ENOENT */ + if (errno != ENOENT) + { + report_err("unlink failed"); + return; + } + } + + /* Try to move queue_file to curr_qfile. This provides at most + * once behavior and allows producers to create a new + * queue_file while we process the old one. + */ + if (rename(state->queue_file, state->curr_qfile) == -1) + { + /* We verified queue_file is there so any failure is + * fatal. 
+ */ + report_err("rename failed"); + return; + } + + state->curr_file= fopen(state->curr_qfile, "r"); + if (state->curr_file == NULL) + { + report_err("open '%s' failed", state->curr_qfile); + return; + } + } + + while (state->curr_file && state->curr_busy < state->max_busy) + { + r= add_line(); + if (r == -1) + break; /* Wait for barrier to complete */ + } + + check_resolv_conf2(output_filename, atlas_id); +} + +static int add_line(void) +{ + char c; + int i, argc, fd, skip, slot; + size_t len; + char *cp, *ncp; + struct builtin *bp; + char *p, *validated_fn; + const char *reason; + void *cmdstate; + FILE *fn; + const char *argv[ATLAS_NARGS]; + char args[ATLAS_ARGSIZE]; + char cmdline[256]; + char filename[80]; + char filename2[80]; + struct stat sb; + + if (state->barrier) + { + if (state->curr_busy > 0) + return -1; + fd= open(state->barrier_file, O_CREAT, 0); + if (fd != -1) + close(fd); + else + { + report_err("unable to create barrier file '%s'", + state->barrier_file); + } + free(state->barrier_file); + state->barrier_file= NULL; + state->barrier= 0; + } + + if (fgets(cmdline, sizeof(cmdline), state->curr_file) == NULL) + { + if (ferror(state->curr_file)) + report_err("error reading queue file"); + fclose(state->curr_file); + state->curr_file= NULL; + return 0; + } + + cp= strchr(cmdline, '\n'); + if (cp) + *cp= '\0'; + + crondlog(LVL7 "atlas_run: looking for '%s'", cmdline); + + /* Check for post command */ + if (strcmp(cmdline, POST_CMD) == 0) + { + /* Trigger a post */ + post_results(1 /* force_post */); + return 0; /* Done */ + } + + /* Check for barrier command */ + len= strlen(BARRIER_CMD); + if (strlen(cmdline) >= len && + strncmp(cmdline, BARRIER_CMD, len) == 0 && + cmdline[len] == ' ') + { + p= &cmdline[len]; + while (*p != '\0' && *p == ' ') + p++; + validated_fn= rebased_validated_filename(ATLAS_SPOOLDIR, + p, SAFE_PREFIX_REL); + if (validated_fn == NULL) + { + crondlog(LVL8 "insecure file '%s'. 
allowed path '%s'", + p, SAFE_PREFIX_REL); + } + state->barrier= 1; + state->barrier_file= validated_fn; + return 0; + } + + /* Check for the reload resolv.conf command */ + if (strcmp(cmdline, RELOAD_RESOLV_CONF_CMD) == 0) + { + /* Trigger a reload */ + check_resolv_conf2(output_filename, atlas_id); + return 0; /* Done */ + } + + cmdstate= NULL; + reason= NULL; + for (bp= builtin_cmds; bp->cmd != NULL; bp++) + { + len= strlen(bp->cmd); + if (strncmp(cmdline, bp->cmd, len) != 0) + continue; + if (cmdline[len] != ' ') + continue; + break; + } + if (bp->cmd == NULL) + { + reason="command not found"; + goto error; + } + + crondlog(LVL7 "found cmd '%s' for '%s'", bp->cmd, cmdline); + + len= strlen(cmdline); + if (len+1 > ATLAS_ARGSIZE) + { + crondlog(LVL8 "atlas_run: command line too big: '%s'", cmdline); + reason="command line too big"; + goto error; + } + strcpy(args, cmdline); + + /* Split the command line */ + cp= args; + argc= 0; + argv[argc]= cp; + skip_nonspace(cp, &ncp); + cp= ncp; + + for(;;) + { + /* End of list */ + if (cp[0] == '\0') + { + argc++; + break; + } + + /* Find start of next argument */ + skip_space(cp, &ncp); + + /* Terminate current one */ + cp[0]= '\0'; + argc++; + + if (argc >= ATLAS_NARGS-1) + { + crondlog( + LVL8 "atlas_run: command line '%s', too many arguments", + cmdline); + reason="too many arguments"; + goto error; + } + + cp= ncp; + argv[argc]= cp; + if (cp[0] == '"') + { + /* Special code for strings */ + find_eos(cp+1, &ncp); + if (ncp[0] != '"') + { + crondlog( + LVL8 "atlas_run: command line '%s', end of string not found", + cmdline); + reason="end of string not found"; + goto error; + } + argv[argc]= cp+1; + cp= ncp; + cp[0]= '\0'; + cp++; + } + else + { + skip_nonspace(cp, &ncp); + cp= ncp; + } + } + + if (argc >= ATLAS_NARGS-2) + { + crondlog( + LVL8 "atlas_run: command line '%s', too many arguments", + cmdline); + reason="too many arguments"; + goto error; + } + + /* find a slot for this command */ + for (skip= 1; skip <= state->max_busy; skip++) + { + slot= (state->curr_index+skip) % state->max_busy; + if (state->slots[slot].cmdstate == NULL) + break; + } + if (state->slots[slot].cmdstate != NULL) + crondlog(DIE9 "no empty slot?"); + argv[argc++]= "-O"; + snprintf(filename, sizeof(filename), + "%s/" OOQD_NEW_PREFIX_REL "%s.%d", + ATLAS_SPOOLDIR, queue_id, slot); + argv[argc++]= filename; + + argv[argc]= NULL; + + for (i= 0; itestops->init(argc, (char **)argv, cmddone); + crondlog(LVL7 "init returned %p for '%s'", cmdstate, cmdline); + + if (cmdstate != NULL) + { + state->slots[slot].cmdstate= cmdstate; + state->slots[slot].bp= bp; + state->curr_index= slot; + state->curr_busy++; + + bp->testops->start(cmdstate); + } + +error: + if (cmdstate == NULL) + { + snprintf(filename, sizeof(filename), + "%s/" OOQD_NEW_PREFIX_REL "%s", + ATLAS_SPOOLDIR, queue_id); + fn= fopen(filename, "a"); + if (!fn) + { + crondlog(DIE9 "unable to append to '%s'", filename); + } + fprintf(fn, "RESULT { "); + if (state->atlas_id) + fprintf(fn, DBQ(id) ":" DBQ(%s) ", ", state->atlas_id); + fprintf(fn, "%s, " DBQ(time) ":%ld, ", + atlas_get_version_json_str(), (long)time(NULL)); + if (reason) + fprintf(fn, DBQ(reason) ":" DBQ(%s) ", ", reason); + fprintf(fn, DBQ(cmd) ": \""); + for (p= cmdline; *p; p++) + { + c= *p; + if (c == '"' || c == '\\') + fprintf(fn, "\\%c", c); + else if (isprint_asciionly((unsigned char)c)) + fputc(c, fn); + else + fprintf(fn, "\\u%04x", (unsigned char)c); + } + fprintf(fn, "\""); + fprintf(fn, " }\n"); + fclose(fn); + + snprintf(filename2, 
sizeof(filename2), + "%s/" OOQD_OUT_PREFIX_REL "%s/ooq", + ATLAS_SPOOLDIR, queue_id); + if (stat(filename2, &sb) == -1 && + stat(filename, &sb) == 0) + { + if (rename(filename, filename2) == -1) + { + report_err("move '%s' to '%s' failed", + filename, filename2); + } + } + post_results(0 /* !force_post */); + } + + return 0; +} + +static void cmddone(void *cmdstate, int error UNUSED_PARAM) +{ + int i, r; + char from_filename[80]; + char to_filename[80]; + struct stat sb; + + report("command is done for cmdstate %p", cmdstate); + + /* Find command */ + for (i= 0; imax_busy; i++) + { + if (state->slots[i].cmdstate == cmdstate) + break; + } + if (i >= state->max_busy) + { + report("cmddone: state state %p", cmdstate); + return; + } + r= state->slots[i].bp->testops->delete(cmdstate); + if (r != 0) + { + state->slots[i].cmdstate= NULL; + state->curr_busy--; + } + else + report("cmddone: strange, cmd %p is busy", cmdstate); + + snprintf(from_filename, sizeof(from_filename), + "%s/" OOQD_NEW_PREFIX_REL "%s.%d", + ATLAS_SPOOLDIR, queue_id, i); + + snprintf(to_filename, sizeof(to_filename), + "%s/%s/%s/%d", ATLAS_SPOOLDIR, + OOQD_OUT_PREFIX_REL, queue_id, i); + + if (stat(to_filename, &sb) == 0) + { + report("output file '%s' is busy", to_filename); + + /* continue, we may have to post */ + } + else if (rename(from_filename, to_filename) == -1) + { + report_err("move '%s' to '%s' failed", + from_filename, to_filename); + } + + if (state->curr_busy == 0) + { + post_results(0 /* !force_post */); + } +} + +static void check_resolv_conf2(const char *out_file, const char *atlasid) +{ + static time_t last_time= -1; + + int r; + FILE *fn; + struct stat sb; + + r= stat(resolv_conf, &sb); + if (r == -1) + { + crondlog(LVL8 "error accessing resolv.conf: %s", + strerror(errno)); + return; + } + + if (sb.st_mtime == last_time) + { + crondlog(LVL7 "check_resolv_conf2: no change (time %d)", + sb.st_mtime); + return; /* resolv.conf did not change */ + } + evdns_base_clear_nameservers_and_suspend(DnsBase); + r= evdns_base_resolv_conf_parse(DnsBase, DNS_OPTIONS_ALL, + resolv_conf); + evdns_base_resume(DnsBase); + + if ((r != 0 || last_time != -1) && out_file != NULL) + { + fn= fopen(out_file, "a"); + if (!fn) + crondlog(DIE9 "unable to append to '%s'", out_file); + fprintf(fn, "RESULT { "); + if (atlasid) + fprintf(fn, DBQ(id) ":" DBQ(%s) ", ", atlasid); + fprintf(fn, "%s, " DBQ(time) ":%ld, ", + atlas_get_version_json_str(), (long)time(NULL)); + fprintf(fn, DBQ(event) ": " DBQ(load resolv.conf) + ", " DBQ(result) ": %d", r); + + fprintf(fn, " }\n"); + fclose(fn); + } + + last_time= sb.st_mtime; +} + +static void re_post(evutil_socket_t fd UNUSED_PARAM, short what UNUSED_PARAM, + void *arg UNUSED_PARAM) +{ + /* Just call post_results every once in awhile in case some results + * were left behind. + */ + post_results(0 /* !force_post */); +} + +static void post_results(int force_post) +{ + int i, j, r, need_post, probe_id; + char *fn_header, *fn_session_id, *fn_ooq_sent; + const char *session_id; + const char *argv[20]; + char from_filename[80]; + char to_filename[80]; + char url[200]; + struct stat sb; + + for (j= 0; j<5; j++) + { + /* Grab results and see if something need to be done. 
*/ + need_post= force_post; + force_post= 0; /* Only one time */ + + snprintf(from_filename, sizeof(from_filename), + "%s/" OOQD_NEW_PREFIX_REL "%s", + ATLAS_SPOOLDIR, queue_id); + snprintf(to_filename, sizeof(to_filename), + "%s/" OOQD_OUT_PREFIX_REL "%s/ooq", + ATLAS_SPOOLDIR, queue_id); + if (stat(to_filename, &sb) == 0) + { + /* There is more to post */ + need_post= 1; + } else if (stat(from_filename, &sb) == 0) + { + if (rename(from_filename, to_filename) == 0) + need_post= 1; + else + { + report_err("move '%s' to '%s' failed", + from_filename, to_filename); + } + } + for (i= 0; imax_busy; i++) + { + snprintf(from_filename, sizeof(from_filename), + "%s/" OOQD_NEW_PREFIX_REL "%s.%d", + ATLAS_SPOOLDIR, queue_id, i); + snprintf(to_filename, sizeof(to_filename), + "%s/" OOQD_OUT_PREFIX_REL "%s/%d", + ATLAS_SPOOLDIR, queue_id, i); + if (stat(to_filename, &sb) == 0) + { + /* There is more to post */ + need_post= 1; + continue; + } + if (stat(from_filename, &sb) == -1) + { + /* Nothing to do */ + continue; + } + + need_post= 1; + if (rename(from_filename, to_filename) == -1) + { + report_err("move '%s' to '%s' failed", + from_filename, to_filename); + } + } + + if (!need_post) + break; + + probe_id= get_probe_id(); + if (probe_id == -1) + break; + session_id= get_session_id(); + if (session_id == NULL) + break; + snprintf(url, sizeof(url), + "http://127.0.0.1:8080/?PROBE_ID=%d&SESSION_ID=%s&SRC=oneoff", + probe_id, session_id); + snprintf(from_filename, sizeof(from_filename), + "%s/" OOQD_OUT_PREFIX_REL "%s", + ATLAS_SPOOLDIR, queue_id); + + asprintf(&fn_header, "%s/%s", ATLAS_RUNDIR, REPORT_HEADER_REL); + asprintf(&fn_session_id, "%s/%s", ATLAS_RUNDIR, SESSION_ID_REL); + asprintf(&fn_ooq_sent, "%s/%s", ATLAS_SPOOLDIR, OOQ_SENT_REL); + i= 0; + argv[i++]= "httppost"; + argv[i++]= "-A"; + argv[i++]= "9015"; + argv[i++]= "--delete-file"; + argv[i++]= "--post-header"; + argv[i++]= fn_header; + argv[i++]= "--post-dir"; + argv[i++]= from_filename; + argv[i++]= "--post-footer"; + argv[i++]= fn_session_id; + argv[i++]= "-O"; + argv[i++]= fn_ooq_sent; + argv[i++]= url; + argv[i]= NULL; + r= httppost_main(i, (char **)argv); + free(fn_header); fn_header= NULL; + free(fn_session_id); fn_session_id= NULL; + free(fn_ooq_sent); fn_ooq_sent= NULL; + if (r != 0) + { + report("httppost failed with %d", r); + return; + } + + } +} + +static const char *get_session_id(void) +{ + static char session_id[80]; + + char *cp, *fn; + FILE *file; + + asprintf(&fn, "%s/%s", ATLAS_RUNDIR, ATLAS_SESSION_FILE_REL); + file= fopen(fn, "r"); + free(fn); fn= NULL; + if (file == NULL) + { + return NULL; + } + + /* Skip first empty line */ + fgets(session_id, sizeof(session_id), file); + + if (fgets(session_id, sizeof(session_id), file) == NULL) + { + fclose(file); + return NULL; + } + fclose(file); + cp= strchr(session_id, '\n'); + if (cp) + *cp= '\0'; + cp= strrchr(session_id, ' '); + if (!cp) + return NULL; + return cp+1; +} + +static void skip_space(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && isspace(*(unsigned char *)cp)) + cp++; + *ncpp= cp; +} + +static void skip_nonspace(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && !isspace(*(unsigned char *)cp)) + cp++; + *ncpp= cp; +} + +static void find_eos(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && cp[0] != '"') + cp++; + *ncpp= cp; +} + +static void report(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + fprintf(stderr, "ooqd: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + + va_end(ap); +} + +static void report_err(const char *fmt, ...) +{ + int terrno; + va_list ap; + + terrno= errno; + + va_start(ap, fmt); + fprintf(stderr, "ooqd: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, ": %s\n", strerror(terrno)); + + va_end(ap); +} diff --git a/probe-busybox/eperd/eperd.c b/probe-busybox/eperd/eperd.c new file mode 100644 index 00000000..c8282839 --- /dev/null +++ b/probe-busybox/eperd/eperd.c @@ -0,0 +1,1341 @@ +/* vi: set sw=4 ts=4: + * eperd formerly crond but now heavily hacked for Atlas + * + * crond -d[#] -c -f -b + * + * run as root, but NOT setuid root + * + * Copyright(c) 2013 RIPE NCC + * Copyright 1994 Matthew Dillon (dillon@apollo.west.oic.com) + * (version 2.3.2) + * Vladimir Oleynik (C) 2002 + * + * Licensed under the GPL v2 or later, see the file LICENSE in this tarball. + */ +//config:config EPERD +//config: bool "Eperd" +//config: default n +//config: select FEATURE_SUID +//config: select FEATURE_SYSLOG +//config: help +//config: Eperd periodically runs Atlas measurements. It is based on crond. + +//applet:IF_EPERD(APPLET(eperd, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EPERD) += eooqd.o eperd.o condmv.o http2.o httpget.o ping.o sslgetcert.o traceroute.o evhttpget.o evping.o evsslgetcert.o evtdig.o evtraceroute.o tcputil.o readresolv.o evntp.o ntp.o + +//usage:#define eperd_trivial_usage +//usage: "-fbSAD -P pidfile -l N -d N -L LOGFILE -c DIR" +//usage:#define eperd_full_usage "\n\n" +//usage: " -f Foreground" +//usage: "\n -b Background (default)" +//usage: "\n -S Log to syslog (default)" +//usage: "\n -l Set log level. 0 is the most verbose, default 8" +//usage: "\n -d Set log level, log to stderr" +//usage: "\n -L Log to file" +//usage: "\n -c Working dir" +//usage: "\n -A Atlas specific processing" +//usage: "\n -D Periodically kick watchdog" +//usage: "\n -P pidfile to use" + +#include "libbb.h" +#include "atlas_path.h" +#include +#include +#include +#include +#include +#include + +#include "eperd.h" + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +/* glibc frees previous setenv'ed value when we do next setenv() + * of the same variable. uclibc does not do this! */ +#if (defined(__GLIBC__) && !defined(__UCLIBC__)) /* || OTHER_SAFE_LIBC... 
*/ +#define SETENV_LEAKS 0 +#else +#define SETENV_LEAKS 1 +#endif + +#define DBQ(str) "\"" #str "\"" + +#ifndef CRONTABS +#define CRONTABS "/var/spool/cron/crontabs" +#endif +#ifndef TMPDIR +#define TMPDIR "/var/spool/cron" +#endif +#ifndef SENDMAIL +#define SENDMAIL "sendmail" +#endif +#ifndef SENDMAIL_ARGS +#define SENDMAIL_ARGS "-ti", "oem" +#endif +#ifndef CRONUPDATE +#define CRONUPDATE "cron.update" +#endif +#ifndef MAXLINES +#define MAXLINES 256 /* max lines in non-root crontabs */ +#endif + +#define MAX_INTERVAL (2*366*24*3600) /* No intervals bigger than 2 years */ + +#define URANDOM_DEV "/dev/urandom" + +#define RESOLV_CONF "/etc/resolv.conf" + +#ifndef ENABLE_FEATURE_CROND_CALL_SENDMAIL +#define ENABLE_FEATURE_CROND_CALL_SENDMAIL 0 +#endif +struct CronLine { + struct CronLine *cl_Next; + char *cl_Shell; /* shell command */ + pid_t cl_Pid; /* running pid, 0, or armed (-1) */ +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + int cl_MailPos; /* 'empty file' size */ + smallint cl_MailFlag; /* running pid is for mail */ + char *cl_MailTo; /* whom to mail results */ +#endif + unsigned interval; + time_t nextcycle; + time_t start_time; + time_t end_time; + enum distribution { DISTR_NONE, DISTR_UNIFORM } distribution; + int distr_param; /* Parameter for distribution, if any */ + struct timeval distr_offset; /* Current offset to randomize the + * interval + */ + struct event event; + struct testops *testops; + void *teststate; + + /* For cleanup */ + char needs_delete; + + /* For debugging */ + time_t lasttime; + time_t nexttime; + time_t waittime; + time_t debug_cycle; + time_t debug_generated; +}; + + +#define DaemonUid 0 + + +enum { + OPT_i = (1 << 0), + OPT_l = (1 << 1), + OPT_L = (1 << 2), + OPT_f = (1 << 3), + OPT_c = (1 << 4), + OPT_A = (1 << 5), + OPT_D = (1 << 6), + OPT_P = (1 << 7), + OPT_d = (1 << 8) * ENABLE_FEATURE_CROND_D, +}; +#if ENABLE_FEATURE_CROND_D +#define DebugOpt (option_mask32 & OPT_d) +#else +#define DebugOpt 0 +#endif + + +struct globals G; +#define INIT_G() do { \ + LogLevel = 8; \ + CDir = CRONTABS; \ +} while (0) + +static int do_kick_watchdog; +static char *out_filename= NULL; +static char *atlas_id= NULL; +static char *resolv_conf; + +static void CheckUpdates(evutil_socket_t fd, short what, void *arg); +static void CheckUpdatesHour(evutil_socket_t fd, short what, void *arg); +static void SynchronizeDir(void); +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL +static void EndJob(const char *user, CronLine *line); +#else +#define EndJob(user, line) ((line)->cl_Pid = 0) +#endif +static void DeleteFile(void); +static int Insert(CronLine *line); +static void Start(CronLine *line); +static void atlas_init(CronLine *line); +static void RunJob(evutil_socket_t fd, short what, void *arg); + +void crondlog(const char *ctl, ...) 
+{ + va_list va; + int level = (ctl[0] & 0x1f); + + va_start(va, ctl); + if (level >= (int)LogLevel) { + /* Debug mode: all to (non-redirected) stderr, */ + /* Syslog mode: all to syslog (logmode = LOGMODE_SYSLOG), */ + if (!DebugOpt && LogFile) { + /* Otherwise (log to file): we reopen log file at every write: */ + int logfd = open3_or_warn(LogFile, O_WRONLY | O_CREAT | O_APPEND, 0600); + if (logfd >= 0) + xmove_fd(logfd, STDERR_FILENO); + } +// TODO: ERR -> error, WARN -> warning, LVL -> info + bb_verror_msg(ctl + 1, va, /* strerr: */ NULL); + } + va_end(va); + if (ctl[0] & 0x80) + exit(20); +} + +static void kick_watchdog(void) +{ + if(do_kick_watchdog) + { + int fdwatchdog = open("/dev/watchdog", O_RDWR); + if (fdwatchdog != -1) + { + write(fdwatchdog, "1", 1); + close(fdwatchdog); + } + } +} + +#if 0 +static void FAST_FUNC Xbb_daemonize_or_rexec(int flags, char **argv) +{ + int fd; + + if (flags & DAEMON_CHDIR_ROOT) + xchdir("/"); + + if (flags & DAEMON_DEVNULL_STDIO) { + close(0); + close(1); + close(2); + } + + fd = open(bb_dev_null, O_RDWR); + if (fd < 0) { + /* NB: we can be called as bb_sanitize_stdio() from init + * or mdev, and there /dev/null may legitimately not (yet) exist! + * Do not use xopen above, but obtain _ANY_ open descriptor, + * even bogus one as below. */ + fd = xopen("/", O_RDONLY); /* don't believe this can fail */ + } + + while ((unsigned)fd < 2) + fd = dup(fd); /* have 0,1,2 open at least to /dev/null */ + + if (!(flags & DAEMON_ONLY_SANITIZE)) { + //forkexit_or_rexec(argv); + /* if daemonizing, make sure we detach from stdio & ctty */ + setsid(); + dup2(fd, 0); + dup2(fd, 1); + dup2(fd, 2); + } + while (fd > 2) { + close(fd--); + if (!(flags & DAEMON_CLOSE_EXTRA_FDS)) + return; + /* else close everything after fd#2 */ + } +} +#endif + +int eperd_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int eperd_main(int argc UNUSED_PARAM, char **argv) +{ + unsigned opt; + int r, fd; + unsigned seed; + size_t len; + char *validated_fn; + struct event *updateEventMin, *updateEventHour; + struct timeval tv; + struct rlimit limit; + struct stat sb; + + const char *PidFileName = NULL; + const char *path; + char *interface_name= NULL; + + INIT_G(); + + /* "-b after -f is ignored", and so on for every pair a-b */ + opt_complementary = "d-l" + ":i+:l+:d+"; /* -i, -l and -d have numeric param */ + opt = getopt32(argv, "I:i:l:L:fc:A:DP:d:O:", + &interface_name, &instance_id, &LogLevel, + &LogFile, &CDir, + &atlas_id, &PidFileName,&LogLevel, &out_filename); + /* both -d N and -l N set the same variable: LogLevel */ + + if (out_filename) + { + validated_fn= rebased_validated_filename(ATLAS_SPOOLDIR, + out_filename, SAFE_PREFIX_REL); + if (validated_fn == NULL) + { + crondlog(DIE9 "insecure file '%s'. allowed path '%s'", + out_filename, SAFE_PREFIX_REL); + } + + /* Use validate_fn from now on instead of out_filename */ + out_filename= validated_fn; + } + + if (!(opt & OPT_f)) { + /* close stdin, stdout, stderr. + * close unused descriptors - don't need them. */ + bb_daemonize_or_rexec(DAEMON_CLOSE_EXTRA_FDS, argv); + } + + if (!DebugOpt && LogFile == NULL) { + /* logging to syslog */ + openlog(applet_name, LOG_CONS | LOG_PID, LOG_LOCAL6); + logmode = LOGMODE_SYSLOG; + } + + if (interface_name) + { + len= strlen(RESOLV_CONF) + 1 + + strlen(interface_name) + 1; + resolv_conf= malloc(len); + snprintf(resolv_conf, len, "%s.%s", + RESOLV_CONF, interface_name); + + /* Check if this resolv.conf exists. If it doen't, switch + * to the standard one. 
+ */ + if (stat(resolv_conf, &sb) == -1) + { + free(resolv_conf); + resolv_conf= strdup(RESOLV_CONF); + } + } + else + { + resolv_conf= strdup(RESOLV_CONF); + } + + + do_kick_watchdog= !!(opt & OPT_D); + + xchdir(CDir); + //signal(SIGHUP, SIG_IGN); /* ? original crond dies on HUP... */ + xsetenv("SHELL", DEFAULT_SHELL); /* once, for all future children */ + crondlog(LVL9 "crond (busybox "BB_VER") started, log level %d", LogLevel); + + signal(SIGQUIT, SIG_DFL); + limit.rlim_cur= RLIM_INFINITY; + limit.rlim_max= RLIM_INFINITY; + setrlimit(RLIMIT_CORE, &limit); + + /* Ignore SIGPIPE, broken TCP sessions may trigger them */ + signal(SIGPIPE, SIG_IGN); + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + crondlog(DIE9 "event_base_new failed"); /* exits */ + } + DnsBase= evdns_base_new(EventBase, 0 /*!initialize*/); + if (!DnsBase) + { + crondlog(DIE9 "evdns_base_new failed"); /* exits */ + } + + if (interface_name) + { + r= evdns_base_set_interface(DnsBase, interface_name); + if (r == -1) + { + event_base_free(EventBase); + crondlog(DIE9 "evdns_base_set_interface failed"); + /* exits */ + } + } + + r = evdns_base_resolv_conf_parse(DnsBase, DNS_OPTIONS_ALL, + resolv_conf); + if (r == -1) + { + event_base_free(EventBase); + crondlog(DIE9 "evdns_base_resolv_conf_parse failed"); /* exits */ + } + + fd= open(URANDOM_DEV, O_RDONLY); + + /* Best effort, just ignore errors */ + if (fd != -1) + { + read(fd, &seed, sizeof(seed)); + close(fd); + } + crondlog(LVL7 "using seed '%u'", seed); + srandom(seed); + + SynchronizeDir(); + + updateEventMin= event_new(EventBase, -1, EV_TIMEOUT|EV_PERSIST, + CheckUpdates, NULL); + if (!updateEventMin) + crondlog(DIE9 "event_new failed"); /* exits */ + tv.tv_sec= 60; + tv.tv_usec= 0; + event_add(updateEventMin, &tv); + + updateEventHour= event_new(EventBase, -1, EV_TIMEOUT|EV_PERSIST, + CheckUpdatesHour, NULL); + if (!updateEventHour) + crondlog(DIE9 "event_new failed"); /* exits */ + tv.tv_sec= 3600; + tv.tv_usec= 0; + event_add(updateEventHour, &tv); + + path = PidFileName ? PidFileName : "/var/run/crond.pid"; + if (!check_pidfile(path)) + crondlog(DIE9 "A process is still running"); + + r = write_pidfile(path); + if (r < 0) + crondlog(DIE9 "unable to write to PID file %s - %s", path, strerror(errno)); + +#if 0 + /* main loop - synchronize to 1 second after the minute, minimum sleep + * of 1 second. 
*/ + { + time_t t1 = time(NULL); + time_t next; + time_t last_minutely= 0; + time_t last_hourly= 0; + int sleep_time = 10; /* AA previously 60 */ + for (;;) { + kick_watchdog(); + sleep(sleep_time); + + kick_watchdog(); + + if (t1 >= last_minutely + 60) + { + last_minutely= t1; + CheckUpdates(); + } + if (t1 >= last_hourly + 3600) + { + last_hourly= t1; + SynchronizeDir(); + } + { + sleep_time= 60; + if (do_kick_watchdog) + sleep_time= 10; + TestJobs(&next); + crondlog(LVL7 "got next %d, now %d", + next, time(NULL)); + if (!next) + { + crondlog(LVL7 "calling RunJobs at %d", + time(NULL)); + RunJobs(); + crondlog(LVL7 "RunJobs ended at %d", + time(NULL)); + sleep_time= 1; + } else if (next > t1 && next < t1+sleep_time) + sleep_time= next-t1; + if (CheckJobs() > 0) { + sleep_time = 10; + } + crondlog( + LVL7 "t1 = %d, next = %d, sleep_time = %d", + t1, next, sleep_time); + } + t1= time(NULL); + } + } +#endif + r= event_base_loop(EventBase, 0); + if (r != 0) + crondlog(LVL9 "event_base_loop failed"); + return 0; /* not reached */ +} + +#if SETENV_LEAKS +/* We set environment *before* vfork (because we want to use vfork), + * so we cannot use setenv() - repeated calls to setenv() may leak memory! + * Using putenv(), and freeing memory after unsetenv() won't leak */ +static void safe_setenv4(char **pvar_val, const char *var, const char *val /*, int len*/) +{ + const int len = 4; /* both var names are 4 char long */ + char *var_val = *pvar_val; + + if (var_val) { + var_val[len] = '\0'; /* nuke '=' */ + unsetenv(var_val); + free(var_val); + } + *pvar_val = xasprintf("%s=%s", var, val); + putenv(*pvar_val); +} +#endif + +static void do_distr(CronLine *line) +{ + long n, r, modulus, max; + + line->distr_offset.tv_sec= 0; /* Safe default */ + line->distr_offset.tv_usec= 0; + if (line->distribution == DISTR_UNIFORM) + { + /* Generate a random number in the range [0..distr_param] */ + modulus= line->distr_param+1; + n= LONG_MAX/modulus; + max= n*modulus; + do + { + r= random(); + } while (r >= max); + r %= modulus; + line->distr_offset.tv_sec= r - line->distr_param/2; + line->distr_offset.tv_usec= random() % 1000000; + } + crondlog(LVL7 "do_distr: using %f", line->distr_offset.tv_sec + + line->distr_offset.tv_usec/1e6); +} + +static void SynchronizeFile(const char *fileName) +{ + struct parser_t *parser; + struct stat sbuf; + int r, maxLines; + char *tokens[6]; +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + char *mailTo = NULL; +#endif + char *check0, *check1, *check2; + CronLine *line; + + if (!fileName) + return; + + for (line= LineBase; line; line= line->cl_Next) + line->needs_delete= 1; + + parser = config_open(fileName); + if (!parser) + { + /* We have to get rid of the old entries if the file is not + * there. Assume a non-existant file is the only reason for + * failure. + */ + DeleteFile(); + return; + } + + maxLines = (strcmp(fileName, "root") == 0) ? 65535 : MAXLINES; + + if (fstat(fileno(parser->fp), &sbuf) == 0 /* && sbuf.st_uid == DaemonUid */) { + int n; + + while (1) { + if (!--maxLines) + break; + n = config_read(parser, tokens, 6, 1, "# \t", PARSE_NORMAL | PARSE_KEEP_COPY); + if (!n) + break; + + if (DebugOpt) + crondlog(LVL5 "user:%s entry:%s", fileName, parser->data); + + /* check if line is setting MAILTO= */ + if (0 == strncmp(tokens[0], "MAILTO=", 7)) { +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + free(mailTo); + mailTo = (tokens[0][7]) ? 
xstrdup(&tokens[0][7]) : NULL; +#endif /* otherwise just ignore such lines */ + continue; + } + /* check if a minimum of tokens is specified */ + if (n < 6) + continue; + line = xzalloc(sizeof(*line)); + line->interval= strtoul(tokens[0], &check0, 10); + line->start_time= strtoul(tokens[1], &check1, 10); + line->end_time= strtoul(tokens[2], &check2, 10); + + if (line->interval <= 0 || + line->interval > MAX_INTERVAL || + check0[0] != '\0' || + check1[0] != '\0' || + check2[0] != '\0') + { + crondlog(LVL9 "bad crontab line"); + free(line); + continue; + } + + if (strcmp(tokens[3], "NONE") == 0) + { + line->distribution= DISTR_NONE; + } + else if (strcmp(tokens[3], "UNIFORM") == 0) + { + line->distribution= DISTR_UNIFORM; + line->distr_param= + strtoul(tokens[4], &check0, 10); + if (check0[0] != '\0') + { + crondlog(LVL9 "bad crontab line"); + free(line); + continue; + } + if (line->distr_param == 0 || + LONG_MAX/line->distr_param == 0) + { + line->distribution= DISTR_NONE; + } + } + + line->lasttime= 0; +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + /* copy mailto (can be NULL) */ + line->cl_MailTo = xstrdup(mailTo); +#endif + /* copy command */ + line->cl_Shell = xstrdup(tokens[5]); + if (DebugOpt) { + crondlog(LVL5 " command:%s", tokens[5]); + } +//bb_error_msg("M[%s]F[%s][%s][%s][%s][%s][%s]", mailTo, tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]); + + evtimer_assign(&line->event, EventBase, RunJob, line); + + r= Insert(line); + if (!r) + { + /* Existing line. Delete new one */ +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + free(line->cl_MailTo); +#endif + free(line->cl_Shell); + free(line); + continue; + } + + /* New line, should schedule start event */ + Start(line); + + kick_watchdog(); + } + + if (maxLines == 0) { + crondlog(WARN9 "user %s: too many lines", fileName); + } + } + config_close(parser); + + DeleteFile(); +} + +static void check_resolv_conf(void) +{ + static time_t last_time= -1; + + int r; + FILE *fn; + struct stat sb; + + r= stat(resolv_conf, &sb); + if (r == -1) + { + crondlog(LVL8 "error accessing resolv.conf: %s", + strerror(errno)); + return; + } + + if (sb.st_mtime == last_time) + return; /* resolv.conf did not change */ + evdns_base_clear_nameservers_and_suspend(DnsBase); + r= evdns_base_resolv_conf_parse(DnsBase, DNS_OPTIONS_ALL, + resolv_conf); + evdns_base_resume(DnsBase); + + if ((r != 0 || last_time != -1) && out_filename) + { + fn= fopen(out_filename, "a"); + if (!fn) + crondlog(DIE9 "unable to append to '%s'", out_filename); + fprintf(fn, "RESULT { "); + if (atlas_id) + fprintf(fn, DBQ(id) ":" DBQ(%s) ", ", atlas_id); + fprintf(fn, "%s, " DBQ(time) ":%ld, ", + atlas_get_version_json_str(), (long)time(NULL)); + fprintf(fn, DBQ(event) ": " DBQ(load resolv.conf) + ", " DBQ(result) ": %d", r); + + fprintf(fn, " }\n"); + fclose(fn); + } + + last_time= sb.st_mtime; +} + +static void CheckUpdates(evutil_socket_t __attribute__ ((unused)) fd, + short __attribute__ ((unused)) what, + void __attribute__ ((unused)) *arg) +{ + FILE *fi; + char buf[256]; + + fi = fopen_for_read(CRONUPDATE); + if (fi != NULL) { + unlink(CRONUPDATE); + while (fgets(buf, sizeof(buf), fi) != NULL) { + /* use first word only */ + SynchronizeFile(strtok(buf, " \t\r\n")); + } + fclose(fi); + } + + check_resolv_conf(); +} + +static void CheckUpdatesHour(evutil_socket_t __attribute__ ((unused)) fd, + short __attribute__ ((unused)) what, + void __attribute__ ((unused)) *arg) +{ + SynchronizeDir(); +} + +static void SynchronizeDir(void) +{ + /* + * Remove cron update file + * + * Re-chdir, 
in case directory was renamed & deleted, or otherwise + * screwed up. + * + * Only load th crontab for 'root' + */ + unlink(CRONUPDATE); + if (chdir(CDir) < 0) { + crondlog(DIE9 "can't chdir(%s)", CDir); + } + + SynchronizeFile("root"); + DeleteFile(); +} + +static void set_timeout(CronLine *line, int init_next_cycle) +{ + struct timeval now, tv; + + gettimeofday(&now, NULL); + if (now.tv_sec > line->end_time) + return; /* This job has expired */ + + if (init_next_cycle) + { + if (now.tv_sec < line->start_time) + line->nextcycle= 0; + else + { + line->nextcycle= (now.tv_sec-line->start_time)/ + line->interval + 1; + } + do_distr(line); + } + + tv.tv_sec= line->nextcycle*line->interval + line->start_time + + line->distr_offset.tv_sec - now.tv_sec; + tv.tv_usec= line->distr_offset.tv_usec - now.tv_usec; + if (tv.tv_usec < 0) + { + tv.tv_usec += 1e6; + tv.tv_sec--; + } + if (tv.tv_sec < 0) + tv.tv_sec= tv.tv_usec= 0; + crondlog(LVL7 "set_timeout: nextcycle %d, interval %d, start_time %d, distr_offset %f, now %d, tv_sec %d", + line->nextcycle, line->interval, + line->start_time, + line->distr_offset.tv_sec + line->distr_offset.tv_usec/1e6, + now.tv_sec, tv.tv_sec); + line->debug_cycle= line->nextcycle; + line->debug_generated= now.tv_sec; + line->nexttime= line->nextcycle*line->interval + line->start_time + + line->distr_offset.tv_sec; + line->waittime= tv.tv_sec; + event_add(&line->event, &tv); +} + +/* + * Insert - insert if not already there + */ +static int Insert(CronLine *line) +{ + CronLine *last; + + if (oldLine) + { + /* Try to match line expected to be next */ + if (oldLine->interval == line->interval && + oldLine->start_time == line->start_time && + strcmp(oldLine->cl_Shell, line->cl_Shell) == 0) + { + crondlog(LVL9 "next line matches"); + ; /* okay */ + } + else + oldLine= NULL; + } + + if (!oldLine) + { + /* Try to find one */ + for (last= NULL, oldLine= LineBase; oldLine; + last= oldLine, oldLine= oldLine->cl_Next) + { + if (oldLine->interval == line->interval && + oldLine->start_time == line->start_time && + strcmp(oldLine->cl_Shell, line->cl_Shell) == 0) + { + break; + } + } + } + + if (oldLine) + { + crondlog(LVL7 "Insert: found match for line '%s'", + line->cl_Shell); + crondlog(LVL7 "Insert: setting end time to %d", line->end_time); + oldLine->end_time= line->end_time; + oldLine->needs_delete= 0; + + /* Reschedule event */ + set_timeout(oldLine, 0 /*!init_netcycle*/); + + return 0; + } + + crondlog(LVL7 "found no match for line '%s'", line->cl_Shell); + line->cl_Next= NULL; + if (last) + last->cl_Next= line; + else + LineBase= line; + return 1; +} + +static void Start(CronLine *line) +{ + line->testops= NULL; + + /* Parse command line and init test */ + atlas_init(line); + if (!line->testops) + return; /* Test failed to initialize */ + + set_timeout(line, 1 /*init_nextcycle*/); +} + +/* + * DeleteFile() - delete user database + * + * Note: multiple entries for same user may exist if we were unable to + * completely delete a database due to running processes. 
+ */ +static void DeleteFile(void) +{ + int r; + CronLine **pline = &LineBase; + CronLine *line; + + oldLine= NULL; + + while ((line = *pline) != NULL) { + if (!line->needs_delete) + { + pline= &line->cl_Next; + continue; + } + kick_watchdog(); + if (!line->teststate) + { + crondlog(LVL8 "DeleteFile: no state to delete for '%s'", + line->cl_Shell); + } + if (line->testops && line->teststate) + { + r= line->testops->delete(line->teststate); + if (r != 1) + { + crondlog(LVL9 "DeleteFile: line is busy"); + pline= &line->cl_Next; + continue; + } + line->testops= NULL; + line->teststate= NULL; + } + event_del(&line->event); + free(line->cl_Shell); + line->cl_Shell= NULL; + + *pline= line->cl_Next; + free(line); + } +} + +static void skip_space(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && isspace(*(unsigned char *)cp)) + cp++; + *ncpp= cp; +} + +static void skip_nonspace(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && !isspace(*(unsigned char *)cp)) + cp++; + *ncpp= cp; +} + +static void find_eos(char *cp, char **ncpp) +{ + while (cp[0] != '\0' && cp[0] != '"') + cp++; + *ncpp= cp; +} + +static struct builtin +{ + const char *cmd; + struct testops *testops; +} builtin_cmds[]= +{ + { "evhttpget", &httpget_ops }, + { "evntp", &ntp_ops }, + { "evping", &ping_ops }, +#if ENABLE_EVSSLGETCERT + { "evsslgetcert", &sslgetcert_ops }, +#endif + { "evtdig", &tdig_ops }, + { "evtraceroute", &traceroute_ops }, + { "condmv", &condmv_ops }, + { NULL, NULL } +}; + + +#define ATLAS_NARGS 64 /* Max arguments to a built-in command */ +#define ATLAS_ARGSIZE 512 /* Max size of the command line */ + +static void atlas_init(CronLine *line) +{ + char c; + int i, argc; + size_t len; + char *cp, *ncp; + struct builtin *bp; + char *cmdline, *p; + const char *reason; + void *state; + FILE *fn; + char *argv[ATLAS_NARGS]; + char args[ATLAS_ARGSIZE]; + + cmdline= line->cl_Shell; + crondlog(LVL7 "atlas_run: looking for %p '%s'", cmdline, cmdline); + + state= NULL; + reason= NULL; + for (bp= builtin_cmds; bp->cmd != NULL; bp++) + { + len= strlen(bp->cmd); + if (strncmp(cmdline, bp->cmd, len) != 0) + continue; + if (cmdline[len] != ' ') + continue; + break; + } + if (bp->cmd == NULL) + { + reason="command not found"; + goto error; + } + + crondlog(LVL7 "found cmd '%s' for '%s'", bp->cmd, cmdline); + + len= strlen(cmdline); + if (len+1 > ATLAS_ARGSIZE) + { + crondlog(LVL8 "atlas_run: command line too big: '%s'", cmdline); + reason="command line too big"; + goto error; + } + strcpy(args, cmdline); + + /* Split the command line */ + cp= args; + argc= 0; + argv[argc]= cp; + skip_nonspace(cp, &ncp); + cp= ncp; + + for(;;) + { + /* End of list */ + if (cp[0] == '\0') + { + argc++; + break; + } + + /* Find start of next argument */ + skip_space(cp, &ncp); + + /* Terminate current one */ + cp[0]= '\0'; + argc++; + + if (argc >= ATLAS_NARGS-1) + { + crondlog( + LVL8 "atlas_run: command line '%s', too many arguments", + cmdline); + reason="too many arguments"; + goto error; + } + + cp= ncp; + argv[argc]= cp; + if (cp[0] == '"') + { + /* Special code for strings */ + find_eos(cp+1, &ncp); + if (ncp[0] != '"') + { + crondlog( + LVL8 "atlas_run: command line '%s', end of string not found", + cmdline); + reason="end of string not found"; + goto error; + } + argv[argc]= cp+1; + cp= ncp; + cp[0]= '\0'; + cp++; + } + else + { + skip_nonspace(cp, &ncp); + cp= ncp; + } + } + + if (argc >= ATLAS_NARGS) + { + crondlog( + LVL8 "atlas_run: command line '%s', too many arguments", + cmdline); + reason="too many arguments"; + goto error; + } + 
argv[argc]= NULL; + + for (i= 0; itestops->init(argc, argv, 0); + crondlog(LVL7 "init returned %p for '%s'", state, line->cl_Shell); + line->teststate= state; + line->testops= bp->testops; + +error: + if (state == NULL && out_filename) + { + fn= fopen(out_filename, "a"); + if (!fn) + crondlog(DIE9 "unable to append to '%s'", out_filename); + fprintf(fn, "RESULT { "); + if (atlas_id) + fprintf(fn, DBQ(id) ":" DBQ(%s) ", ", atlas_id); + fprintf(fn, "%s, " DBQ(time) ":%ld, ", + atlas_get_version_json_str(), (long)time(NULL)); + if (reason) + fprintf(fn, DBQ(reason) ":" DBQ(%s) ", ", reason); + fprintf(fn, DBQ(cmd) ": \""); + for (p= line->cl_Shell; *p; p++) + { + c= *p; + if (c == '"' || c == '\\') + fprintf(fn, "\\%c", c); + else if (isprint_asciionly((unsigned char)c)) + fputc(c, fn); + else + fprintf(fn, "\\u%04x", (unsigned char)c); + } + fprintf(fn, "\""); + fprintf(fn, " }\n"); + fclose(fn); + } +} + +#if ENABLE_FEATURE_CROND_CALL_SENDMAIL + +// TODO: sendmail should be _run-time_ option, not compile-time! + +static void +ForkJob(const char *user, CronLine *line, int mailFd, + const char *prog, const char *cmd, const char *arg, + const char *mail_filename) +{ + struct passwd *pas; + pid_t pid; + + /* prepare things before vfork */ + pas = getpwnam(user); + if (!pas) { + crondlog(LVL9 "can't get uid for %s", user); + goto err; + } + SetEnv(pas); + + pid = vfork(); + if (pid == 0) { + /* CHILD */ + /* change running state to the user in question */ + ChangeUser(pas); + if (DebugOpt) { + crondlog(LVL5 "child running %s", prog); + } + if (mailFd >= 0) { + xmove_fd(mailFd, mail_filename ? 1 : 0); + dup2(1, 2); + } + /* crond 3.0pl1-100 puts tasks in separate process groups */ + bb_setpgrp(); + execlp(prog, prog, cmd, arg, NULL); + crondlog(ERR20 "can't exec, user %s cmd %s %s %s", user, prog, cmd, arg); + if (mail_filename) { + fdprintf(1, "Exec failed: %s -c %s\n", prog, arg); + } + _exit(EXIT_SUCCESS); + } + + line->cl_Pid = pid; + if (pid < 0) { + /* FORK FAILED */ + crondlog(ERR20 "can't vfork"); + err: + line->cl_Pid = 0; + if (mail_filename) { + unlink(mail_filename); + } + } else if (mail_filename) { + /* PARENT, FORK SUCCESS + * rename mail-file based on pid of process + */ + char mailFile2[128]; + + snprintf(mailFile2, sizeof(mailFile2), "%s/cron.%s.%d", TMPDIR, user, pid); + rename(mail_filename, mailFile2); // TODO: xrename? + } + + /* + * Close the mail file descriptor.. we can't just leave it open in + * a structure, closing it later, because we might run out of descriptors + */ + if (mailFd >= 0) { + close(mailFd); + } +} + +static void RunJob(const char *user, CronLine *line) +{ + char mailFile[128]; + int mailFd = -1; + + line->cl_Pid = 0; + line->cl_MailFlag = 0; + + if (line->cl_MailTo) { + /* open mail file - owner root so nobody can screw with it. 
*/ + snprintf(mailFile, sizeof(mailFile), "%s/cron.%s.%d", TMPDIR, user, getpid()); + mailFd = open(mailFile, O_CREAT | O_TRUNC | O_WRONLY | O_EXCL | O_APPEND, 0600); + + if (mailFd >= 0) { + line->cl_MailFlag = 1; + fdprintf(mailFd, "To: %s\nSubject: cron: %s\n\n", line->cl_MailTo, + line->cl_Shell); + line->cl_MailPos = lseek(mailFd, 0, SEEK_CUR); + } else { + crondlog(ERR20 "cannot create mail file %s for user %s, " + "discarding output", mailFile, user); + } + } + + + if (atlas_outfile && atlas_run(line->cl_Shell)) + { + /* Internal command */ + return; + } + + ForkJob(user, line, mailFd, DEFAULT_SHELL, "-c", line->cl_Shell, mailFile); +} + +/* + * EndJob - called when job terminates and when mail terminates + */ +static void EndJob(const char *user, CronLine *line) +{ + int mailFd; + char mailFile[128]; + struct stat sbuf; + + /* No job */ + if (line->cl_Pid <= 0) { + line->cl_Pid = 0; + return; + } + + /* + * End of job and no mail file + * End of sendmail job + */ + snprintf(mailFile, sizeof(mailFile), "%s/cron.%s.%d", TMPDIR, user, line->cl_Pid); + line->cl_Pid = 0; + + if (line->cl_MailFlag == 0) { + return; + } + line->cl_MailFlag = 0; + + /* + * End of primary job - check for mail file. If size has increased and + * the file is still valid, we sendmail it. + */ + mailFd = open(mailFile, O_RDONLY); + unlink(mailFile); + if (mailFd < 0) { + return; + } + + if (fstat(mailFd, &sbuf) < 0 || sbuf.st_uid != DaemonUid + || sbuf.st_nlink != 0 || sbuf.st_size == line->cl_MailPos + || !S_ISREG(sbuf.st_mode) + ) { + close(mailFd); + return; + } + if (line->cl_MailTo) + ForkJob(user, line, mailFd, SENDMAIL, SENDMAIL_ARGS, NULL); +} + +#else /* crond without sendmail */ + +static void RunJob(evutil_socket_t __attribute__ ((unused)) fd, + short __attribute__ ((unused)) what, void *arg) +{ + char c; + char *p; + CronLine *line; + struct timeval now; + FILE *fn; + + line= arg; + + crondlog(LVL7 "RunJob for %p, '%s'\n", arg, line->cl_Shell); + + if (line->needs_delete) + { + crondlog(LVL7 "RunJob: needs delete\n"); + return; /* Line is to be deleted */ + } + + gettimeofday(&now, NULL); + + crondlog(LVL7 "RubJob, now %d, end_time %d\n", now.tv_sec, + line->end_time); + if (now.tv_sec < line->nexttime-10 || now.tv_sec > line->nexttime+10) + { + if (out_filename) + { + fn= fopen(out_filename, "a"); + if (!fn) + { + crondlog(DIE9 "unable to append to '%s'", + out_filename); + } + fprintf(fn, "RESULT { "); + if (atlas_id) + fprintf(fn, DBQ(id) ":" DBQ(%s) ", ", atlas_id); + fprintf(fn, "%s, " DBQ(time) ":%ld, ", + atlas_get_version_json_str(), (long)time(NULL)); + fprintf(fn, DBQ(reason) ": " + DBQ(inconsistent time; now %d; nexttime %d; waittime %d; cycle %d; generated %d) ", ", + (int)now.tv_sec, (int)line->nexttime, + (int)line->waittime, (int)line->debug_cycle, + (int)line->debug_generated); + + fprintf(fn, DBQ(cmd) ": \""); + for (p= line->cl_Shell; *p; p++) + { + c= *p; + if (c == '"' || c == '\\') + fprintf(fn, "\\%c", c); + else if (isprint_asciionly((unsigned char)c)) + fputc(c, fn); + else + fprintf(fn, "\\u%04x", (unsigned char)c); + } + fprintf(fn, "\""); + fprintf(fn, " }\n"); + fclose(fn); + } + crondlog( + LVL7 "RunJob: weird, now %d, nexttime %d, waittime %d\n", + now.tv_sec, line->nexttime, line->waittime); + + /* Recompute nextcycle */ + set_timeout(line, 1 /*init_next_cycle*/); + return; + } + + if (now.tv_sec > line->end_time) + { + crondlog(LVL7 "RunJob: expired\n"); + return; /* This job has expired */ + } + + if (!line->teststate) + { + crondlog(LVL8 "not starting cmd '%s' (not 
init)\n", + line->cl_Shell); + return; + } + + line->testops->start(line->teststate); + + line->nextcycle++; + if (line->start_time + line->nextcycle*line->interval < now.tv_sec) + { + crondlog(LVL7 "recomputing nextcycle"); + line->nextcycle= (now.tv_sec-line->start_time)/line->interval + + 1; + } + + do_distr(line); + + set_timeout(line, 0 /*!init_nextcycle*/); +} + +#endif /* ENABLE_FEATURE_CROND_CALL_SENDMAIL */ diff --git a/probe-busybox/eperd/eperd.h b/probe-busybox/eperd/eperd.h new file mode 100644 index 00000000..c7d55bf0 --- /dev/null +++ b/probe-busybox/eperd/eperd.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * eperd.h + */ + +typedef struct CronLine CronLine; + +struct globals { + unsigned LogLevel; /* = 8; */ + const char *LogFile; + const char *CDir; /* = CRONTABS; */ + CronLine *LineBase; + CronLine *oldLine; + unsigned instance_id; + struct event_base *EventBase; + struct evdns_base *DnsBase; +}; +extern struct globals G; +#define LogLevel (G.LogLevel ) +#define LogFile (G.LogFile ) +#define CDir (G.CDir ) +#define LineBase (G.LineBase ) +#define FileBase (G.FileBase ) +#define oldLine (G.oldLine ) +#define instance_id (G.instance_id ) +#define EventBase (G.EventBase ) +#define DnsBase (G.DnsBase ) + +#define TRT_ICMP4_INSTANCE_ID_SHIFT 12 +#define TRT_ICMP4_INSTANCE_ID_MASK 0xf000 + +#define LVL5 "\x05" +#define LVL7 "\x07" +#define LVL8 "\x08" +#define LVL9 "\x09" +#define WARN9 "\x49" +#define DIE9 "\xc9" +/* level >= 20 is "error" */ +#define ERR20 "\x14" + +struct testops +{ + void *(*init)(int argc, char *argv[], + void (*done)(void *teststate, int error)); + void (*start)(void *teststate); + int (*delete)(void *teststate); +}; + +extern struct testops condmv_ops; +extern struct testops httpget_ops; +extern struct testops ntp_ops; +extern struct testops ping_ops; +extern struct testops sslgetcert_ops; +extern struct testops tdig_ops; +extern struct testops tlsscan_ops; +extern struct testops traceroute_ops; + +void crondlog(const char *ctl, ...); diff --git a/probe-busybox/eperd/evhttpget.c b/probe-busybox/eperd/evhttpget.c new file mode 100644 index 00000000..5d8b6ceb --- /dev/null +++ b/probe-busybox/eperd/evhttpget.c @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under the GPL v2 or later, see the file LICENSE in this tarball. + * Standalone version of the event-based httpget. 
+ */ +//config:config EVHTTPGET +//config: bool "evhttpget" +//config: default n +//config: help +//config: standalone version of event-driven httpget + +//config:config FEATURE_EVHTTPGET_HTTPS +//config: bool "Enable https support" +//config: default n +//config: depends on EVHTTPGET +//config: help +//config: Enable https:// support for httpget + +//applet:IF_EVHTTPGET(APPLET(evhttpget, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVHTTPGET) += evhttpget.o + +//usage:#define evhttpget_trivial_usage +//usage: "[-ac0146] [--all [--combine]] [--get|--head|--post]" +//usage: "\n\t[--host ] [--post-file ] " +//usage: "[--post-header ]\n\t[--post-footer ] " +//usage: "[-read-limit ]" +//usage: "\n\t[--store-headers ] [--sni ] " +//usage: "[--timeout ] " +//usage: "[--user-agent ]\n\t[--etim] [--etim] [-I interface] " +//usage: "[-A ] [-b ]\n\t[-O ] " +//usage: "[-R ] [-W ]" +//usage:#define evhttpget_full_usage "\n\n" +//usage: "\nOptions:" +//usage: "\n -a --all Report on all addresses" +//usage: "\n -c --combine Combine the reports for all address in one JSON" +//usage: "\n --get GET method" +//usage: "\n --head HEAD method" +//usage: "\n --post POST mehod" +//usage: "\n --host Host header" +//usage: "\n --post-file File to post" +//usage: "\n --post-header File to post (comes first)" +//usage: "\n --post-footer File to post (comes last)" +//usage: "\n --read-limit Amount of body to read" +//usage: "\n --store-body Number of bytes of the body to store" +//usage: "\n --store-headers Number of bytes of the header to store" +//usage: "\n --sni Service Name Identification" +//usage: "\n --timeout Time (in ms) to wait between read calls" +//usage: "\n --user-agent User agent header" +//usage: "\n --etim Extended timings" +//usage: "\n --eetim Extended extended timings" +//usage: "\n -0 HTTP/1.0" +//usage: "\n -1 HTTP/1.1" +//usage: "\n -I Outgoing interface" +//usage: "\n -A Atlas ID" +//usage: "\n -b Bundle ID" +//usage: "\n -O Output file" +//usage: "\n -R Response in file" +//usage: "\n -W Response out file" +//usage: "\n -4 Only IPv4 addresses" +//usage: "\n -6 Only IPv6 addresses" + +#include "libbb.h" +#include +#include +#include +#include + +#include "eperd.h" + +static void done(void *state UNUSED_PARAM, int error) +{ + exit(error); +} + +int evhttpget_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evhttpget_main(int argc UNUSED_PARAM, char **argv) +{ + int r; + void *state; + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + fprintf(stderr, "evhttpget_base_new failed\n"); + exit(1); + } + DnsBase= evdns_base_new(EventBase, 1 /*initialize*/); + if (!DnsBase) + { + fprintf(stderr, "evdns_base_new failed\n"); + exit(1); + } + + state= httpget_ops.init(argc, argv, done); + if (!state) + { + fprintf(stderr, "evhttpget: httpget_ops.init failed\n"); + exit(1); + } + httpget_ops.start(state); + + r= event_base_loop(EventBase, 0); + if (r != 0) + { + fprintf(stderr, "evhttpget: event_base_loop failed\n"); + exit(1); + } + return 0; /* not reached */ +} + diff --git a/probe-busybox/eperd/evntp.c b/probe-busybox/eperd/evntp.c new file mode 100644 index 00000000..130ea6cc --- /dev/null +++ b/probe-busybox/eperd/evntp.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * Standalone version of the event-based ntp. 
+ */ +//config:config EVNTP +//config: bool "evntp" +//config: default n +//config: help +//config: standalone version of event-driven ntp + +//applet:IF_EVNTP(APPLET(evntp, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVNTP) += evntp.o + +//usage:#define evntp_trivial_usage +//usage: "-[46] [-c ] [-i ] [-w ]" +//usage: "\n\t[-A ] [-B ] [-O ]" +//usage: "\n\t[-R ] [-W ] [-s ] " +//usage: "\n" +//usage:#define evntp_full_usage +//usage: "\nOptions:" +//usage: "\n -4 IPv4" +//usage: "\n -6 IPv6" +//usage: "\n -c Number of packets" +//usage: "\n -i Outgoing interface" +//usage: "\n -w Time to wait for reply" +//usage: "\n -A Atlas measurement ID" +//usage: "\n -B bundle ID" +//usage: "\n -O Output file name" +//usage: "\n -R Read response from a file" +//usage: "\n -W Write responses to a file" +//usage: "\n -s Additional size added to request" +//usage: "\n" + +#include "libbb.h" +#include +#include +#include +#include + +#include "eperd.h" + +static void done(void *state UNUSED_PARAM, int error) +{ + exit(error); +} + +int evntp_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evntp_main(int argc UNUSED_PARAM, char **argv) +{ + int r; + void *state; + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + fprintf(stderr, "evntp: event_base_new failed\n"); + exit(1); + } + DnsBase= evdns_base_new(EventBase, 1 /*initialize*/); + if (!DnsBase) + { + fprintf(stderr, "evdns_base_new failed\n"); + exit(1); + } + + state= ntp_ops.init(argc, argv, done); + if (!state) + { + fprintf(stderr, "evntp: ntp_ops.init failed\n"); + exit(1); + } + ntp_ops.start(state); + + r= event_base_loop(EventBase, 0); + if (r != 0) + { + fprintf(stderr, "evntp: event_base_loop failed\n"); + exit(1); + } + return 0; /* not reached */ +} + diff --git a/probe-busybox/eperd/evping.c b/probe-busybox/eperd/evping.c new file mode 100644 index 00000000..3957c26f --- /dev/null +++ b/probe-busybox/eperd/evping.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * Standalone version of the event-based ping. 
+ */ +//config:config EVPING +//config: bool "evping" +//config: default n +//config: help +//config: standalone version of event-driven ping + +//applet:IF_EVPING(APPLET(evping, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVPING) += evping.o + +//usage:#define evping_trivial_usage +//usage: "-[46ep] [-c ] [-s ] [-A ] " +//usage: "[-B \n\t[-O ] [-i ] " +//usage: "[-I ] [-R ]\n\t[-W ] " +//usage: "" +//usage:#define evping_full_usage "\n\n" +//usage: "\nOptions:" +//usage: "\n -4 IPv4" +//usage: "\n -6 IPv6" +//usage: "\n -e use the libc stub resolver" +//usage: "\n -r use the libevent resolver (default)" +//usage: "\n -c Number of packets" +//usage: "\n -s Size" +//usage: "\n -A Atlas measurement ID" +//usage: "\n -B bundle ID" +//usage: "\n -O Output file name" +//usage: "\n -i Inter packet interval" +//usage: "\n -I Outgoing interface" +//usage: "\n -R Read response from a file" +//usage: "\n -W Write responses to a file" +//usage: "\n" + +#include "libbb.h" +#include +#include +#include +#include + +#include "eperd.h" + +static void done(void *state UNUSED_PARAM, int error) +{ + exit(error); +} + +int evping_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evping_main(int argc UNUSED_PARAM, char **argv) +{ + int r; + void *state; + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + fprintf(stderr, "evping_base_new failed\n"); + exit(1); + } + DnsBase= evdns_base_new(EventBase, 1 /*initialize*/); + if (!DnsBase) + { + fprintf(stderr, "evdns_base_new failed\n"); + exit(1); + } + + + state= ping_ops.init(argc, argv, done); + if (!state) + { + fprintf(stderr, "evping_ops.init failed\n"); + exit(1); + } + ping_ops.start(state); + + r= event_base_loop(EventBase, 0); + if (r != 0) + { + fprintf(stderr, "evping_base_loop failed\n"); + exit(1); + } + return 0; /* not reached */ +} + diff --git a/probe-busybox/eperd/evsslgetcert.c b/probe-busybox/eperd/evsslgetcert.c new file mode 100644 index 00000000..5d1c4022 --- /dev/null +++ b/probe-busybox/eperd/evsslgetcert.c @@ -0,0 +1,81 @@ +/* Standalone version of the event-based sslgetcert. 
*/ +//config:config EVSSLGETCERT +//config: bool "evsslgetcert" +//config: default n +//config: help +//config: standalone version of event-driven sslgetcert + +//applet:IF_EVSSLGETCERT(APPLET(evsslgetcert, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVSSLGETCERT) += evsslgetcert.o + +//usage:#define evsslgetcert_trivial_usage +//usage: "-[46] [-A ] [-B ] [-h ]" +//usage: "\n\t[-O ] [-R ] [-V ] " +//usage: "\n\t[-W ] [-i ] [-p ] " +//usage: "\n" +//usage:#define evsslgetcert_full_usage +//usage: "\nOptions:" +//usage: "\n -4 IPv4" +//usage: "\n -6 IPv6" +//usage: "\n -A Atlas measurement ID" +//usage: "\n -B bundle ID" +//usage: "\n -h Host name for SNI" +//usage: "\n -O Output file name" +//usage: "\n -R Read response from a file" +//usage: "\n -V Client TLS version" +//usage: "\n -W Write responses to a file" +//usage: "\n -i Outgoing interface" +//usage: "\n -p TCP port of service" +//usage: "\n" + +#include "libbb.h" +#include +#include +#include +#include + +#include "eperd.h" + +static void done(void *state UNUSED_PARAM, int error) +{ + exit(error); +} + +int evsslgetcert_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evsslgetcert_main(int argc UNUSED_PARAM, char **argv) +{ + int r; + void *state; + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + fprintf(stderr, "evsslgetcert_base_new failed\n"); + exit(1); + } + DnsBase= evdns_base_new(EventBase, 1 /*initialize*/); + if (!DnsBase) + { + fprintf(stderr, "evdns_base_new failed\n"); + exit(1); + } + + state= sslgetcert_ops.init(argc, argv, done); + if (!state) + { + fprintf(stderr, "evsslgetcert: sslgetcert_ops.init failed\n"); + exit(1); + } + sslgetcert_ops.start(state); + + r= event_base_loop(EventBase, 0); + if (r != 0) + { + fprintf(stderr, "evsslgetcert: event_base_loop failed\n"); + exit(1); + } + return 0; /* not reached */ +} + diff --git a/probe-busybox/eperd/evtdig.c b/probe-busybox/eperd/evtdig.c new file mode 100644 index 00000000..06cb67f6 --- /dev/null +++ b/probe-busybox/eperd/evtdig.c @@ -0,0 +1,4239 @@ +/* + * Copyright (c) 2011-2014 RIPE NCC + * Copyright (c) 2009 Rocco Carbone + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ +//config:config EVTDIG +//config: bool "evtdig" +//config: default n +//config: depends on EPERD +//config: help +//config: tiny dig event driven version. support only limited queries id.sever +//config: txt chaos. RIPE NCC 2011 + +//config:config FEATURE_EVTDIG_TLS +//config: bool "Enable DNS over TLS." +//config: default n +//config: depends on EVTDIG +//config: help +//config: Enable DNS over TLS. 2016 IETF Dprive draft + +//config:config FEATURE_EVTDIG_DEBUG +//config: bool "Enable debug support in evtdig" +//config: default n +//config: depends on EVTDIG +//config: help +//config: extra debug info. Also may cause segfault or/and memory leak. Add at your own risk. 
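All four standalone applets in this patch (evntp, evping, evsslgetcert, and evtdig below) follow the same wrapper shape: create the global libevent EventBase and evdns DnsBase, hand argc/argv plus a done callback to the measurement's ops table, start the query, and run the event loop until done() exits the process. The following is a minimal self-contained sketch of that shared shape, assuming an init/start pair with the signatures used above; run_standalone and the local EventBase/DnsBase definitions are purely illustrative and not part of eperd.

/*
 * Sketch only: the wrapper pattern shared by the standalone ev* applets.
 * The init/start signatures are taken from how ntp_ops/ping_ops are called
 * above; run_standalone itself is an illustrative helper, not eperd API.
 */
#include <stdlib.h>

#include <event2/event.h>
#include <event2/dns.h>

static struct event_base *EventBase;
static struct evdns_base *DnsBase;

static void done(void *state, int error)
{
	(void)state;
	exit(error);			/* the loop below never returns normally */
}

int run_standalone(int argc, char **argv,
	void *(*init)(int, char **, void (*)(void *, int)),
	void (*start)(void *))
{
	void *state;

	EventBase = event_base_new();		/* per-process event loop */
	if (!EventBase)
		return 1;
	DnsBase = evdns_base_new(EventBase, 1);	/* async resolver, loaded from resolv.conf */
	if (!DnsBase)
		return 1;

	state = init(argc, argv, done);		/* parse options, build query state */
	if (!state)
		return 1;
	start(state);				/* schedule the first probe */

	return event_base_loop(EventBase, 0);	/* done() normally exits first */
}

Keeping event-loop ownership in the applet, rather than in the eperd scheduler, is presumably what lets the same *_ops tables run both as eperd builtins and as one-shot command-line tools.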
+ +//applet:IF_EVTDIG(APPLET(evtdig, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVTDIG) += evtdig.o + +//usage:#define evtdig_trivial_usage +//usage: "-[46adtbhinqRr]" +//usage: "[-A ][-B ][-I ]" +//usage: "\n\t[--ad]" +//usage: "[--cd]" +//usage: "[--client-subnet]" +//usage: "[--cookies]" +//usage: "[--do]" +//usage: "\n\t[--edns0 ]" +//usage: "[--edns-flags ]" +//usage: "[--edns-option ]" +//usage: "\n\t[--edns-version ]" +//usage: "[--https]" +//usage: "[--https-path ]" +//usage: "\n\t[--ipv6-dest-option ]" +//usage: "[--noabuf]" +//usage: "[--nsid]" +//usage: "[--out-file ]" +//usage: "\n\t[--port ]" +//usage: "[--p_probe_id]" +//usage: "[--qbuf]" +//usage: "[--read-response ]" +//usage: "\n\t[--resolv]" +//usage: "[--retry ]" +//usage: "[--sni-cert-name ]" +//usage: "\n\t[--timeout ]" +//usage: "[--tls]" +//usage: "[--ttl]" +//usage: "[--write-response ]" +//usage: "\n\t[--type ][--class ][--query ]" +//usage: "\n\t[--a ]" +//usage: "[--aaaa ]" +//usage: "[--any ]" +//usage: "[--afsdb ]" +//usage: "\n\t[--apl ]" +//usage: "[--axfr ]" +//usage: "[--caa ]" +//usage: "[--cert ]" +//usage: "\n\t[--cname ]" +//usage: "[--dlv ]" +//usage: "[--dname ]" +//usage: "[--dnskey ]" +//usage: "\n\t[--ds ]" +//usage: "[--ipseckey ]" +//usage: "[--loc ]" +//usage: "\n\t[--mx ]" +//usage: "[--naptr ]" +//usage: "[--ns ]" +//usage: "[--nsec ]" +//usage: "\n\t[--nsec3 ]" +//usage: "[--nsec3param ]" +//usage: "[--ptr ]" +//usage: "[--rrsig ]" +//usage: "\n\t[--rp ]" +//usage: "[--soa ]" +//usage: "[--sig ]" +//usage: "[--spf ]" +//usage: "\n\t[--sshfp ]" +//usage: "[--srv ]" +//usage: "[--ta ]" +//usage: "[--tlsa ]" +//usage: "\n\t[--txt ]" +//usage: "\n\t[--hostname.bind]" +//usage: "[--id.server]" +//usage: "[--version.bind]" +//usage: "[--version.server]" +//usage: "\n\t[]" +//usage:#define evtdig_full_usage "\n\n" +//usage: "\nOptions:" +//usage: "\n\t-4 Restrict to IPv4" +//usage: "\n\t-6 Restrict to IPv6" +//usage: "\n\t-a Both IPv4 and IPv6" +//usage: "\n\t-R Set recursion desired flag" +//usage: "\n\t-t Use TCP" +//usage: "\n\t-A Atlas measurement ID" +//usage: "\n\t-B Atlas bundle ID" +//usage: "\n\t-I Outgoing interface" +//usage: "\n\t--ad Set AD (Authentic Data) bit" +//usage: "\n\t--cd Set CD (Checking Disabled) bit" +//usage: "\n\t--client-subnet Include edns-client-subnet option" +//usage: "\n\t--cookies Send and receive cookies" +//usage: "\n\t-d|--do Set DO (DNSSEC answer OK) bit" +//usage: "\n\t-e|--edns0 Max. UDP message size" +//usage: "\n\t--edns-flags EDNS flags field" +//usage: "\n\t--edns-option Include empty EDNS option" +//usage: "\n\t--edns-version Set EDNS version" +//usage: "\n\t--https Connect using HTTPS (HTTP2 over TLS)" +//usage: "\n\t--https-path Path for HTTPS (default /dns-query)" +//usage: "\n\t--ipv6-dest-option Include IPv6 dest. 
option" +//usage: "\n\t--noabuf Omit abuf from output" +//usage: "\n\t-n|--nsid Include NSID option" +//usage: "\n\t-O|--out-file Name of output file" +//usage: "\n\t--port Destination port number" +//usage: "\n\t--p_probe_id Prepend probe ID to query name" +//usage: "\n\t--qbuf Include qbuf in output" +//usage: "\n\t--read-response Read responses" +//usage: "\n\t--resolv Use system resolvers as targets" +//usage: "\n\t--retry Retry query count times" +//usage: "\n\t--sni-cert-name Name to check in cert and SNI" +//usage: "\n\t--timeout Timeout waiting for reply" +//usage: "\n\t--tls Connect using TLS" +//usage: "\n\t--ttl Report TTL of reply" +//usage: "\n\t--write-response Write responses" +//usage: "\n\t--type Query type" +//usage: "\n\t--class Query class" +//usage: "\n\t--query Query name" +//usage: "\n\t--a IPv4 address query" +//usage: "\n\t--aaaa IPv6 address query" +//usage: "\n\t--any Any available records" +//usage: "\n\t--afsdb AFS Data Base location query" +//usage: "\n\t--apl Lists of Address Prefixes query" +//usage: "\n\t--axfr Transfer of an entire zone" +//usage: "\n\t--caa Certification Authority Restriction" +//usage: "\n\t--cert Certificate" +//usage: "\n\t--cname The Canonical Name for an alias" +//usage: "\n\t--dlv DNSSEC Lookaside Validation" +//usage: "\n\t--dname DNAME" +//usage: "\n\t--dnskey DNSKEY" +//usage: "\n\t--ds Delegation Signer" +//usage: "\n\t--ipseckey IPSECKEY" +//usage: "\n\t--loc Location Information" +//usage: "\n\t--mx Mail Exchange" +//usage: "\n\t--naptr Naming Authority Pointer" +//usage: "\n\t--ns Authoritative Name Server" +//usage: "\n\t--nsec NSEC" +//usage: "\n\t--nsec3 NSEC3" +//usage: "\n\t--nsec3param NSEC3PARAM" +//usage: "\n\t--ptr Domain Name Pointer" +//usage: "\n\t--rrsig RRSIG" +//usage: "\n\t--rp Responsible Person" +//usage: "\n\t-s|--soa Start Of A Zone Of Authority" +//usage: "\n\t--sig Security Signature" +//usage: "\n\t--spf Sender Policy Framework" +//usage: "\n\t--sshfp SSH Key Fingerprint" +//usage: "\n\t--srv Server Selection" +//usage: "\n\t--ta DNSSEC Trust Authorities" +//usage: "\n\t--tlsa TLSA" +//usage: "\n\t--txt Text Strings" +//usage: "\n\t-h|--hostname.bind CHAOS TXT hostname.bind query" +//usage: "\n\t-i|--id.server CHAOS TXT id.server query" +//usage: "\n\t-b|--version.bind CHAOS TXT version.bind query" +//usage: "\n\t-r|--version.server CHAOS TXT version.server query" + +#include "libbb.h" +#include "atlas_bb64.h" +#include "atlas_probe.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "resolv.h" +#include "readresolv.h" +#include "tcputil.h" +#include "http2.h" +#include "atlas_path.h" + +#include +#include +#include +#include +#include +#include +#define DQ(str) "\"" #str "\"" +#define DQC(str) "\"" #str "\" : " +#define ADDRESULT buf_add(&qry->result, line, strlen(line)) +#define AS(val) buf_add(&qry->result, val, strlen (val)) +#define JS(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : \"%s\" , ", val), ADDRESULT +#define JS_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH,"\"" #key"\" : \"%s\" ", val), ADDRESULT +#define JSDOT(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : \"%s.\" , ", val), ADDRESULT +#define JS1(key, fmt, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : "#fmt" , ", val), ADDRESULT +#define JD(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %d , ", val), ADDRESULT +#define JD_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %d ", val), ADDRESULT +#define 
JU(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %u , ", val), ADDRESULT +#define JU_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %u", val), ADDRESULT +#define JC snprintf(line, DEFAULT_LINE_LENGTH, ","), ADDRESULT + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +#define BLURT crondlog (LVL5 "%s:%d %s()", __FILE__, __LINE__, __func__),crondlog +#define IAMHERE crondlog (LVL5 "%s:%d %s()", __FILE__, __LINE__, __func__) + +#undef MIN /* just in case */ +#undef MAX /* also, just in case */ +#define O_RESOLV_CONF 1003 +#define O_PREPEND_PROBE_ID 1004 +#define O_EVDNS 1005 +#define O_RETRY 1006 +#define O_TYPE 1007 +#define O_CLASS 1008 +#define O_QUERY 1009 +#define O_OUTPUT_COBINED 1101 +#define O_CD 1010 +#define O_AD 1011 + +#if ENABLE_FEATURE_EVTDIG_TLS +#define O_TLS 1012 +#endif + +#define O_TTL 1013 + +#define O_HTTPS 1014 +#define O_SNI_CERT_NAME 1015 +#define O_HTTPS_PATH 1016 + +#define DNS_FLAG_RD 0x0100 + +#define MIN(a, b) (a < b ? a : b) +#define MAX(a, b) (a > b ? a : b) + +#define ENV2QRY(env) \ + ((struct query_state *)((char *)env - offsetof(struct query_state, tu_env))) + +#define MAX_DNS_BUF_SIZE 5120 +#define MAX_DNS_OUT_BUF_SIZE 512 + +/* Intervals and timeouts (all are in milliseconds unless otherwise specified) */ +#define DEFAULT_NOREPLY_TIMEOUT 5000 /* 5000 msec - 0 is illegal */ +#define DEFAULT_LINE_LENGTH 256 +#define DEFAULT_STATS_REPORT_INTERVEL 180 /* in seconds */ +#define CONN_TO 5 /* TCP connection time out in seconds */ +#define DEFAULT_RETRY_MAX 0 + +/* state of the dns query */ +#define STATUS_DNS_RESOLV 1001 +#define STATUS_TCP_CONNECTING 1002 +#define STATUS_TCP_CONNECTED 1003 +#define STATUS_TCP_WRITE 1004 +#define STATUS_NEXT_QUERY 1005 +#define STATUS_RETRANSMIT_QUERY 1006 +#define STATUS_PRINT_FREE 1006 +#define STATUS_SEND 1007 +#define STATUS_WAIT_RESPONSE 1008 +#define STATUS_FREE 0 + +// seems the following are defined in header files we use + +#ifndef ns_t_apl +#define ns_t_apl 42 +#endif + +#ifndef T_APL +#define T_APL ns_t_apl +#endif + +#ifndef ns_t_caa +#define ns_t_caa 257 +#endif + +#ifndef T_CAA +#define T_CAA ns_t_caa +#endif + +#ifndef ns_t_cert +#define ns_t_cert 37 +#endif + +#ifndef T_CERT +#define T_CERT ns_t_cert +#endif + +#ifndef ns_t_dname +#define ns_t_dname 39 +#endif + +#ifndef T_DNAME +#define T_DNAME ns_t_dname +#endif + +#ifndef ns_t_dlv +#define ns_t_dlv 32769 +#endif + +#ifndef T_DLV +#define T_DLV ns_t_dlv +#endif + +#ifndef ns_t_ds +#define ns_t_ds 43 +#endif + +#ifndef T_DS +#define T_DS ns_t_ds +#endif + +#ifndef ns_t_dnskey +#define ns_t_dnskey 48 +#endif + +#ifndef T_DNSKEY +#define T_DNSKEY ns_t_dnskey +#endif + +#ifndef ns_t_ipseckey +#define ns_t_ipseckey 45 +#endif + +#ifndef T_IPSECKEY +#define T_IPSECKEY ns_t_ipseckey +#endif + + +#ifndef ns_t_rrsig +#define ns_t_rrsig 46 +#endif + +#ifndef T_RRSIG +#define T_RRSIG ns_t_rrsig +#endif + +#ifndef ns_t_nsec +#define ns_t_nsec 47 +#endif + +#ifndef T_NSEC +#define T_NSEC ns_t_nsec +#endif + +#ifndef ns_t_nsec3 +#define ns_t_nsec3 50 +#endif + +#ifndef T_NSEC3 +#define T_NSEC3 ns_t_nsec3 +#endif + +#ifndef ns_t_nsec3param +#define ns_t_nsec3param 51 +#endif + +#ifndef T_NSEC3PARAM +#define T_NSEC3PARAM ns_t_nsec3param +#endif + +#ifndef ns_t_spf +#define ns_t_spf 99 +#endif + +#ifndef T_SPF +#define T_SPF ns_t_spf +#endif + +#ifndef ns_t_ta +#define ns_t_ta 32768 +#endif + +#ifndef T_TA +#define T_TA ns_t_ta +#endif + +#ifndef ns_t_tlsa +#define ns_t_tlsa 52 +#endif + +#ifndef T_TLSA +#define T_TLSA ns_t_tlsa +#endif + + +#ifndef 
ns_t_sshfp +#define ns_t_sshfp 44 +#endif + +#ifndef T_SSHFP +#define T_SSHFP ns_t_sshfp +#endif + +#define EDNS_OPT_CLIENT_SUBNET 8 + +#define EDNS_OPT_COOKIE 10 +#define DNS_CLIENT_COOKIE_LEN 8 +#define DNS_SERVER_COOKIE_MIN_LEN 8 +#define DNS_SERVER_COOKIE_MAX_LEN 32 + +#define RESP_PACKET 1 +#define RESP_PEERNAME 2 +#define RESP_SOCKNAME 3 +#define RESP_N_RESOLV 4 +#define RESP_RESOLVER 5 +#define RESP_LENGTH 6 +#define RESP_DATA 7 +#define RESP_CMSG 8 +#define RESP_TIMEOUT 9 +#define RESP_ADDRINFO 10 +#define RESP_ADDRINFO_SA 11 + + +/* Definition for various types of counters */ +typedef uint32_t counter_t; + +struct dns_cookie_state +{ + uint8_t client_secret[32]; /* 256 bits, we are using sha256 */ + uint8_t client_cookie[DNS_CLIENT_COOKIE_LEN]; + struct dns_server_cookie + { + uint8_t len; + uint8_t cookie[DNS_SERVER_COOKIE_MAX_LEN]; + } server_cookies[MAXNS]; +}; + +/* How to keep track of a DNS query session */ +struct tdig_base { + struct event_base *event_base; + + /* A circular list of user queries */ + struct query_state *qry_head; + + struct event statsReportEvent; + + counter_t sendfail; /* # of failed sendto() */ + counter_t sentok; /* # of successful sendto() */ + counter_t recvfail; /* # of failed recvfrom() */ + counter_t recvok; /* # of successful recvfrom() */ + counter_t martian; /* # of DNS replies we are not looking for */ + counter_t shortpkt; /* # of DNS payload with size < sizeof(struct DNS_HEADER) == 12 bytes */ + counter_t sentbytes; + counter_t recvbytes; + counter_t timeout; + counter_t queries; + counter_t activeqry; + + u_char packet [MAX_DNS_BUF_SIZE] ; + /* used only for the stand alone version */ + void (*done)(void *state, int error); +}; + +static struct tdig_base *tdig_base; + +/* How to keep track of each user query to send dns query */ +struct query_state { + + struct tdig_base *base; + char * name; /* Host identifier as given by the user */ + char * infname; /* Bind to this interface (or address) */ + u_int16_t qryid; /* query id 16 bit */ + struct event event; /* Used to detect read events on udp socket */ + int udp_fd; /* udp_fd */ + int wire_size; + struct dns_cookie_state *cookie_state; + + struct bufferevent *bev_tcp; + struct tu_env tu_env; + + int opt_v4_only ; + int opt_v6_only ; + int opt_AF; + int opt_proto; + int opt_edns0; + int opt_edns_version; + int opt_edns_flags; + int opt_edns_option; + int opt_ipv6_dest_option; + int opt_dnssec; + int opt_nsid; + int opt_client_subnet; + int opt_cookies; + int opt_qbuf; + int opt_abuf; + int opt_resolv_conf; + int opt_rd; + int opt_ad; + int opt_cd; + int opt_prepend_probe_id; + int opt_evdns; + int opt_rset; + int opt_retry_max; + int opt_query_arg; + unsigned opt_timeout; + int retry; + int resolv_i; + bool opt_do_tls; + bool opt_do_ttl; + bool opt_do_https; + char *sni_cert_name; + char *https_path; + bool client_cookie_mismatch; + + char * str_Atlas; + char * str_bundle; + u_int16_t qtype; + u_int16_t qclass; + + char *macro_lookupname; + char *lookupname; + char *server_name; + char *out_filename; + char *port_as_char; + + /* Contents of resolv.conf during a measurement */ + int resolv_max; + char *nslist[MAXNS]; + + /* For fuzzing */ + char *response_out; + char *response_in; + + uint32_t pktsize; /* Packet size in bytes */ + struct addrinfo *res, *ressave, *ressent; + + struct event noreply_timer; /* Timer to handle timeout */ + struct event nsm_timer; /* Timer to send UDP */ + struct event next_qry_timer; /* Timer event to start next query */ + struct event done_qry_timer; /* Timer 
event to call done */ + + time_t xmit_time; + struct timespec xmit_time_ts; + struct timespec qxmit_time_ts; + double triptime; + double querytime; + int rcvdttl; + + //tdig_callback_type user_callback; + void *user_callback; + void *user_pointer; /* the pointer given to us for this qry */ + + /* these objects are kept in a circular list */ + struct query_state *next, *prev; + + struct buf err; + struct buf qbuf; + struct buf packet; + struct buf result; + int qst ; + char dst_addr_str[(INET6_ADDRSTRLEN+1)]; + char loc_addr_str[(INET6_ADDRSTRLEN+1)]; + unsigned short dst_ai_family ; + unsigned short loc_ai_family ; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + + /* For DNS over HTTP2 */ + struct http2_env *http2_env; + + u_char *outbuff; + + FILE *resp_file; /* Fuzzing */ +}; +//DNS header structure +struct DNS_HEADER +{ + u_int16_t id; // identification number + +#if BYTE_ORDER == BIG_ENDIAN + u_int16_t qr :1, /* query/response flag */ + opcode :4, /* purpose of message */ + aa :1, /* authoritive answer */ + tc :1, /* truncated message */ + rd :1, /* recursion desired */ + + ra :1, /* recursion available */ + z :1, /* its z! reserved */ + ad :1, /* authenticated data */ + cd :1, /* checking disabled */ + rcode :4; /* response code */ +#elif BYTE_ORDER == LITTLE_ENDIAN || BYTE_ORDER == PDP_ENDIAN + u_int16_t rd :1, + tc :1, + aa :1, + opcode :4, + qr :1, + + rcode :4, + cd :1, + ad :1, + z :1, + ra :1; +#endif + + u_int16_t q_count; /* number of question entries */ + u_int16_t ans_count; /* number of answer entries */ + u_int16_t ns_count; /* number of authority entries */ + u_int16_t add_count; /* number of resource entries */ +}; + +// EDNS OPT pseudo-RR : EDNS0 +struct EDNS0_HEADER +{ + /** EDNS0 available buffer size, see RFC2671 */ + u_int16_t otype; + uint16_t _edns_udp_size; + u_int8_t _edns_ercode; // rcode extension + u_int8_t _edns_version; // EDNS version + u_int16_t Z ; // first bit is the D0 bit. + uint16_t _edns_rdlen; // length of rdata +}; + +// EDNS OPT pseudo-RR : eg NSID RFC 5001 +struct EDNS_NSID +{ + u_int16_t otype; + u_int16_t olength; // length of option data +}; + +// Client Subnet option (RFC 7871) +struct EDNS_CLIENT_SUBNET +{ + u_int16_t otype; + u_int16_t olength; + u_int16_t cs_family; + u_int8_t cs_src_prefix_len; + u_int8_t cs_scope_prefix_len; + // u_int8_t[] cs_address; +}; + +// DNS Cookies option (RFC 7873) +struct EDNS_COOKIES +{ + u_int16_t otype; + u_int16_t olength; + u_int8_t client_cookie[DNS_CLIENT_COOKIE_LEN]; + + /* The server cookie could be smaller or even empty */ + u_int8_t server_cookie[DNS_SERVER_COOKIE_MAX_LEN]; +}; + +//Constant sized fields of query structure +struct QUESTION +{ + u_int16_t qtype; + u_int16_t qclass; +}; + +//Constant sized fields of the resource record structure +#pragma pack(push, 1) +struct R_DATA +{ + u_int16_t type; + u_int16_t _class; + u_int32_t ttl; + u_int16_t data_len; +}; +#pragma pack(pop) + +//Pointers to resource record contents +struct RES_RECORD +{ + unsigned char *name; + struct R_DATA *resource; + unsigned char *rdata; +}; + +static struct option longopts[]= +{ + // class IN + { "a", required_argument, NULL, (100000 + T_A) }, + { "aaaa", required_argument, NULL, (100000 + T_AAAA) }, + { "any", required_argument, NULL, (100000 + T_ANY) }, + { "afsdb", required_argument, NULL, (100000 + T_AFSDB) }, + { "apl", required_argument, NULL, (100000 + T_APL) }, + { "axfr", required_argument, NULL, (100000 + T_AXFR ) }, //yet to be tested. 
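	/* Option values of the form (100000 + T_X) fold the DNS RR type into
	 * the value returned by getopt_long(); the option parser presumably
	 * recovers the qtype by subtracting 100000 again (for example,
	 * (100000 + T_AAAA) - 100000 == 28, the AAAA type code), while plain
	 * character values such as 's' for --soa double as short options. */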
+ { "caa", required_argument, NULL, (100000 + T_CAA) }, + { "cert", required_argument, NULL, (100000 + T_CERT) }, + { "cname", required_argument, NULL, (100000 + T_CNAME) }, + { "dlv", required_argument, NULL, (100000 + T_DLV) }, + { "dname", required_argument, NULL, (100000 + T_DNAME) }, + { "dnskey", required_argument, NULL, (100000 + T_DNSKEY) }, + { "ds", required_argument, NULL, (100000 + T_DS) }, + { "ipseckey", required_argument, NULL, (100000 + T_IPSECKEY) }, + { "key", required_argument, NULL, (100000 + T_KEY) }, + { "loc", required_argument, NULL, (100000 + T_LOC) }, + { "mx", required_argument, NULL, (100000 + T_MX ) }, + { "naptr", required_argument, NULL, (100000 + T_NAPTR) }, + { "ns", required_argument, NULL, (100000 + T_NS) }, + { "nsec", required_argument, NULL, (100000 + T_NSEC) }, + { "nsec3", required_argument, NULL, (100000 + T_NSEC3) }, + { "nsec3param", required_argument, NULL, (100000 + T_NSEC3PARAM) }, + { "ptr", required_argument, NULL, (100000 + T_PTR) }, + { "rrsig", required_argument, NULL, (100000 + T_RRSIG) }, + { "rp", required_argument, NULL, (100000 + T_RP) }, + { "soa", required_argument, NULL, 's' }, + { "sig", required_argument, NULL, (100000 + T_SIG) }, + { "spf", required_argument, NULL, (100000 + T_SPF) }, + { "sshfp", required_argument, NULL, (100000 + T_SSHFP) }, + { "srv", required_argument, NULL, (100000 + T_SRV) }, + { "ta", required_argument, NULL, (100000 + T_TA) }, + { "tlsa", required_argument, NULL, (100000 + T_TLSA) }, + { "tsig", required_argument, NULL, (100000 + T_TSIG) }, + { "txt", required_argument, NULL, (100000 + T_TXT) }, + + { "type", required_argument, NULL, O_TYPE }, + { "class", required_argument, NULL, O_CLASS }, + { "query", required_argument, NULL, O_QUERY}, + + // clas CHAOS + { "hostname.bind", no_argument, NULL, 'h' }, + { "id.server", no_argument, NULL, 'i' }, + { "version.bind", no_argument, NULL, 'b' }, + { "version.server", no_argument, NULL, 'r' }, + + // flags + { "c_output", no_argument, NULL, O_OUTPUT_COBINED}, + { "ad", no_argument, NULL, O_AD}, + { "cd", no_argument, NULL, O_CD}, + { "client-subnet", no_argument, NULL, 'c' }, + { "cookies", no_argument, NULL, 'C' }, + { "do", no_argument, NULL, 'd' }, + { "evdns", no_argument, NULL, O_EVDNS }, + { "noabuf", no_argument, NULL, 1002 }, + { "nsid", no_argument, NULL, 'n' }, + { "p_probe_id", no_argument, NULL, O_PREPEND_PROBE_ID }, + { "qbuf", no_argument, NULL, 1001 }, + { "resolv", no_argument, NULL, O_RESOLV_CONF }, +#if ENABLE_FEATURE_EVTDIG_TLS + { "tls", no_argument, NULL, O_TLS}, +#endif + { "https", no_argument, NULL, O_HTTPS}, + { "sni-cert-name", required_argument, NULL, O_SNI_CERT_NAME }, + { "https-path", required_argument, NULL, O_HTTPS_PATH }, + { "ttl", no_argument, NULL, O_TTL }, + + { "edns0", required_argument, NULL, 'e' }, + { "edns-flags", required_argument, NULL, '2' }, + { "edns-option", required_argument, NULL, '3' }, + { "edns-version", required_argument, NULL, '1' }, + { "ipv6-dest-option", required_argument, NULL, '5' }, + { "out-file", required_argument, NULL, 'O' }, + { "port", required_argument, NULL, 'p'}, + { "retry", required_argument, NULL, O_RETRY }, + { "timeout", required_argument, NULL, 'T' }, + + { "write-response", required_argument, NULL, 200000 + 'W'}, + { "read-response", required_argument, NULL, 200000 + 'R'}, + + { NULL, } +}; +static char line[(DEFAULT_LINE_LENGTH+1)]; + +static void tdig_stats(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h); +static int tdig_delete(void *state); +static int 
ChangetoDnsNameFormat(u_char *dns, size_t maxlen, char* qry); +struct tdig_base *tdig_base_new(struct event_base *event_base); +void tdig_start (void *qry); +void printReply(struct query_state *qry, int wire_size, unsigned char *result); +void printErrorQuick (struct query_state *qry); +static void local_exit(void *state, int error); +static void *tdig_init(int argc, char *argv[], + void (*done)(void *state, int error)); +static void process_reply(void * arg, int nrecv, struct timespec now, + struct msghdr *msgp); +static void update_server_cookie(struct query_state *qry, uint8_t *packet, + int packlen); +static int get_edns_opt(int *optoffp, int target_code, + uint8_t *packet, int packlen); +static int get_rr_len(int *namelenp, uint8_t *packet, int offset, int len, + int question); +static int mk_dns_buff(struct query_state *qry, u_char *packet, + size_t packetlen) ; +int ip_addr_cmp (u_int16_t af_a, void *a, u_int16_t af_b, void *b); +static void udp_dns_cb(int err, struct evutil_addrinfo *ev_res, void *arg); +static void noreply_callback(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h); +static void free_qry_inst(struct query_state *qry); +static void ready_callback (int unused, const short event, void * arg); + +u_int32_t get32b (unsigned char *p); +void ldns_write_uint16(void *dst, uint16_t data); +uint16_t ldns_read_uint16(const void *src); +unsigned char* ReadName(unsigned char *base, size_t size, size_t offset, + int* count); +int dns_namelen(unsigned char *base, size_t offset, size_t size); + +void print_txt_json(unsigned char *rdata, int txt_len,struct query_state *qry); + +static void http2_reply_cb(void *ref, unsigned status, + u_char *data, size_t len); +static void https_write_response(void *ref, void *buf, size_t len); +static size_t https_read_response(void *ref, void *buf, size_t len); + +int evtdig_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evtdig_main(int argc, char **argv) +{ + struct query_state *qry; + + EventBase=event_base_new(); + if (!EventBase) + { + crondlog(LVL9 "ERROR: critical event_base_new failed"); /* exits */ + } + + qry = tdig_init(argc, argv, local_exit); + if(!qry) { + crondlog(DIE9 "ERROR: critical tdig_init failed"); /* exits */ + event_base_free (EventBase); + return 1; + } + + DnsBase = evdns_base_new(EventBase, 1); + if (!DnsBase) { + crondlog(DIE9 "ERROR: critical evdns_base_new failed"); /* exits */ + event_base_free (EventBase); + return 1; + } + + tdig_start(qry); + // printf ("starting query\n"); + + event_base_dispatch (EventBase); + event_base_loopbreak (EventBase); + if(EventBase) + event_base_free(EventBase); + return 0; +} + +void print_txt_json(unsigned char *rdata, int txt_len,struct query_state *qry) +{ + int i, len; + + AS("\"RDATA\" : [ \""); + len= -1; + for(i = 0; i < txt_len; i++) { + if (len == 0) + { + AS("\", \""); + len= -1; + } + if (len == -1) + { + len= *rdata; + rdata++; + continue; + } + if( (*rdata == 34 ) || (*rdata == 92 )) { + snprintf(line, DEFAULT_LINE_LENGTH, "\\\\%03d", *(char *)rdata ); + buf_add(&qry->result, line, strlen (line)); + } + // Space - ~ + else if ((*rdata >= ' ' ) && (*rdata <= '~')) { + snprintf(line, DEFAULT_LINE_LENGTH, "%c", *(char *)rdata ); + buf_add(&qry->result, line, strlen (line)); + } + else { + snprintf(line, DEFAULT_LINE_LENGTH, "\\\\%03d", *rdata ); + buf_add(&qry->result, line, strlen (line)); + } + len--; + rdata++; + } + AS("\" ] "); +} + +static void local_exit(void *state UNUSED_PARAM, int error) +{ + /* + qry->base->done(qry); + void 
(*terminator)(void *state); + struct event_base *event_base; + struct tdig_base *tbase; + terminator = qry->base->done; + event_base = qry->base->event_base; + if (DnsBase) { + evdns_base_free(DnsBase, 0); + DnsBase = NULL; + } + tbase = qry->base; + tdig_delete(qry); + free(tbase); + event_base_loopbreak(event_base); + event_base_free(event_base); + terminator(qry); + */ + exit(error); +} + + +/* Initialize a struct timeval by converting milliseconds */ +static void msecstotv(time_t msecs, struct timeval *tv) +{ + tv->tv_sec = msecs / 1000; + tv->tv_usec = msecs % 1000 * 1000; +} + +int ip_addr_cmp (u_int16_t af_a, void *a, u_int16_t af_b, void *b) +{ + struct sockaddr_in *a4; + struct sockaddr_in *b4; + struct sockaddr_in6 *a6; + struct sockaddr_in6 *b6; + char buf[INET6_ADDRSTRLEN]; + + if(af_a != af_b) { + crondlog(LVL5 "address family mismatch in %d ", __LINE__); + return -1; + } + + if(af_a == AF_INET ) { + a4 = (struct sockaddr_in *) a; + b4 = (struct sockaddr_in *) b; + if( memcmp ( &(a4->sin_addr), &(b4->sin_addr), sizeof(struct in_addr)) == 0) { + return 0; + } + else + return 1; + } + else if(af_a == AF_INET6 ) { + a6 = (struct sockaddr_in6 *) a; + b6 = (struct sockaddr_in6 *) b; + if( memcmp ( &(a6->sin6_addr), &(b6->sin6_addr), sizeof(struct in6_addr)) == 0) { + inet_ntop(AF_INET6, &(a6->sin6_addr), buf, sizeof(buf)); + crondlog(LVL5 "address6 match A %s", buf); + inet_ntop(AF_INET6, &(b6->sin6_addr), buf, sizeof(buf)); + crondlog(LVL5 "address6 match B %s", buf); + + return 0; + } + else { + inet_ntop(AF_INET6, &(a6->sin6_addr), buf, sizeof(buf)); + crondlog(LVL5 "address6 mismatch A %s", buf); + inet_ntop(AF_INET6, &(b6->sin6_addr), buf, sizeof(buf)); + crondlog(LVL5 "address mismatch B %s", buf); + + + return 1; + } + } + return 1; +} + +static int mk_dns_buff(struct query_state *qry, u_char *packet, + size_t packetlen) +{ + struct DNS_HEADER *dns = NULL; + u_char *qname, *p; + struct QUESTION *qinfo = NULL; + struct EDNS0_HEADER *e; + struct EDNS_NSID *n; + struct EDNS_CLIENT_SUBNET *cs; + struct EDNS_COOKIES *cookies; + uint16_t rdlen; + int r, cookie_opt_len, ind, qnamelen, server_len; + char *lookup_prepend; + int probe_id; + sha256_ctx_t sha256_ctx; + uint8_t sha256_buf[32]; + + dns = (struct DNS_HEADER *)packet; + if (qry->response_in || qry->response_out) + { + qry->qryid= 12345; + } + else + { + r = random(); + r %= 65535; + + // host is storing int host byte order + qry->qryid = (uint16_t) r; + } + // crondlog(LVL5 "%s %s() : %d base address %p",__FILE__, __func__, __LINE__, qry->base); + // BLURT(LVL5 "dns qyery id %d", qry->qryid); + dns->id = (uint16_t) htons(qry->qryid); + + dns->qr = 0; //This is a query + dns->opcode = 0; //This is a standard query + dns->aa = 0; //Not Authoritative + dns->tc = 0; //This message is not truncated + dns->rd = 0; //Recursion not Desired + dns->ra = 0; //Recursion not available! hey we dont have it (lol) + dns->z = 0; + dns->ad = 0; + dns->cd = 0; + dns->rcode = 0; + + dns->q_count = htons(1); /* we have only 1 question */ + dns->ans_count = 0; + dns->ns_count = 0; + dns->add_count = htons(0); + + if (qry->opt_resolv_conf || qry->opt_rd ) { + dns->rd = 1; + } + + if (qry->opt_ad) + dns->ad = 1; + if (qry->opt_cd) + dns->cd = 1; + + //point to the query portion + qname =(u_char *)&packet[sizeof(struct DNS_HEADER)]; + + /* DNS limits the name lengths to 255 in DNS encoding. Verify that the + * buffer is big enough. 
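The 255-octet limit applies to the full wire-format name, counting each label's length byte and the terminating zero-length root label; "www.example.com.", for instance, encodes as \003www\007example\003com\000, 17 octets, so reserving qnamelen = 255 below always leaves room for a legal name.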
Also include space for EDNS0 and NSID */ + qnamelen= 255; + + if (packetlen < + sizeof(struct DNS_HEADER) + + qnamelen + sizeof(struct QUESTION) + + 1 /* dummy dns name */ + sizeof(struct EDNS0_HEADER) + + sizeof(struct EDNS_NSID) + + sizeof(struct EDNS_CLIENT_SUBNET)) + { + crondlog(DIE9 "mk_dns_buff: packet size too small, got %d", + packetlen); + } + + // should it be limited to clas C_IN ? + if(qry->opt_prepend_probe_id ) { + probe_id = get_probe_id(); + probe_id = MAX(probe_id, 0); + + + lookup_prepend = xzalloc(DEFAULT_LINE_LENGTH + sizeof(qry->lookupname)); + snprintf(lookup_prepend, (sizeof(qry->lookupname) + + DEFAULT_LINE_LENGTH - 1), + "%d.%llu.%s", probe_id, (unsigned long long)qry->xmit_time, + qry->lookupname); + + qnamelen= ChangetoDnsNameFormat(qname, qnamelen, + lookup_prepend); // fill the query portion. + + free(lookup_prepend); + } + else { + qnamelen= ChangetoDnsNameFormat(qname, qnamelen, + qry->lookupname); // fill the query portion. + } + if (qnamelen == -1) + return -1; + + qinfo =(struct QUESTION*)&packet[sizeof(struct DNS_HEADER) + qnamelen]; + + qinfo->qtype = htons(qry->qtype); + qinfo->qclass = htons(qry->qclass); + + qry->pktsize = (sizeof(struct DNS_HEADER) + qnamelen + + sizeof(struct QUESTION)) ; + if(qry->opt_nsid || qry->opt_client_subnet || qry->opt_cookies || + qry->opt_dnssec || (qry->opt_edns0 > 512) || + qry->opt_edns_version != 0 || + qry->opt_edns_flags != 0 || + qry->opt_edns_option != 0) { + p= &packet[qry->pktsize]; + *p= 0; /* encoding of '.' */ + qry->pktsize++; + + e=(struct EDNS0_HEADER*)&packet[ qry->pktsize ]; + e->otype = htons(ns_t_opt); + e->_edns_udp_size = htons(qry->opt_edns0); + e->_edns_version = qry->opt_edns_version; + e->Z = htons(qry->opt_edns_flags); + if(qry->opt_dnssec) { + e->Z |= htons(0x8000); + } + e->_edns_rdlen = htons(0); +#if 0 + crondlog(LVL5 "opt header in hex | %02X %02X %02X %02X %02X %02X %02X %02X %02X | %02X", + packet[qry->pktsize], + packet[qry->pktsize + 1], + packet[qry->pktsize + 2], + packet[qry->pktsize + 3], + packet[qry->pktsize + 4], + packet[qry->pktsize + 5], + packet[qry->pktsize + 6], + packet[qry->pktsize + 7], + packet[qry->pktsize + 8], + packet[qry->pktsize + 9]); +#endif + + qry->pktsize += sizeof(struct EDNS0_HEADER) ; + dns->add_count = htons(1); + + if(qry->opt_nsid ) { + n=(struct EDNS_NSID*)&packet[ qry->pktsize ]; + rdlen= ntohs(e->_edns_rdlen); + e->_edns_rdlen = + htons(rdlen + sizeof(struct EDNS_NSID)); + n->otype = htons(3); + n->olength = htons(0); + qry->pktsize += sizeof(struct EDNS_NSID); + } + if (qry->opt_client_subnet) + { + cs=(struct EDNS_CLIENT_SUBNET*)&packet[ qry->pktsize ]; + rdlen= ntohs(e->_edns_rdlen); + e->_edns_rdlen = + htons(rdlen + sizeof(struct EDNS_CLIENT_SUBNET)); + cs->otype = htons(EDNS_OPT_CLIENT_SUBNET); + cs->olength = htons(sizeof(struct EDNS_CLIENT_SUBNET) - + offsetof(struct EDNS_CLIENT_SUBNET, cs_family)); + cs->cs_family= htons(qry->opt_AF == AF_INET ? 1 : 2); + cs->cs_src_prefix_len= htons(0); + cs->cs_scope_prefix_len= htons(0); + qry->pktsize += sizeof(struct EDNS_CLIENT_SUBNET); + } + if (qry->opt_cookies) + { + /* Create client cookie. We should use hmac, but it + * doesn't seem to be available. However, just using + * sha256 should be good enough. 
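The code below hashes the local address, the server address, and the 256-bit client secret, then takes the last DNS_CLIENT_COOKIE_LEN (8) bytes of the SHA-256 digest as the client cookie, which roughly follows the Client-Cookie construction suggested by RFC 7873: a keyed digest over client IP, server IP, and a client secret.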
+ */ + sha256_begin(&sha256_ctx); + if (qry->opt_AF == AF_INET) + { + sha256_hash(&sha256_ctx, + &((struct sockaddr_in *)&qry-> + loc_sin6)->sin_addr, + sizeof((struct sockaddr_in *)&qry-> + loc_sin6)->sin_addr); + sha256_hash(&sha256_ctx, + &((struct sockaddr_in *)&qry-> + res->ai_addr)->sin_addr, + sizeof((struct sockaddr_in *)&qry-> + res->ai_addr)->sin_addr); + } + else + { + sha256_hash(&sha256_ctx, + &qry->loc_sin6.sin6_addr, + sizeof(qry->loc_sin6.sin6_addr)); + sha256_hash(&sha256_ctx, + &((struct sockaddr_in6 *)&qry->res-> + ai_addr)->sin6_addr, + sizeof(((struct sockaddr_in6 *)&qry-> + res->ai_addr)->sin6_addr)); + } + sha256_hash(&sha256_ctx, + &qry->cookie_state->client_secret, + sizeof(qry->cookie_state->client_secret)); + sha256_end(&sha256_ctx, sha256_buf); + + cookies=(struct EDNS_COOKIES *)&packet[ qry->pktsize ]; + + /* Fill-in client cookie */ + memcpy(cookies->client_cookie, + sha256_buf+sizeof(sha256_buf) - + DNS_CLIENT_COOKIE_LEN, DNS_CLIENT_COOKIE_LEN); + + /* Save cookie to check reply */ + memcpy(qry->cookie_state->client_cookie, + cookies->client_cookie, + DNS_CLIENT_COOKIE_LEN); + + /* Select server cookie */ + if (qry->opt_resolv_conf) + ind= qry->resolv_i; + else + ind= 0; + server_len= qry->cookie_state->server_cookies[ind].len; + assert(server_len >= 0 && + server_len <= DNS_SERVER_COOKIE_MAX_LEN); + memcpy(cookies->server_cookie, + qry->cookie_state->server_cookies[ind].cookie, + server_len); + + cookie_opt_len= offsetof(struct EDNS_COOKIES, + server_cookie[server_len]); + + rdlen= ntohs(e->_edns_rdlen); + e->_edns_rdlen = + htons(rdlen + cookie_opt_len); + cookies->otype = htons(EDNS_OPT_COOKIE); + cookies->olength = htons(cookie_opt_len - + offsetof(struct EDNS_COOKIES, client_cookie)); + qry->pktsize += cookie_opt_len; + } + if(qry->opt_edns_option ) { + /* Insert an empty option with this option code. + * Use struct EDNS_NSID because it is exacly the + * empty header we need. + */ + n=(struct EDNS_NSID*)&packet[ qry->pktsize ]; + rdlen= ntohs(e->_edns_rdlen); + e->_edns_rdlen = + htons(rdlen + sizeof(struct EDNS_NSID)); + n->otype = htons(qry->opt_edns_option); + n->olength = htons(0); + qry->pktsize += sizeof(struct EDNS_NSID); + } + + /* Transmit the request over the network */ + } + +#if 0 + if(qry->pktsize) { + buf_init(&pbuf, -1); + snprintf(line, DEFAULT_LINE_LENGTH, "%0d bytes ", qry->pktsize); + buf_add(&pbuf, line, strlen(line)); + + line[0] = '"'; + buf_add(&pbuf, line, 1); + for(int x = 0; x < qry->pktsize; x++) { + snprintf(line, DEFAULT_LINE_LENGTH, "%02X ", packet[x]); + buf_add(&pbuf, line, 3); + } + line[0] = '"'; + line[1] = '\0'; + buf_add(&pbuf, line, 2 ); + crondlog(LVL5 "payload : %s", pbuf.buf); + buf_cleanup(&pbuf); + } +#endif + + return 0; +} + +/* Attempt to transmit a UDP DNS Request to a server. 
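Roughly: walk the addrinfo list (or replay a recorded response when fuzzing), open a UDP socket, optionally bind it to an interface, set the TTL-reporting socket options when requested, connect, build the query with mk_dns_buff(), send it, and arm the no-reply timer.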
TCP is else where */ +static void tdig_send_query_callback(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) +{ + int r, fd, on; + sa_family_t af; + struct query_state *qry = h; + struct tdig_base *base = qry->base; + uint32_t nsent = 0; + u_char *outbuff= NULL; + int err = 0; + struct timeval tv_noreply; + struct addrinfo tmp_res; + struct sockaddr_storage tmp_sockaddr; + + /* Clean the no reply timer (if any was previously set) */ + evtimer_del(&qry->noreply_timer); + + qry->qst = STATUS_SEND; + outbuff = xzalloc(MAX_DNS_OUT_BUF_SIZE); + bzero(outbuff, MAX_DNS_OUT_BUF_SIZE); + //AA delete qry->outbuff = outbuff; + qry->xmit_time= atlas_time(); + + do { + if (!qry->response_in && qry->udp_fd != -1) + { + event_del(&qry->event); + close(qry->udp_fd); + qry->udp_fd= -1; + } + + if (qry->response_in) + { + size_t tmp_len; + + if (qry->udp_fd != -1) + fd= qry->udp_fd; + else + fd= open(qry->response_in, O_RDONLY); + if (fd == -1) + { + crondlog(DIE9 "unable to open '%s': %s", + qry->response_in, strerror(errno)); + } + + tmp_len= sizeof(tmp_res); + read_response(fd, RESP_ADDRINFO, + &tmp_len, &tmp_res); + assert(tmp_len == sizeof(tmp_res)); + tmp_len= sizeof(tmp_sockaddr); + read_response(fd, RESP_ADDRINFO_SA, + &tmp_len, &tmp_sockaddr); + assert(tmp_len == tmp_res->ai_addrlen); + tmp_res.ai_addr= (struct sockaddr *)&tmp_sockaddr; + qry->res= &tmp_res; + } + + if (qry->response_out) + { + write_response(qry->resp_file, RESP_ADDRINFO, + sizeof(*qry->res), qry->res); + write_response(qry->resp_file, RESP_ADDRINFO_SA, + qry->res->ai_addrlen, qry->res->ai_addr); + } + + af = ((struct sockaddr *)(qry->res->ai_addr))->sa_family; + + if (!qry->response_in) + { + if ((fd = socket(af, SOCK_DGRAM, 0) ) < 0 ) + { + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"socket\" : \"socket failed %s\"", + qry->err.size ? ", " : "", + strerror(errno)); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + free (outbuff); + outbuff = NULL; + return; + } + } + + if (af == AF_INET6 && qry->opt_ipv6_dest_option != 0) + { + do_ipv6_option(fd, 1 /* dest */, + qry->opt_ipv6_dest_option); + } + + if (qry->opt_do_ttl) + { + on = 1; + if (af == AF_INET6) { + setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, + &on, sizeof(on)); + } + else { + setsockopt(fd, IPPROTO_IP, IP_RECVTTL, + &on, sizeof(on)); + } + } + + qry->udp_fd= fd; + + evutil_make_socket_nonblocking(fd); + + event_assign(&qry->event, tdig_base->event_base, fd, + EV_READ | EV_PERSIST, ready_callback, qry); + if (!qry->response_in) + event_add(&qry->event, NULL); + + if (qry->infname) + { + if (bind_interface(fd, af, qry->infname) == -1) + { + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"socket\" : \"bind_interface failed\"", + qry->err.size ? ", " : ""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + free (outbuff); + outbuff = NULL; + return; + } + } + + if (!qry->opt_resolv_conf) + { + r= atlas_check_addr(qry->res->ai_addr, + qry->res->ai_addrlen); + if (r == -1) + { + free (outbuff); + outbuff = NULL; + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"reason\" : \"address not allowed\"", + qry->err.size ? ", " : ""); + buf_add(&qry->err, line, strlen(line)); + + /* Trigger printing of dst_addr */ + qry->ressent = qry->res; + + printReply (qry, 0, NULL); + return; + } + } + + if (qry->opt_resolv_conf && strcmp(qry->macro_lookupname, + qry->lookupname) != 0) + { + /* We want $r to generate a new random number for + * each resolver. 
+ */ + if (qry->lookupname) + { + free(qry->lookupname); + qry->lookupname= NULL; + } + qry->lookupname= + atlas_name_macro(qry->macro_lookupname); + } + qry->loc_socklen = sizeof(qry->loc_sin6); + if (qry->response_in) + ; /* No need to connect */ + else if (connect(qry->udp_fd, qry->res->ai_addr, qry->res->ai_addrlen) == -1) + { + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"socket\" : \"connect failed %s\"", + qry->err.size ? ", " : "", + strerror(errno)); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + free (outbuff); + outbuff = NULL; + return; + } + + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= sizeof(qry->loc_sin6); + read_response(qry->udp_fd, RESP_SOCKNAME, + &tmp_len, &qry->loc_sin6); + qry->loc_socklen= tmp_len; + } + else + { + if (getsockname(qry->udp_fd, + (struct sockaddr *)&qry->loc_sin6, + &qry->loc_socklen) == -1) { + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"getsockname\" : \"%s\"", + qry->err.size ? ", " : "", + strerror(errno)); + buf_add(&qry->err, line, strlen(line)); + } + if (qry->response_out) + { + write_response(qry->resp_file, + RESP_SOCKNAME, + qry->loc_socklen, + &qry->loc_sin6); + } + } + + /* Assume that we are limited to one AF. mk_dns_buff needs + * to know for the client subnet option. We also need to + * know the local address for the cookie option. + */ + qry->opt_AF = + ((struct sockaddr *)(qry->res->ai_addr))->sa_family; + + r= mk_dns_buff(qry, outbuff, MAX_DNS_OUT_BUF_SIZE); + if (r == -1) + { + /* Can't construct a DNS query */ + free (outbuff); + outbuff = NULL; + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"err\" : \"unable to format DNS query\"", + qry->err.size ? ", " : ""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + + gettime_mono(&qry->xmit_time_ts); + + if (qry->response_in) + nsent= qry->pktsize; + else + { + nsent = send(qry->udp_fd, outbuff,qry->pktsize, + MSG_DONTWAIT); + } + qry->ressent = qry->res; + + if (nsent == qry->pktsize) { + + /* One more DNS Query is sent */ + base->sentok++; + base->sentbytes += nsent; + err = 0; + /* Add the timer to handle no reply condition in the given timeout */ + msecstotv(qry->opt_timeout, &tv_noreply); + if (!qry->response_in) + { + evtimer_add(&qry->noreply_timer, &tv_noreply); + } + if(qry->opt_qbuf) { + buf_init(&qry->qbuf, -1); + buf_add_b64(&qry->qbuf, outbuff, qry->pktsize, 0); + } + + } + else { + err = 1; + base->sendfail++; + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"senderror\" : \"AF %s, %s\"", qry->err.size ? ", " : "" + , strerror(errno) , qry->res->ai_family == AF_INET ? 
"AF_INET" :"NOT AF_INET"); + buf_add(&qry->err, line, strlen(line)); + } + } while ((qry->res = qry->res->ai_next) != NULL); + free (outbuff); + outbuff = NULL; + if(err) { + printReply (qry, 0, NULL); + return; + } + qry->qst = STATUS_WAIT_RESPONSE; + if (qry->response_in) + ready_callback(0, 0, qry); +} +static void done_qry_cb(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) { + struct query_state *qry = h; + qry->qst = STATUS_FREE; + // BLURT(LVL5 "query %s is done call done", qry->server_name); + qry->base->done(qry, 0); +} + +static void next_qry_cb(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) { + struct query_state *qry = h; + // BLURT(LVL5 "next query for %s", qry->server_name); + tdig_start(qry); +} + +/* The callback to handle timeouts due to destination host unreachable condition */ +static void noreply_callback(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) +{ + struct timeval asap = { 1, 1 }; + struct query_state *qry = h; + + qry->base->timeout++; + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"timeout\" : %d", qry->err.size ? ", " : "", qry->opt_timeout); + buf_add(&qry->err, line, strlen(line)); + + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= 0; + + if (qry->resp_file) + { + read_response_file(qry->resp_file, RESP_TIMEOUT, + &tmp_len, NULL); + } + else + { + read_response(qry->udp_fd, RESP_TIMEOUT, + &tmp_len, NULL); + } + } + if (qry->response_out) + write_response(qry->resp_file, RESP_TIMEOUT, 0, NULL); + + //BLURT(LVL5 "AAA timeout for %s retry %d/%d ", qry->server_name, qry->retry, qry->opt_retry_max); + + if (qry->retry < qry->opt_retry_max) { + qry->retry++; + qry->qst = STATUS_RETRANSMIT_QUERY; + free_qry_inst(qry); + evtimer_add(&qry->next_qry_timer, &asap); + } else { + printReply (qry, 0, NULL); + } + + return; +} + + + +static void tcp_timeout_callback (int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct query_state * qry; + qry = ENV2QRY(s); + noreply_callback(0, 0, qry); +} + +static void tcp_reporterr(struct tu_env *env, enum tu_err cause, + const char *str) +{ + struct query_state * qry; + struct timeval asap = { 0, 0 }; + qry = ENV2QRY(env); + + // if (env != &state->tu_env) abort(); // Why do i need this? AA + + switch(cause) + { + case TU_DNS_ERR: + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TUDNS\" : \"%s\"", qry->err.size ? ", " : "", str ); + buf_add(&qry->err, line, strlen(line)); + break; + + case TU_SOCKET_ERR: + + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TUSOCKET\" : \"%s\"", qry->err.size ? ", " : "", str ); + buf_add(&qry->err, line, strlen(line)); + break; + + case TU_READ_ERR: + // need more than this reporting for this case AA + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TU_READ_ERR\" : \"%s\"", qry->err.size ? ", " : "", str ); + buf_add(&qry->err, line, strlen(line)); + break; + + case TU_CONNECT_ERR: + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TUCONNECT\" : \"%s\"", qry->err.size ? ", " : "", str ); + buf_add(&qry->err, line, strlen(line)); + //reconnect next one AA + break; + + case TU_OUT_OF_ADDRS: + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TU_OUT_OF_ADDRESS\" : \"%s\"", qry->err.size ? ", " : "", str ); + buf_add(&qry->err, line, strlen(line)); + break; + + case TU_BAD_ADDR: + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TU_BAD_ADDR\" : true", qry->err.size ? 
", " : ""); + buf_add(&qry->err, line, strlen(line)); + + qry->dst_ai_family = env->dns_curr->ai_addr->sa_family; + getnameinfo(env->dns_curr->ai_addr, env->dns_curr->ai_addrlen, qry->dst_addr_str, INET6_ADDRSTRLEN , NULL, 0, NI_NUMERICHOST); + + qry->ressent = qry->res; /* Trigger printing of dst_addr */ + break; + + default: + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TU_UNKNOWN\" : \"%d %s\"", qry->err.size ? ", " : "", cause, str ); + crondlog(DIE9 "reporterr: bad cause %d", cause); + break; + } + if (qry->retry < qry->opt_retry_max) { + qry->retry++; + qry->qst = STATUS_RETRANSMIT_QUERY; + free_qry_inst(qry); + evtimer_add(&qry->next_qry_timer, &asap); + } else { + printReply (qry, 0, NULL); + } +} + +static void tcp_dnscount(struct tu_env *env, int count UNUSED_PARAM) +{ + struct query_state * qry; + qry = ENV2QRY(env); + qry->qst = STATUS_SEND; + // BLURT(LVL5 "dns count for %s : %d", qry->server_name , count); +} + +static void tcp_beforeconnect(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen) +{ + struct query_state * qry; + qry = ENV2QRY(env); + qry->xmit_time= atlas_time(); + qry->dst_ai_family = addr->sa_family; + // BLURT(LVL5 "time : %d", qry->xmit_time); + getnameinfo(addr, addrlen, qry->dst_addr_str, INET6_ADDRSTRLEN , NULL, 0, NI_NUMERICHOST); + + if (qry->response_out) + { + write_response(qry->resp_file, RESP_PEERNAME, addrlen, + addr); + } + + gettime_mono(&qry->xmit_time_ts); +} + +static void tcp_connected(struct tu_env *env, struct bufferevent *bev) +{ + uint16_t payload_len ; + u_char *outbuff; + u_char *wire; + struct query_state * qry; + qry = ENV2QRY(env); + + qry->loc_socklen= sizeof(qry->loc_sin6); + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= sizeof(qry->loc_sin6); + read_response_file(qry->resp_file, RESP_SOCKNAME, + &tmp_len, &qry->loc_sin6); + qry->loc_socklen= tmp_len; + } + else + { + getsockname(bufferevent_getfd(bev), + (struct sockaddr *)&qry->loc_sin6, &qry->loc_socklen); + if (qry->response_out) + { + write_response(qry->resp_file, RESP_SOCKNAME, + qry->loc_socklen, &qry->loc_sin6); + } + } + + qry->bev_tcp = bev; + outbuff = xzalloc(MAX_DNS_OUT_BUF_SIZE); + bzero(outbuff, MAX_DNS_OUT_BUF_SIZE); + mk_dns_buff(qry, outbuff, MAX_DNS_OUT_BUF_SIZE); + payload_len = (uint16_t) qry->pktsize; + if (qry->opt_do_https) + { + qry->http2_env= http2_init(); + http2_dns(qry->http2_env, qry->bev_tcp, qry->sni_cert_name, + qry->port_as_char, + qry->https_path ? qry->https_path : "/dns-query", + outbuff, payload_len); + wire= NULL; + } + else + { + wire = xzalloc (payload_len + 4); + ldns_write_uint16(wire, qry->pktsize); + memcpy(wire + 2, outbuff, qry->pktsize); + if (!qry->response_in) + { + evbuffer_add(bufferevent_get_output(qry->bev_tcp), + wire, (qry->pktsize +2)); + } + qry->base->sentok++; + qry->base->sentbytes+= (qry->pktsize +2); + // BLURT(LVL5 "send %u bytes", payload_len ); + } + + if(qry->opt_qbuf) { + buf_init(&qry->qbuf, -1); + buf_add_b64(&qry->qbuf, outbuff, qry->pktsize, 0); + } + free(outbuff); + free(wire); + + gettime_mono(&qry->qxmit_time_ts); +} + +static void tcp_readcb(struct bufferevent *bev UNUSED_PARAM, void *ptr) +{ + struct query_state *qry = ptr; + int n, r, type; + u_char b2[2]; + struct timespec rectime; + struct evbuffer *input ; + struct DNS_HEADER *dnsR = NULL; + + + qry = ENV2QRY(ptr); + // BLURT(LVL5 "TCP readcb %s", qry->server_name ); + + if (qry->opt_do_https) + { + r= http2_dns_input(qry->http2_env, bev, http2_reply_cb, + ptr, qry->response_out ? https_write_response : 0, + qry->response_in ? 
https_read_response : 0); + if (r == -1) + { + snprintf(line, DEFAULT_LINE_LENGTH, + "%s \"TCPREAD\" : " + " \"http2_dns_input failed\"" + , qry->err.size ? ", " : ""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + } + return; + } + + gettime_mono(&rectime); + + if( qry->packet.size && (qry->packet.size >= qry->wire_size)) { + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TCPREADSIZE\" : " + " \"red more bytes than expected %d, got %zu\"" + , qry->err.size ? ", " : "" + , qry->wire_size, qry->packet.size); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + + bzero(qry->base->packet, MAX_DNS_BUF_SIZE); + + if (!qry->response_in) + input = bufferevent_get_input(bev); + else + input= NULL; /* lint */ + if(qry->wire_size == 0) { + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= sizeof(b2); + + peek_response_file(qry->resp_file, &type); + if (type == RESP_TIMEOUT) + { + noreply_callback(0, 0, qry); + return; + } + read_response_file(qry->resp_file, + RESP_LENGTH, &tmp_len, b2); + if (tmp_len != sizeof(b2)) + { + crondlog( + DIE9 "tcp_readcb: error reading from '%s'", + qry->response_in); + } + n= tmp_len; + } + else + n = evbuffer_remove(input, b2, 2 ); + // printf("got %d bytes for response size\n", n); + if(n == 2){ + if (qry->response_out) + { + write_response(qry->resp_file, + RESP_LENGTH, sizeof(b2), b2); + } + qry->wire_size = ldns_read_uint16(b2); + buf_init(&qry->packet, -1); + } + else { + + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TCPREAD\" : \"expected 2 bytes and got %d\"", qry->err.size ? ", " : "", n ); + buf_add(&qry->err, line, strlen(line)); + } + } + + /* We need at least a header */ + if (qry->wire_size < sizeof(struct DNS_HEADER)) + { + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"TCPREADSIZE\" : " + " \"reply too small, got %zu\"" + , qry->err.size ? ", " : "" + , (size_t)qry->wire_size); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + for (;;) { + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= sizeof(line); + read_response_file(qry->resp_file, + RESP_DATA, &tmp_len, line); + n= tmp_len; + } + else + n = evbuffer_remove(input,line , DEFAULT_LINE_LENGTH ); + // printf("got %d bytes for data size\n", n); + if (n <= 0) + { + if (qry->response_in) + { + noreply_callback(0,0,qry); + } + break; + } + if (qry->response_out) + { + write_response(qry->resp_file, + RESP_DATA, n, line); + } + buf_add(&qry->packet, line, n); + // crondlog(LVL5 "in readcb %s %s got %d bytes, need %d", qry->str_Atlas, qry->server_name, qry->packet.size, qry->wire_size); + if(qry->wire_size == qry->packet.size) { + // crondlog(LVL5 "in readcb %s %s red %d bytes ", qry->str_Atlas, qry->server_name, qry->wire_size); + // crondlog(LVL5 "qry pointer address readcb %p qry.id, %d", qry->qryid); + // crondlog(LVL5 "DBG: base pointer address readcb %p", qry->base ); + dnsR = (struct DNS_HEADER*) qry->packet.buf; + if ( ntohs(dnsR->id) == qry->qryid ) { + qry->triptime = (rectime.tv_sec - + qry->xmit_time_ts.tv_sec)*1000 + + (rectime.tv_nsec - + qry->xmit_time_ts.tv_nsec)/1e6; + qry->querytime = (rectime.tv_sec - + qry->qxmit_time_ts.tv_sec)*1000 + + (rectime.tv_nsec - + qry->qxmit_time_ts.tv_nsec)/1e6; + printReply (qry, qry->packet.size, (unsigned char *)qry->packet.buf); + } + else { + bzero(line, DEFAULT_LINE_LENGTH); + snprintf(line, DEFAULT_LINE_LENGTH, " %s \"idmismatch\" : \"mismatch id from tcp fd %d\"", qry->err.size ? 
", " : "", n); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + } + return; + } + } +} + +static void https_write_response(void *ref, void *buf, size_t len) +{ + struct query_state *qry; + + qry = ENV2QRY(ref); + + write_response(qry->resp_file, RESP_DATA, len, buf); +} + +static size_t https_read_response(void *ref, void *buf, size_t len) +{ + size_t tmp_len; + struct query_state *qry; + + qry = ENV2QRY(ref); + + tmp_len= len; + read_response_file(qry->resp_file, RESP_DATA, &tmp_len, buf); + return tmp_len; +} + +static void http2_reply_cb(void *ref, unsigned status, + u_char *data, size_t len) +{ + struct query_state *qry; + struct DNS_HEADER *dnsR = NULL; + struct timespec rectime; + + qry = ENV2QRY(ref); + + gettime_mono(&rectime); + + if (status != 200) + { + bzero(line, DEFAULT_LINE_LENGTH); + snprintf(line, DEFAULT_LINE_LENGTH, + " %s \"error\" : \"http2 request failed with %d\"", + qry->err.size ? ", " : "", status); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + dnsR = (struct DNS_HEADER*) data; + if ( ntohs(dnsR->id) == qry->qryid ) { + qry->triptime = (rectime.tv_sec - + qry->xmit_time_ts.tv_sec)*1000 + + (rectime.tv_nsec - + qry->xmit_time_ts.tv_nsec)/1e6; + qry->querytime = (rectime.tv_sec - + qry->qxmit_time_ts.tv_sec)*1000 + + (rectime.tv_nsec - + qry->qxmit_time_ts.tv_nsec)/1e6; + printReply (qry, len, + (unsigned char *)data); + } + else { + bzero(line, DEFAULT_LINE_LENGTH); + snprintf(line, DEFAULT_LINE_LENGTH, + " %s \"idmismatch\" : \"mismatch id from http2\"", + qry->err.size ? ", " : ""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + } +} + +static void tcp_writecb(struct bufferevent *bev UNUSED_PARAM, void *ptr UNUSED_PARAM) +{ + /* + struct query_state * qry; + qry = ENV2QRY(ptr); + */ + // BLURT(LVL5 "TCP writecb"); +} + + + +/* + * Called by libevent when the kernel says that the raw socket is ready for reading. + * + * It reads a packet from the wire and attempt to decode and relate DNS Request/Reply. + * + * To be legal the packet received must be: + * o of enough size (> DNS Header size) + * o the one we are looking for (matching the same identifier of all the packets the program is able to send) + */ + +static void process_reply(void * arg, int nrecv, struct timespec now, + struct msghdr *msgp) +{ + int rcvdttl; + struct DNS_HEADER *dnsR = NULL; + struct tdig_base * base; + struct query_state * qry; + struct cmsghdr *cmsgptr; + + qry= arg; + base= qry->base; + + rcvdttl= -42; + if (msgp) + { + for (cmsgptr= CMSG_FIRSTHDR(msgp); cmsgptr; + cmsgptr= CMSG_NXTHDR(msgp, cmsgptr)) + { + if (cmsgptr->cmsg_len == 0) + break; /* Can this happen? 
*/ + if (cmsgptr->cmsg_level == IPPROTO_IP && + cmsgptr->cmsg_type == IP_TTL) + { + rcvdttl= *(int *)CMSG_DATA(cmsgptr); + continue; + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_HOPLIMIT) + { + rcvdttl= *(int *)CMSG_DATA(cmsgptr); + continue; + } + fprintf(stderr, "process_reply: level %d, type %d\n", + cmsgptr->cmsg_level, cmsgptr->cmsg_type); + } + } + + if (nrecv < sizeof (struct DNS_HEADER)) { + base->shortpkt++; + return; + } + + dnsR = (struct DNS_HEADER*) base->packet; + base->recvok++; + + + // crondlog(LVL7 "DBG: base address process reply %p, nrec %d", base, nrecv); + if (ntohs(dnsR->id) != qry->qryid) + { + base->martian++; + crondlog(LVL7 "DBG: wrong id %d for qry, expected %d", + ntohs(dnsR->id), qry->qryid); + return; + } + + qry->base->recvbytes += nrecv; + qry->triptime = (now.tv_sec-qry->xmit_time_ts.tv_sec)*1000 + + (now.tv_nsec-qry->xmit_time_ts.tv_nsec)/1e6; + qry->rcvdttl= rcvdttl; + + /* Clean the noreply timer */ + evtimer_del(&qry->noreply_timer); + + if (qry->opt_cookies) + update_server_cookie(qry, base->packet, nrecv); + + printReply (qry, nrecv, base->packet); + return; +} + +static void update_server_cookie(struct query_state *qry, uint8_t *packet, + int packlen) +{ + int ind, optlen, optoff; + + /* Should wipe server cookie first. */ + fprintf(stderr, "update_server_cookie: should wipe server cookie\n"); + + /* Select server cookie */ + if (qry->opt_resolv_conf) + ind= qry->resolv_i; + else + ind= 0; + qry->cookie_state->server_cookies[ind].len= 0; + + optlen= get_edns_opt(&optoff, EDNS_OPT_COOKIE, packet, packlen); + if (optlen == -1) + { + return; + } + + /* We expect a server cookie */ + if (optlen < DNS_CLIENT_COOKIE_LEN + DNS_SERVER_COOKIE_MIN_LEN || + optlen > DNS_CLIENT_COOKIE_LEN + DNS_SERVER_COOKIE_MAX_LEN) + { + /* Wrong size */ + printf("update_server_cookie: wrong size\n"); + return; + } + + /* Check if client cookie matches */ + if (memcmp(qry->cookie_state->client_cookie, packet+optoff, + DNS_CLIENT_COOKIE_LEN) != 0) + { + qry->client_cookie_mismatch= 1; + /* Should report cookie mismatch */ + printf("update_server_cookie: client cookie mismatch\n"); + return; + } + + /* Should update server cookie */ + + qry->cookie_state->server_cookies[ind].len= + optlen-DNS_CLIENT_COOKIE_LEN;; + memcpy(qry->cookie_state->server_cookies[ind].cookie, + packet+optoff+DNS_CLIENT_COOKIE_LEN, + qry->cookie_state->server_cookies[ind].len); +} + +static int get_edns_opt(int *optoffp, int target_code, + uint8_t *packet, int packlen) +{ + int i, l, o, is_question, qcount, recs_to_skip; + int namelen, opt_code, opt_len, opt_o, rdata_o, rdlen, rr_o, type; + struct DNS_HEADER *dnsR = NULL; + + dnsR = (struct DNS_HEADER*) packet; + + qcount= ntohs(dnsR->q_count); + recs_to_skip= qcount + + ntohs(dnsR->ans_count ) + + ntohs(dnsR->ns_count) + + ntohs(dnsR->add_count); + o= sizeof(struct DNS_HEADER); + for (i= 0; i= recs_to_skip) + { + printf("get_edns_opt: no OPT RR\n"); + return -1; /* No OPT RR */ + } + opt_o= 0; + for (opt_o= 0; opt_o+4 <= rdlen;) + { + opt_code= (packet[rdata_o + opt_o] << 8) | + packet[rdata_o + opt_o + 1]; + opt_len= (packet[rdata_o + opt_o +2] << 8) | + packet[rdata_o + opt_o + 3]; + if (opt_o + 4 + opt_len > rdlen) + { + printf("get_edns_opt: no option header\n"); + return -1; + } + printf("get_edns_opt: code %d\n", opt_code); + if (opt_code == target_code) + { + *optoffp= rdata_o + opt_o + 4; + return opt_len; + } + opt_o += 4 + opt_len; + } + + /* Not found */ + return -1; +} + +static int get_rr_len(int *namelenp, 
uint8_t *packet, int offset, int len, + int question) +{ + int label_len, name_len, rdlength, rr_len; + + /* Handle name */ + name_len= 0; + for(;;) + { + if (offset+name_len >= len) + return -1; + label_len= packet[offset+name_len]; + if (label_len > 63) + { + if (label_len < 192) + return -1; /* bogus */ + name_len += 2; + break; + } + else if (label_len == 0) + { + name_len++; + break; + } + name_len += label_len+1; + } + + *namelenp= name_len; + + if (question) + return name_len + /*qtype*/ 2 + /*qclass*/ 2; + + rr_len= name_len + /*type*/ 2 + /*class*/ 2 + /*ttl*/ 4; + if (offset+rr_len+2 > len) + return -1; + rdlength= (packet[offset+rr_len] << 8) | packet[offset+rr_len+1]; + + rr_len += 2 + rdlength; + + return rr_len; +} + +static void ready_callback (int unused UNUSED_PARAM, const short event UNUSED_PARAM, void * arg) +{ + struct query_state * qry; + int nrecv; + struct timespec rectime; + struct msghdr msg; + struct iovec iov[1]; + struct sockaddr_in remote; + char cmsgbuf[256]; + + qry = arg; + + if (qry->response_in) + { + int type; + + peek_response(qry->udp_fd, &type); + if (type == RESP_TIMEOUT) + { + noreply_callback(0, 0, qry); + return; + } + } + + + /* Time the packet has been received */ + gettime_mono(&rectime); + + bzero(qry->base->packet, MAX_DNS_BUF_SIZE); + + iov[0].iov_base= qry->base->packet; + iov[0].iov_len= sizeof(qry->base->packet); + msg.msg_name= &remote; + msg.msg_namelen= sizeof(remote); + msg.msg_iov= iov; + msg.msg_iovlen= 1; + msg.msg_control= cmsgbuf; + msg.msg_controllen= sizeof(cmsgbuf); + msg.msg_flags= 0; /* Not really needed */ + + /* Receive data from the network */ + if (qry->response_in) + { + size_t tmp_len; + + tmp_len= sizeof(qry->base->packet); + read_response(qry->udp_fd, RESP_PACKET, &tmp_len, + qry->base->packet); + nrecv= tmp_len; + + tmp_len= sizeof(remote); + read_response(qry->udp_fd, RESP_PEERNAME, &tmp_len, + &remote); + if (tmp_len != sizeof(remote)) + { + crondlog( + DIE9 "ready_callback: error reading from '%s'", + qry->response_in); + } + + tmp_len= sizeof(cmsgbuf); + read_response(qry->udp_fd, RESP_CMSG, &tmp_len, cmsgbuf); + msg.msg_controllen= tmp_len; + } + else + { + nrecv= recvmsg(qry->udp_fd, &msg, MSG_DONTWAIT); + } + if (nrecv < 0) { + /* One more failure */ + qry->base->recvfail++; + return ; + } + if (qry->response_out) + { + write_response(qry->resp_file, RESP_PACKET, nrecv, + qry->base->packet); + write_response(qry->resp_file, RESP_PEERNAME, sizeof(remote), + &remote); + write_response(qry->resp_file, RESP_CMSG, msg.msg_controllen, + cmsgbuf); + } + + process_reply(arg, nrecv, rectime, &msg); + return; +} + +static bool argProcess (int argc, char *argv[], struct query_state *qry ) +{ + char *validated_fn= NULL; + + if( qry->opt_resolv_conf) { + qry->resolv_i = 0; + } + else if (optind != argc-1) { + crondlog(LVL9 "ERROR no server IP address in input"); + tdig_delete(qry); + return TRUE; + } + else + qry->server_name = strdup(argv[optind]); + + if (qry->macro_lookupname == NULL) { + crondlog(LVL9 "ERROR no query in command line"); + tdig_delete(qry); + return TRUE; + } + + if (qry->macro_lookupname[strlen(qry->macro_lookupname) - 1] != '.') { + crondlog(LVL9 "ERROR query %s does not end with a dot ", + qry->macro_lookupname); + tdig_delete(qry); + return TRUE; + } + + if (qry->out_filename) + { + validated_fn= rebased_validated_filename(ATLAS_SPOOLDIR, + qry->out_filename, SAFE_PREFIX_REL); + if (validated_fn == NULL) + { + crondlog(LVL8 "insecure file '%s'", qry->out_filename); + tdig_delete(qry); + return TRUE; 
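/*
 * A minimal, self-contained sketch of the ancillary-data pattern that
 * process_reply() and ready_callback() rely on: the received TTL (or
 * IPv6 hop limit) only arrives as a control message if the socket was
 * configured with IP_RECVTTL / IPV6_RECVHOPLIMIT beforehand.  The
 * names recv_with_ttl, fd and buf are placeholders, not identifiers
 * from this patch.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static ssize_t recv_with_ttl(int fd, void *buf, size_t len, int *ttlp)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cm;
	char cbuf[256];
	ssize_t n;

	/* Enable delivery of the TTL/hop limit before the first receive:
	 *	int on= 1;
	 *	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
	 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
	 */
	memset(&msg, 0, sizeof(msg));
	iov.iov_base= buf;
	iov.iov_len= len;
	msg.msg_iov= &iov;
	msg.msg_iovlen= 1;
	msg.msg_control= cbuf;
	msg.msg_controllen= sizeof(cbuf);

	n= recvmsg(fd, &msg, MSG_DONTWAIT);
	if (n < 0)
		return -1;

	*ttlp= -1;
	for (cm= CMSG_FIRSTHDR(&msg); cm; cm= CMSG_NXTHDR(&msg, cm))
	{
		if ((cm->cmsg_level == IPPROTO_IP &&
			cm->cmsg_type == IP_TTL) ||
			(cm->cmsg_level == IPPROTO_IPV6 &&
			cm->cmsg_type == IPV6_HOPLIMIT))
		{
			memcpy(ttlp, CMSG_DATA(cm), sizeof(*ttlp));
		}
	}
	return n;
}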
+ } + free(qry->out_filename); + qry->out_filename= validated_fn; validated_fn= NULL; + } + if (qry->response_in) + { + validated_fn= rebased_validated_filename(ATLAS_SPOOLDIR, + qry->response_in, ATLAS_FUZZING_REL); + if (!validated_fn) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + qry->response_in); + tdig_delete(qry); + return TRUE; + } + free(qry->response_in); + qry->response_in= validated_fn; validated_fn= NULL; + } + if (qry->response_out) + { + validated_fn= rebased_validated_filename(ATLAS_SPOOLDIR, + qry->response_out, ATLAS_FUZZING_REL); + if (!validated_fn) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + qry->response_out); + tdig_delete(qry); + return TRUE; + } + free(qry->response_out); + qry->response_out= validated_fn; validated_fn= NULL; + } + + if(qry->opt_v6_only == 0) + { + qry->opt_v4_only = 1; + qry->opt_AF = AF_INET; + } + +#if ENABLE_FEATURE_EVTDIG_TLS + if (qry->opt_do_tls || qry->opt_do_https) + { + qry->opt_proto = 6; /* switch to TCP for TLS and HTTPS */ + } +#endif + + if (qry->port_as_char == NULL) + { +#if ENABLE_FEATURE_EVTDIG_TLS + if (qry->opt_do_https) + { + qry->port_as_char = strdup("443"); + } + else if (qry->opt_do_tls) + { + qry->port_as_char = strdup("853"); + } + else +#endif + { + qry->port_as_char = strdup("53"); + } + } + return FALSE; +} + +/* this called for each query/line in eperd */ +static void *tdig_init(int argc, char *argv[], + void (*done)(void *state, int error)) +{ + char *check; + struct query_state *qry; + int c, r, fd; + + if (!tdig_base) + tdig_base = tdig_base_new(EventBase); + + if (!tdig_base) { + crondlog(LVL8 "tdig_base_new failed"); + return NULL; + } + + tdig_base->done = done; + + qry=xzalloc(sizeof(*qry)); + + // initialize per query state variables; + qry->qtype = T_TXT; /* TEXT */ + qry->qclass = C_CHAOS; + qry->opt_v4_only = 0; + qry->opt_v6_only = 0; + qry->str_Atlas = NULL; + qry->str_bundle = NULL; + qry->out_filename = NULL; + qry->opt_proto = 17; + qry->cookie_state = NULL; + qry->udp_fd = -1; + qry->server_name = NULL; + qry->infname = NULL; + tdig_base->activeqry++; + qry->qst = STATUS_FREE; + qry->retry = 0; + qry->resolv_i = 0; + qry->opt_retry_max = DEFAULT_RETRY_MAX; + qry->wire_size = 0; + qry->triptime = 0; + qry->opt_edns0 = 512; + qry->opt_edns_version = 0; + qry->opt_edns_flags = 0; + qry->opt_edns_option = 0; + qry->opt_ipv6_dest_option = 0; + qry->opt_dnssec = 0; + qry->opt_nsid = 0; + qry->opt_client_subnet = 0; + qry->opt_cookies = 0; + qry->opt_qbuf = 0; + qry->opt_abuf = 1; + qry->opt_rd = 0; + qry->opt_ad = 0; + qry->opt_cd = 0; + qry->opt_evdns = 0; + qry->opt_rset = 0; + qry->opt_prepend_probe_id = 0; + qry->ressave = NULL; + qry->ressent = NULL; + buf_init(&qry->err, -1); + buf_init(&qry->packet, -1); + qry->opt_resolv_conf = 0; + qry->macro_lookupname = NULL; + qry->lookupname = NULL; + qry->port_as_char = NULL; + qry->dst_ai_family = 0; + qry->loc_ai_family = 0; + qry->loc_sin6.sin6_family = 0; + qry->result.offset = qry->result.size = qry->result.maxsize= 0; + qry->result.buf = NULL; + qry->rcvdttl= -42; + qry->opt_query_arg = 0; + qry->opt_timeout= DEFAULT_NOREPLY_TIMEOUT; + qry->opt_do_tls = 0; + qry->opt_do_ttl = 0; + qry->opt_do_https = 0; + qry->sni_cert_name = NULL; + qry->https_path = NULL; + qry->resp_file= NULL; + qry->http2_env= NULL; + + /* initialize callbacks : */ + /* sendpacket called by UDP send */ + evtimer_assign(&qry->nsm_timer, tdig_base->event_base, + tdig_send_query_callback, qry); + /* no reply timeout for udp queries */ + 
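/*
 * The nsm/noreply/next/done timers set up here all follow the same
 * libevent pattern: bind a struct event to a base and a callback with
 * evtimer_assign(), then schedule it with evtimer_add() and cancel it
 * with evtimer_del().  A minimal sketch of that pattern; on_timeout,
 * my_state and arm_timeout are placeholder names, not identifiers
 * from this patch.
 */
#include <sys/time.h>
#include <event2/event.h>
#include <event2/event_struct.h>

struct my_state {
	struct event timeout_ev;
};

static void on_timeout(evutil_socket_t fd, short events, void *arg)
{
	struct my_state *st= arg;

	(void)fd; (void)events; (void)st;
	/* one-shot: re-arm with evtimer_add() if it should fire again */
}

static void arm_timeout(struct event_base *base, struct my_state *st,
	time_t secs)
{
	struct timeval tv= { secs, 0 };

	evtimer_assign(&st->timeout_ev, base, on_timeout, st);
	evtimer_add(&st->timeout_ev, &tv);
}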
evtimer_assign(&qry->noreply_timer, tdig_base->event_base, + noreply_callback, qry); + + /* callback/timer used for restarting query by --resolve */ + evtimer_assign(&qry->next_qry_timer, tdig_base->event_base, next_qry_cb, qry); + evtimer_assign(&qry->done_qry_timer, tdig_base->event_base, done_qry_cb, qry); + + optind = 0; + while (c= getopt_long(argc, argv, "46adD:e:tbhinqO:Rrs:A:B:I:?", longopts, NULL), c != -1) { + switch(c) { + case '1': + qry->opt_edns_version= + strtoul(optarg, &check, 10); + break; + case '2': + qry->opt_edns_flags= + strtoul(optarg, &check, 0); + break; + case '3': + qry->opt_edns_option= + strtoul(optarg, &check, 0); + break; + + case '4': + qry->opt_v4_only = 1; + qry->opt_AF = AF_INET; + break; + + case '5': + qry->opt_ipv6_dest_option= + strtoul(optarg, &check, 10); + break; + + case '6': + qry->opt_v6_only = 1; + qry->opt_AF = AF_INET6; + break; + + case 'a': + qry->opt_v6_only = 1; + qry->opt_v4_only = 1; + break; + + case 'A': + if (!validate_atlas_id(optarg)) + { + crondlog(LVL8 "bad atlas ID '%s'", + optarg); + tdig_delete(qry); + return NULL; + } + qry->str_Atlas = strdup(optarg); + break; + + case 'B': + if (!validate_atlas_id(optarg)) + { + crondlog(LVL8 "bad bundle ID '%s'", + optarg); + tdig_delete(qry); + return NULL; + } + qry->str_bundle= strdup(optarg); + break; + + case 'b': + qry->macro_lookupname = + strdup ("version.bind."); + break; + + case 'c': + qry->opt_client_subnet = 1; + break; + case 'C': + qry->opt_cookies = 1; + break; + + case 'd': + qry->opt_dnssec = 1; + break; + + case 'e': + qry->opt_edns0= strtoul(optarg, &check, 10); + break; + + case 'h': + qry->macro_lookupname = + strdup("hostname.bind."); + break; + + case 'i': + qry->macro_lookupname = strdup("id.server."); + break; + case 'I': + free(qry->infname); + qry->infname= strdup(optarg); + break; + + case 'n': + qry->opt_nsid = 1; + break; + + case 'p': + qry->port_as_char = strdup(optarg); + break; + + case 'O': + qry->out_filename = strdup(optarg); + break; + + case 'r': + qry->macro_lookupname = + strdup("version.server."); + break; + + case 'R': + qry->opt_rd = 1; + break; + + case 's': + qry->qtype = T_SOA; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case 't': + qry->opt_proto = 6; + break; + + case 'T' : + qry->opt_timeout = strtoul(optarg, NULL, 10); + break; + + case 1001: + qry->opt_qbuf = 1; + break; + + case 1002: + qry->opt_abuf = 0; + break; + + +#if ENABLE_FEATURE_EVTDIG_TLS + case O_TLS: + qry->opt_do_tls = 1; + break; +#endif + + case O_TTL: + qry->opt_do_ttl = 1; + break; + + case O_HTTPS: + qry->opt_do_https = 1; + break; + + case O_SNI_CERT_NAME: + qry->sni_cert_name = strdup(optarg); + break; + + case O_HTTPS_PATH: + qry->https_path = strdup(optarg); + break; + + case O_TYPE: + qry->qtype = strtoul(optarg, &check, 10); + if ((qry->qtype >= 0 ) && + (qry->qclass < 65536)) { + + if (! qry->qclass ) + qry->qclass = C_IN; + + break; + } + else { + fprintf(stderr, "ERROR unknown Q " + "--typae %s ??. 0 - " + "65535\n", optarg); + tdig_delete(qry); + return (0); + } + break; + + case O_AD: + qry->opt_ad = 1; + break; + + case O_CD: + qry->opt_cd = 1; + break; + + case O_CLASS: + qry->qclass = strtoul(optarg, &check, 10); + if ((qry->qclass >= 0 ) && + (qry->qclass < 65536)) { + break; + } + else { + fprintf(stderr, "ERROR unknown Q class" + " --class %s ??. 
0 - " + "65535\n", optarg); + tdig_delete(qry); + return (0); + } + + case O_RETRY : + qry->opt_query_arg = 1; + qry->opt_retry_max = strtoul(optarg, NULL, 10); + break; + + case O_QUERY: + qry->opt_query_arg = 1; + qry->macro_lookupname = strdup(optarg); + break; + + case O_RESOLV_CONF : + qry->opt_resolv_conf = 1; + qry->opt_v6_only = 1; + qry->opt_v4_only = 1; + qry->opt_rset = 1; + break; + + case O_PREPEND_PROBE_ID: + qry->opt_prepend_probe_id = 1; + break; + + case O_EVDNS: + qry->opt_evdns = 1; + break; + + case (100000 + T_A): + qry->qtype = T_A; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_AAAA ): + qry->qtype = T_AAAA ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_AFSDB ): + qry->qtype = T_AFSDB ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_ANY): + qry->qtype = T_ANY ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_APL): + qry->qtype = T_APL ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_AXFR ): + qry->qtype = T_AXFR ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_CAA): + qry->qtype = T_CAA ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_CERT): + qry->qtype = T_CERT ; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_CNAME): + qry->qtype = T_CNAME; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_DLV): + qry->qtype = T_DLV; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_DNAME): + qry->qtype = T_DNAME; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_DNSKEY): + qry->qtype = T_DNSKEY; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_DS): + qry->qtype = T_DS; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_IPSECKEY): + qry->qtype = T_IPSECKEY; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_LOC): + qry->qtype = T_LOC; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_MX): + qry->qtype = T_MX; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_NAPTR): + qry->qtype = T_NAPTR; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_NS): + qry->qtype = T_NS; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_NSEC): + qry->qtype = T_NSEC; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_NSEC3): + qry->qtype = T_NSEC3; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_NSEC3PARAM): + qry->qtype = T_NSEC3PARAM; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_PTR): + qry->qtype = T_PTR; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_RRSIG): + qry->qtype = T_RRSIG; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_RP): + qry->qtype = T_RP; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_SIG): + qry->qtype = T_SIG; + 
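/*
 * Sketch of the "100000 + type code" convention used by the long
 * options handled above: getopt_long() simply returns the val member
 * of struct option, and val may be any int, so each per-RR-type
 * option can carry its numeric type code directly and the switch arm
 * only has to store it.  The table below is illustrative only (it
 * uses the portable ns_t_* constants from <arpa/nameser.h> rather
 * than the T_* names in this file) and is not the option table from
 * this patch.
 */
#include <getopt.h>
#include <arpa/nameser.h>

static const struct option demo_longopts[]= {
	{ "a",    required_argument, NULL, 100000 + ns_t_a },
	{ "aaaa", required_argument, NULL, 100000 + ns_t_aaaa },
	{ "soa",  required_argument, NULL, 100000 + ns_t_soa },
	{ "txt",  required_argument, NULL, 100000 + ns_t_txt },
	{ NULL, 0, NULL, 0 }
};
/*
 * switch (getopt_long(argc, argv, "", demo_longopts, NULL)) {
 * case 100000 + ns_t_a:
 *	qtype= ns_t_a; qclass= ns_c_in; break;
 * ...
 * }
 */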
qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_SPF): + qry->qtype = T_SPF; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_SRV): + qry->qtype = T_SRV; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_SSHFP): + qry->qtype = T_SSHFP; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_TA): + qry->qtype = T_TA; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_TLSA): + qry->qtype = T_TLSA; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_TSIG): + qry->qtype = T_TSIG; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case (100000 + T_TXT): + qry->qtype = T_TXT; + qry->qclass = C_IN; + qry->macro_lookupname = strdup(optarg); + break; + + case 200000 + 'W': + if (qry->response_out) free(qry->response_out); + qry->response_out= strdup(optarg); + break; + + case 200000 + 'R': + if (qry->response_in) free(qry->response_in); + qry->response_in= strdup(optarg); + break; + + default: + fprintf(stderr, "ERROR unknown option %d ??\n", c); + tdig_delete(qry); + return (0); + break; + } + } + + if(argProcess(argc, argv, qry)) + return NULL; + + qry->opt_evdns = 1; /* Always enabled, leave the old code in + * place for now. + */ + + if (qry->opt_cookies) + { + qry->cookie_state= malloc(sizeof(*qry->cookie_state)); + memset(qry->cookie_state, '\0', sizeof(*qry->cookie_state)); + + /* Get secret */ + fd= open("/dev/urandom", O_RDONLY); + if (fd == -1) + { + crondlog(LVL8 "unable to open /dev/urandom"); + return NULL; + } + r= read(fd, qry->cookie_state->client_secret, + sizeof(qry->cookie_state->client_secret)); + close(fd); + if (r != sizeof(qry->cookie_state->client_secret)) + { + crondlog(LVL8 "unable to read from /dev/urandom"); + return NULL; + } + } + + qry->base = tdig_base; + + /* insert this qry into the list of queries */ + if (!tdig_base->qry_head) { + qry->next = qry->prev = qry; + tdig_base->qry_head = qry; + tdig_stats( 0, 0, tdig_base); // call this first time to initial values. + // crondlog(LVL7 "new head qry %s qry->prev %s qry->next %s", qry->str_Atlas, qry->prev->str_Atlas, qry->next->str_Atlas); + } + else { + crondlog(LVL7 "old head hea %s hea->prev %s hea->next %s", tdig_base->qry_head->str_Atlas, tdig_base->qry_head->prev->str_Atlas, tdig_base->qry_head->next->str_Atlas); + if (tdig_base->qry_head->prev == tdig_base->qry_head) { + tdig_base->qry_head->prev = qry; + crondlog(LVL7 "head->prev == head quereis %d AA", tdig_base->activeqry); + } + qry->next = tdig_base->qry_head->next; + qry->prev = tdig_base->qry_head; + tdig_base->qry_head->next->prev = qry; + tdig_base->qry_head->next = qry; + crondlog(LVL7 " qry %s qry->prev %s qry->next %s", qry->str_Atlas, qry->prev->str_Atlas, qry->next->str_Atlas); + crondlog(LVL7 "new head hea %s hea->prev %s hea->next %s", tdig_base->qry_head->str_Atlas, tdig_base->qry_head->prev->str_Atlas, tdig_base->qry_head->next->str_Atlas); + } + + return qry; +} + + + + +/* called only once. 
Initialize tdig_base variables here */ +struct tdig_base * tdig_base_new(struct event_base *event_base) +{ + struct timeval tv; + + tdig_base= xzalloc(sizeof( struct tdig_base)); + if (tdig_base == NULL) + { + return (NULL); + } + + tdig_base->qry_head = NULL; + tdig_base->sendfail = 0; + tdig_base->sentok = 0; + tdig_base->recvfail = 0; + tdig_base->recvok = 0; + tdig_base->martian = 0; + tdig_base->shortpkt = 0; + tdig_base->sentbytes = 0; + tdig_base->recvbytes = 0; + tdig_base->timeout = 0; + tdig_base->activeqry = 0; + + memset(tdig_base, 0, sizeof(struct tdig_base)); + tdig_base->event_base = event_base; + + //memset(&tdig_base-->loc_sin6, '\0', sizeof(tdig_base-->loc_sin6)); + //tdig_base-->loc_socklen= 0; + + evtimer_assign(&tdig_base->statsReportEvent, tdig_base->event_base, tdig_stats, tdig_base); + + // Define the callback to handle UDP Reply + // add the raw file descriptor to those monitored for read events + + tv.tv_sec = DEFAULT_STATS_REPORT_INTERVEL; + tv.tv_usec = 0; + event_add(&tdig_base->statsReportEvent, &tv); + + return tdig_base; +} + +static void udp_dns_cb(int err, struct evutil_addrinfo *ev_res, void *arg) { + struct query_state *qry; + qry= arg; + if (err) { + snprintf(line, DEFAULT_LINE_LENGTH, "\"evdns_getaddrinfo\": " + "\"%s %s\"", qry->server_name, + evutil_gai_strerror(err)); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return ; + + } + else { + qry->res = ev_res; + qry->ressave = ev_res; + qry->qst = STATUS_SEND; + tdig_send_query_callback(0, 0, qry); + } +} + +void tdig_start (void *arg) +{ + struct timeval asap = { 0, 0 }; + struct timeval interval; + + int i, err_num; + size_t len; + struct query_state *qry; + struct addrinfo hints, *res; + char port[] = "domain"; + struct sockaddr_in6 sin6; + char strbuf[256]; + + qry= arg; + + qry->client_cookie_mismatch= false; + + switch(qry->qst) + { + case STATUS_FREE : + /* Macro processing */ + if (qry->lookupname) + { + free(qry->lookupname); + qry->lookupname= NULL; + } + qry->lookupname= + atlas_name_macro(qry->macro_lookupname); + if (qry->lookupname == NULL) + { + snprintf(line, DEFAULT_LINE_LENGTH, + "\"err\": \"macro expansion failed\""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + + /* Get time in case we don't send any packet */ + qry->xmit_time= atlas_time(); + qry->resolv_i = 0; + // crondlog(LVL5 "RESOLV QUERY FREE %s resolv_max %d", qry->server_name, qry->resolv_max); + if( qry->opt_resolv_conf) { + if (qry->response_in) + { + if (qry->udp_fd == -1) + { + qry->udp_fd= + open(qry->response_in, + O_RDONLY); + if (qry->udp_fd == -1) + { + crondlog( + DIE9 "unable to open '%s': %s", + qry->response_in, + strerror(errno)); + } + } + len= sizeof(qry->resolv_max); + read_response(qry->udp_fd, + RESP_N_RESOLV, &len, + &qry->resolv_max); + if (len != sizeof(qry->resolv_max)) + { + crondlog( + DIE9 "tdig_start: error reading from '%s'", + qry->response_in); + } + for (i= 0; iresolv_max; i++) + { + len= sizeof(strbuf)-1; + read_response(qry->udp_fd, + RESP_RESOLVER, &len, + strbuf); + strbuf[len]= '\0'; + qry->nslist[i]= strdup(strbuf); + } + } + else + { + get_local_resolvers (qry->nslist, + &qry->resolv_max, + qry->infname); + } + // crondlog(LVL5 "AAA RESOLV QUERY FREE %s resolv_max %d %d", qry->server_name, qry->resolv_max, qry->resolv_i); + if (qry->response_out) + { + if (!qry->resp_file) + { + qry->resp_file= + fopen(qry->response_out, + "w"); + if (!qry->resp_file) + { + crondlog( + DIE9 "unable to write to '%s'", + qry-> + 
response_out); + } + } + + write_response(qry->resp_file, + RESP_N_RESOLV, + sizeof(qry->resolv_max), + &qry->resolv_max); + for (i= 0; iresolv_max; i++) + { + write_response(qry->resp_file, + RESP_RESOLVER, + strlen(qry->nslist[i]), + qry->nslist[i]); + } + } + if(qry->resolv_max ) { + free(qry->server_name); + qry->server_name = NULL; + qry->server_name = + strdup(qry->nslist + [qry->resolv_i]); + } + else { + crondlog(LVL5 "AAA RESOLV QUERY FREE %s resolv_max is zero %d i %d", qry->server_name, qry->resolv_max, qry->resolv_i); + free(qry->server_name); + qry->server_name = NULL; + snprintf(line, DEFAULT_LINE_LENGTH, "\"nameserver\": \"no local resolvers found\""); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return; + } + } + break; + + case STATUS_NEXT_QUERY : + case STATUS_RETRANSMIT_QUERY: + break; + default: + printErrorQuick(qry); + return ; + } + + bzero(&hints, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_flags = 0; + hints.ai_socktype = SOCK_DGRAM; + hints.ai_flags = 0; + + qry->xmit_time= atlas_time(); + qry->qst = STATUS_DNS_RESOLV; + + if(qry->opt_v6_only == 1) + { + hints.ai_family = AF_INET6; + } + else if(qry->opt_v4_only == 1) + { + hints.ai_family = AF_INET; + } + + if( (qry->opt_v4_only == 1 ) && (qry->opt_v6_only == 1) ) + { + hints.ai_family = AF_UNSPEC; + } + + if (qry->response_out && !qry->resp_file) + { + qry->resp_file= fopen(qry->response_out, "w"); + if (!qry->resp_file) + { + crondlog(DIE9 "unable to write to '%s'", + qry->response_out); + } + } + + if(qry->opt_proto == 17) { //UDP + if (qry->response_in) + { + qry->res = NULL; + qry->ressave = NULL; + qry->qst = STATUS_SEND; + tdig_send_query_callback(0, 0, qry); + } + else if(qry->opt_evdns ) { + // use EVDNS asynchronous call + evdns_getaddrinfo(DnsBase, qry->server_name, qry->port_as_char, &hints, udp_dns_cb, qry); + } + else { + // using getaddrinfo; blocking call + if ( ( err_num = getaddrinfo(qry->server_name, port , &hints, &res))) + { + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"getaddrinfo\": \"port %s, AF %d %s\"", qry->err.size ? 
", " : "", port, hints.ai_family, gai_strerror(err_num)); + buf_add(&qry->err, line, strlen(line)); + printReply (qry, 0, NULL); + return ; + } + + qry->res = res; + qry->ressave = res; + + evtimer_add(&qry->nsm_timer, &asap); + qry->qst = STATUS_SEND; + } + } + else { // TCP Query + + qry->wire_size = 0; + // crondlog(LVL5 "TCP QUERY %s", qry->server_name); + interval.tv_sec = CONN_TO; + interval.tv_usec= 0; + + if (qry->response_in) + { + size_t len; + + qry->resp_file= fopen(qry->response_in, "r"); + if (!qry->resp_file) + { + crondlog(DIE9 "unable to read from '%s'", + qry->response_in); + } + + len= sizeof(sin6); + read_response_file(qry->resp_file, RESP_PEERNAME, + &len, &sin6); + tcp_beforeconnect(&qry->tu_env, + (struct sockaddr *)&sin6, len); + tcp_connected(&qry->tu_env, NULL); + tcp_writecb(NULL, &qry->tu_env); + while(qry->resp_file != NULL) + tcp_readcb(NULL, &qry->tu_env); + // report(qry); + } + else + { + tu_connect_to_name (&qry->tu_env, qry->server_name, + qry->opt_do_tls, qry->opt_do_https, + qry->port_as_char, + &interval, &hints, qry->infname, + qry->sni_cert_name, + qry->sni_cert_name, + tcp_timeout_callback, tcp_reporterr, + tcp_dnscount, tcp_beforeconnect, + tcp_connected, tcp_readcb, tcp_writecb); + } + + } + return ; +} + +#if 0 +int tdig_base_count_queries(struct tdig_base *base) +{ + const struct query_state *qry; + int n = 0; + + qry = base->qry_head; + if (!qry) + return 0; + do { + ++n; + qry = qry->next; + } while (qry != base->qry_head); + + return n; +} + +#endif + +static void tdig_stats(int unusg_statsed UNUSED_PARAM, const short event UNUSED_PARAM, void *h) +{ + struct timeval now; + struct timeval interval; + FILE *fh; + struct tdig_base *base; + struct query_state *qry_h; + struct query_state *qry; + + base = h; + if(!base->qry_head ) + return; + + qry_h = base->qry_head; + + if(! base->sentok ) + return; + + + if(qry_h->base->done) { + interval.tv_sec = DEFAULT_STATS_REPORT_INTERVEL; + interval.tv_usec = 0; + event_add(&tdig_base->statsReportEvent, &interval); + return; + } + + if (qry_h->out_filename) { + fh= fopen(qry_h->out_filename, "a"); + if (!fh) { + crondlog(LVL8 "evtdig: unable to append to '%s'", qry_h->out_filename); + return; + } + } + else + fh = stdout; + + qry=xzalloc(sizeof(*qry)); + + AS("RESULT { "); + JS(id, "9201" ); + AS(atlas_get_version_json_str()); + AS(", "); + gettimeofday(&now, NULL); + JS1(time, %llu, (unsigned long long)now.tv_sec); + JU(sok , base->sentok); + JU(rok , base->recvok); + JU(sent , base->sentbytes); + JU(recv , base->recvbytes); + JU(serr , base->sendfail); + JU(rerr , base->recvfail); + JU(timeout , base->timeout); + JU(short , base->shortpkt); + JU(martian, base->martian); + JU_NC(q, base->activeqry); + + AS(" }\n"); + fwrite(qry->result.buf, qry->result.size, 1 , fh); + if (qry_h->out_filename) + fclose (fh); + + buf_cleanup(&qry->result); + free(qry); + + interval.tv_sec = DEFAULT_STATS_REPORT_INTERVEL; + interval.tv_usec = 0; + event_add(&tdig_base->statsReportEvent, &interval); +} + + +/* Convert a string into DNS format. This is for a query so no compression. + * DNS format is a length byte followed by the contents of a label. We + * can assume that length of the DNS format is one larger than the original + * string because of the dots (except for just '.' where the length is the + * same). 
+ */ +static int ChangetoDnsNameFormat(u_char *dns, size_t maxlen, char* qry) +{ + size_t qrylen, labellen; + char *src, *e; + u_char *dst; + + qrylen= strlen(qry); + + if (qrylen+1 > maxlen) + { + // printf("ChangetoDnsNameFormat: name too long\n"); + return -1; /* Doesn't fit */ + } + + if (strcmp(qry, ".") == 0) + { + /* This doesn't fit in our regular schedule */ + dns[0]= 0; + return 1; + } + + src= qry; + dst= dns; + for (; src[0] != '\0' && dst < dns+maxlen;) + { + e= strchr(src, '.'); + if (e == NULL) + { + // printf("ChangetoDnsNameFormat: no trailing dot\n"); + return -1; /* qry does not end in a '.' */ + } + + labellen= e-src; + if (labellen > 63) + { + // printf("ChangetoDnsNameFormat: label too long\n"); + return -1; /* Can't do more than 63 */ + } + if (labellen == 0) + { + // printf("ChangetoDnsNameFormat: empty label\n"); + return -1; /* Take care of lonely '.' earlier */ + } + + *dst= labellen; + dst++; + memcpy(dst, src, labellen); + src= e+1; + dst += labellen; + } + + /* End, at a trailing null label */ + *dst= 0; + dst++; + + return dst-dns; +} + + +static void free_qry_inst(struct query_state *qry) +{ + int i; + struct timeval asap = { 1, 0 }; + // BLURT(LVL5 "freeing instance of %s ", qry->server_name); + + if (qry->response_in) + { + asap.tv_sec= 0; + asap.tv_usec= 1; + } + + if(qry->err.size) + { + buf_cleanup(&qry->err); + } + if(qry->qbuf.size) + buf_cleanup(&qry->qbuf); + + if(qry->ressave && qry->opt_evdns) { + evutil_freeaddrinfo(qry->ressave); + qry->ressave = NULL; + qry->ressent = NULL; + } + else if (qry->ressave ) + { + freeaddrinfo(qry->ressave); + qry->ressave = NULL; + qry->ressent = NULL; + } + qry->wire_size = 0; + + if(qry->packet.size) + { + buf_cleanup(&qry->packet); + } + + if(qry->opt_proto == 6) + { + if (!qry->response_in) + tu_cleanup(&qry->tu_env); + } + + if (qry->udp_fd != -1) + { + if (qry->response_in && qry->resolv_i < qry->resolv_max) + { + /* Keep input open */ + } + else + { + event_del(&qry->event); + close(qry->udp_fd); + qry->udp_fd= -1; + } + } + + if ( qry->opt_resolv_conf) { + // this loop goes over servers in /etc/resolv.conf + // select the next server and restart + if(qry->resolv_i < qry->resolv_max) { + if(qry->server_name) { + free (qry->server_name); + qry->server_name = NULL; + } + if (qry->nslist[qry->resolv_i] == NULL) + { + crondlog(DIE9 "free_qry_inst: qry %p, no resolver at index %d, max %d", qry, qry->resolv_i, qry->resolv_max); + } + qry->server_name = strdup(qry->nslist[qry->resolv_i]); + qry->qst = STATUS_NEXT_QUERY; + evtimer_add(&qry->next_qry_timer, &asap); + return; + } + } + + for (i= 0; inslist[i]) + { + free(qry->nslist[i]); + qry->nslist[i]= NULL; + } + } + + if (qry->http2_env) + { + http2_free(qry->http2_env); + qry->http2_env= NULL; + } + + switch(qry->qst){ + case STATUS_RETRANSMIT_QUERY: + break; + + default: + qry->qst = STATUS_FREE; + if(qry->base->done) { + evtimer_add(&qry->done_qry_timer, &asap); + } + if (qry->resp_file) + { + fclose(qry->resp_file); + qry->resp_file= NULL; + } + break; + } +} + + +static int tdig_delete(void *state) +{ + int i; + struct query_state *qry; + + qry = state; + + if (qry->qst ) + return 0; + + if (qry->cookie_state) + { + free(qry->cookie_state); + qry->cookie_state= NULL; + } + if(qry->out_filename) + { + free(qry->out_filename); + qry->out_filename = NULL ; + } + if(qry->macro_lookupname) + { + free(qry->macro_lookupname); + qry->macro_lookupname = NULL; + } + if(qry->lookupname) + { + free(qry->lookupname); + qry->lookupname = NULL; + } + if(qry->infname) + { + 
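/*
 * Worked example for ChangetoDnsNameFormat() above: the name
 * "www.ripe.net." (13 characters) encodes into 14 bytes of wire
 * format, one length byte per label plus the terminating root label,
 * with the dots dropped:
 *
 *	03 'w' 'w' 'w'  04 'r' 'i' 'p' 'e'  03 'n' 'e' 't'  00
 *
 * which is the "one larger than the original string" property noted
 * in the comment above.  Illustrative call, not part of the patch:
 */
static void demo_encode(void)
{
	unsigned char wire[256];
	char name[]= "www.ripe.net.";
	int n;

	n= ChangetoDnsNameFormat(wire, sizeof(wire), name);
	/* n is now 14; it would be -1 for a name without a trailing dot,
	 * with an empty label, or with a label longer than 63 bytes.
	 */
	(void)n;
}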
free(qry->infname); + qry->infname = NULL; + } + if (qry->port_as_char) + { + free(qry->port_as_char); + qry->port_as_char = NULL; + } + if (qry->sni_cert_name) + { + free(qry->sni_cert_name); + qry->sni_cert_name= NULL; + } + if (qry->https_path) + { + free(qry->https_path); + qry->https_path = NULL; + } + + /* Delete timers */ + evtimer_del(&qry->noreply_timer); + evtimer_del(&qry->nsm_timer); + + if((qry->next == qry->prev) && (qry->next == qry)) { + qry->base->qry_head = NULL; + crondlog(LVL7 "deleted last query qry %s", qry->str_Atlas); + } + else { +#if ENABLE_FEATURE_EVTDIG_DEBUG + crondlog(LVL7 "deleted qry %s qry->prev %s qry->next %s qry_head %s", qry->str_Atlas, qry->prev->str_Atlas, qry->next->str_Atlas, qry->base->qry_head->str_Atlas); + crondlog(LVL7 "old qry->next->prev %s qry->prev->next %s", qry->next->prev->str_Atlas, qry->prev->next->str_Atlas); +#endif + if(qry->next) + qry->next->prev = qry->prev; + if(qry->prev) + qry->prev->next = qry->next; + if(qry->base && qry->base->qry_head == qry) + qry->base->qry_head = qry->next; + +#if ENABLE_FEATURE_EVTDIG_DEBUG + crondlog(LVL7 "new qry->next->prev %s qry->prev->next %s", qry->next->prev->str_Atlas, qry->prev->next->str_Atlas); +#endif + } + if( qry->str_Atlas) + { + free( qry->str_Atlas); + qry->str_Atlas = NULL; + } + if( qry->str_bundle) + { + free( qry->str_bundle); + qry->str_bundle = NULL; + } + if(qry->server_name) + { + free(qry->server_name); + qry->server_name = NULL; + } + for (i= 0; inslist[i]) + { + free(qry->nslist[i]); + qry->nslist[i]= NULL; + } + } + if (qry->udp_fd != -1) + { + event_del(&qry->event); + close(qry->udp_fd); + qry->udp_fd= -1; + } + if(qry->base) + qry->base->activeqry--; + free(qry); + qry = NULL; + return 1; +} + +void printErrorQuick (struct query_state *qry) +{ + FILE *fh; + if (qry->out_filename) + { + fh= fopen(qry->out_filename, "a"); + if (!fh){ + crondlog(LVL8 "evtdig: unable to append to '%s'", + qry->out_filename); + return; + } + } + else + fh = stdout; + + fprintf(fh, "RESULT { "); + fprintf(fh, "%s,", atlas_get_version_json_str()); + fprintf(fh, "\"id\" : 9202 ,"); + fprintf(fh, "\"time\" : %llu ,", (unsigned long long)atlas_time()); + + fprintf(fh, "\"error\" : [{ "); + fprintf(fh, "\"query busy\": \"not starting a new one. previous one is not done yet\"}"); + if(qry->str_Atlas) + { + fprintf(fh, ",{"); + fprintf(fh, "\"id\" : \"%s\"", qry->str_Atlas); + if (qry->str_bundle) + fprintf(fh, ",\"bundle\" : %s", qry->str_bundle); + fprintf(fh, ",\"start time\" : %llu", (unsigned long long)qry->xmit_time); + if(qry->retry) { + fprintf(fh, ",\"retry\": %d", qry->retry); + + } + if(qry->opt_retry_max) { + fprintf(fh, ",\"retry max\": %d", qry->opt_retry_max); + } + fprintf(fh, "}"); + } + fprintf(fh,"]}"); + + fprintf(fh, "\n"); + if (qry->out_filename) + fclose(fh); +} + +void printReply(struct query_state *qry, int wire_size, unsigned char *result) +{ + int i, stop=0; + struct DNS_HEADER *dnsR = NULL; + struct RES_RECORD answers[20]; //the replies from the DNS server + void *ptr = NULL; + FILE *fh; + char addrstr[INET6_ADDRSTRLEN]; + u_int32_t serial; + int iMax ; + int flagAnswer = 0; + int data_len, len; + int write_out = FALSE; + unsigned offset; + unsigned char *name1= NULL, *name2= NULL; + + int lts = get_timesync(); + + if(! 
qry->result.size){ + buf_init(&qry->result, -1); + + AS("RESULT { "); + + if(qry->str_Atlas) + { + JS(id, qry->str_Atlas); + if (qry->str_bundle) { + JS1(bundle, %s, qry->str_bundle); + } + } + + AS(atlas_get_version_json_str()); + AS(", "); + if (qry->opt_rset){ + JS1(time, %llu, (unsigned long long)qry->xmit_time); + JD(lts,lts); + AS("\"resultset\" : [ {"); + } + + } + else if(qry->opt_rset) { + AS (",{"); + } + + JS1(time, %llu, (unsigned long long)qry->xmit_time); + JD(lts,lts); + + if (qry->response_in || qry->response_out) + ssl_version= "test 0.0.0"; + + if ((qry->opt_do_tls || qry->opt_do_https) && ssl_version != NULL) + JS(sslvers, ssl_version); + + if ( qry->opt_resolv_conf ) { + JD (subid, (qry->resolv_i+1)); + JD (submax, qry->resolv_max); + } + + if( qry->ressent && qry->server_name) + { // started to send query + // historic resaons only works with UDP + switch (qry->ressent->ai_family) + { + case AF_INET: + ptr = &((struct sockaddr_in *) qry->ressent->ai_addr)->sin_addr; + break; + case AF_INET6: + ptr = &((struct sockaddr_in6 *) qry->ressent->ai_addr)->sin6_addr; + break; + } + inet_ntop (qry->ressent->ai_family, ptr, addrstr, INET6_ADDRSTRLEN); + if(strcmp(addrstr, qry->server_name)) { + JS(name, qry->server_name); + } + JS(dst_addr, addrstr); + JS(dst_port, qry->port_as_char); + JD(af, qry->ressent->ai_family == PF_INET6 ? 6 : 4); + } + else if(qry->dst_ai_family && qry->server_name) + { + if(strcmp(qry->dst_addr_str, qry->server_name)) { + JS(dst_name, qry->server_name); + } + JS(dst_addr , qry->dst_addr_str); + JS(dst_port, qry->port_as_char); + JD(af, qry->dst_ai_family == PF_INET6 ? 6 : 4); + } + else if(qry->server_name) { + JS(dst_name, qry->server_name); + } + + if(qry->loc_sin6.sin6_family) { + getnameinfo((struct sockaddr *)&qry->loc_sin6, + qry->loc_socklen, addrstr, INET6_ADDRSTRLEN, + NULL, 0, NI_NUMERICHOST); + if(strlen(addrstr)) + JS(src_addr, addrstr); + } + + if(qry->retry) { + JS1(retry, %d, qry->retry); + } + + if (qry->rcvdttl >= 0) + JD(ttl, qry->rcvdttl); + + JS_NC(proto, qry->opt_proto == 6 ? 
"TCP" : "UDP" ); + + if(qry->opt_qbuf && qry->qbuf.size) { + AS(",\"qbuf\" : \""); + buf_add(&qry->result, qry->qbuf.buf, qry->qbuf.size); + AS("\" "); + } + + if(result) + { + snprintf(line, DEFAULT_LINE_LENGTH, ",\"result\" : { \"rt\" : %.3f,", qry->triptime); + buf_add(&qry->result,line, strlen(line)); + + if (qry->opt_proto == 6) + { + /* Add query time for TCP */ + snprintf(line, DEFAULT_LINE_LENGTH, "\"qt\" : %.3f,", qry->querytime); + buf_add(&qry->result,line, strlen(line)); + } + + JD_NC (size, wire_size); + + if(qry->opt_abuf) { + snprintf(line, DEFAULT_LINE_LENGTH, ", \"abuf\" : \""); + buf_add(&qry->result,line, strlen(line)); + buf_add_b64(&qry->result, result, wire_size, 0); + AS("\""); + } + + if (qry->client_cookie_mismatch) + { + snprintf(line, DEFAULT_LINE_LENGTH, ", \"client-cookie-mismatch\" : true"); + buf_add(&qry->result,line, strlen(line)); + } + + if (wire_size < sizeof(struct DNS_HEADER)) + goto truncated; + + dnsR = (struct DNS_HEADER*) result; + + buf_add(&qry->result, ",", 1); + JU (ID, ntohs(dnsR->id)); + + /* + fprintf (fh, " , \"RCODE\" : %d", dnsR->rcode); + fprintf (fh, " , \"AA\" : %d", dnsR->aa); + fprintf (fh, " , \"TC\" : %d", dnsR->tc); + */ + JU (ANCOUNT, ntohs(dnsR->ans_count )); + JU (QDCOUNT, ntohs(dnsR->q_count)); + JU (NSCOUNT, ntohs(dnsR->ns_count)); + JU_NC (ARCOUNT, ntohs(dnsR->add_count)); + + /* Start just after header */ + offset= sizeof(struct DNS_HEADER); + + len= dns_namelen(result, offset, wire_size); + if (len == -1) + goto truncated; + + offset += len + sizeof(struct QUESTION); + + if (offset > wire_size) + goto truncated; + + stop=0; + iMax = 0; + + if (dnsR->ans_count > 0) + { + iMax = MIN(2, ntohs(dnsR->ans_count)); + + memset(answers, '\0', sizeof(answers)); + + for(i=0;i + wire_size) + { + /* Report error? */ + goto truncated; + } + + answers[i].resource = + (struct R_DATA*)(result+offset); + offset += sizeof(struct R_DATA); + + answers[i].rdata = NULL; + + if(ntohs(answers[i].resource->type)==T_TXT) //txt + { + data_len = ntohs(answers[i].resource->data_len); + + if (offset+data_len > wire_size) + goto truncated; + + if(flagAnswer == 0) { + AS(",\"answers\" : [ {"); + flagAnswer++; + } + else if (flagAnswer > 0) { + AS(", {"); + } + flagAnswer++; + JS (TYPE, "TXT"); + JS (NAME, answers[i].name); + print_txt_json(result+offset, + data_len, qry); + offset += data_len; + AS("}"); + + } + else if (ntohs(answers[i].resource->type)== T_SOA) + { + name1= name2= NULL; + name1 = ReadName( + result,wire_size, + offset,&stop); + if (stop == -1) + goto truncated; + offset += stop; + name2 = ReadName( + result,wire_size, + offset,&stop); + if (stop == -1) + { + free(name1); name1= NULL; + goto truncated; + } + offset += stop; + if (offset+5*4 > wire_size) + { + free(name1); name1= NULL; + free(name2); name2= NULL; + goto truncated; + } + + if(flagAnswer == 0) { + AS(",\"answers\" : [ { "); + } + else if (flagAnswer > 0) { + AS(",{ "); + } + flagAnswer++; + + JS(TYPE, "SOA"); + JSDOT(NAME, answers[i].name); + JU(TTL, ntohl(answers[i].resource->ttl)); + JSDOT( MNAME, name1); + free(name1); name1= NULL; + JSDOT( RNAME, name2); + free(name2); name2= NULL; + + serial = get32b(result+offset); + JU_NC(SERIAL, serial); + offset += 4; + + offset += 4*4; // skip REFRESH, RETRY, EXIPIRE, and MINIMUM + AS(" } "); + } + else + { + data_len = ntohs(answers[i]. 
+ resource->data_len); + + if (offset+data_len > wire_size) + goto truncated; + + offset += data_len; + } + + // free mem + if(answers[i].rdata != NULL) + free (answers[i].rdata); + } + if(flagAnswer > 0) + AS(" ]"); + } + + for(i=0;ierr.size) + { + AS(", \"error\" : {"); + buf_add(&qry->result,qry->err.buf, qry->err.size); + AS("}"); + } + + /* end of result only JSON closing brackets from here on */ + + if(qry->opt_rset) { + AS("}"); /* resultset : [{ } */ + } + else { + write_out = TRUE; + } + + if(qry->opt_resolv_conf){ + qry->resolv_i++; + + if(qry->resolv_i >= qry->resolv_max) { + write_out = TRUE; + if(qry->opt_rset) { + AS ("]"); /* reseultset : [{}] */ + } + } + } + + if(write_out && qry->result.size){ + if (qry->out_filename) + { + fh= fopen(qry->out_filename, "a"); + if (!fh) { + crondlog(LVL8 "evtdig: unable to append to '%s'", + qry->out_filename); + } + } + else + fh = stdout; + if (fh) { + AS (" }\n"); /* RESULT { } */ + fwrite(qry->result.buf, qry->result.size, 1 , fh); + } + buf_cleanup(&qry->result); + + if (qry->out_filename) + fclose(fh); + } + qry->retry = 0; + free_qry_inst(qry); +} + +unsigned char* ReadName(unsigned char *base, size_t size, size_t offset, + int* count) +{ + unsigned char *name; + unsigned int p=0,jumped=0, jump_count=0, len; + size_t noffset; + + *count = 0; + name = (unsigned char*)malloc(256); + + name[0]= '\0'; + + //read the names in 3www6google3com format + while(offset < size && (len= base[offset], len !=0)) + { + if (len & 0xc0) + { + if ((len & 0xc0) != 0xc0) + { + /* Bad format */ + snprintf((char *)name, sizeof(name), + "format-error at %lu: value 0x%x", + (unsigned long)offset, len); + *count= -1; + free(name); name= NULL; + return name; + } + + noffset= ((len & ~0xc0) << 8) | base[offset+1]; + if (noffset >= size) + { + snprintf((char *)name, sizeof(name), + "offset-error at %lu: offset %lu", + (unsigned long)offset, (unsigned long)noffset); + *count= -1; + free(name); name= NULL; + return name; + } + + if (jump_count > 256) + { + /* Too many */ + snprintf((char *)name, sizeof(name), + "too many redirects at %lu", + (unsigned long)offset); + *count= -1; + free(name); name= NULL; + return name; + } + + offset= noffset; + if(jumped==0) + { + /* if we havent jumped to another location + * then we can count up + */ + *count += 2; + } + jumped= 1; + jump_count++; + continue; + } + if (offset+len+1 > size) + { + snprintf((char *)name, sizeof(name), + "buf-bounds-error at %lu: len %d", + (unsigned long)offset, len); + *count= -1; + free(name); name= NULL; + return name; + } + + if (p+len+1 > 255) + { + snprintf((char *)name, sizeof(name), + "name-length-error at %lu: len %d", + (unsigned long)offset, p+len+1); + *count= -1; + free(name); name= NULL; + return name; + } + memcpy(name+p, base+offset+1, len); + name[p+len]= '.'; + p += len+1; + offset += len+1; + + if(jumped==0) + { + /* if we havent jumped to another location then we + * can count up + */ + *count += len+1; + } + } + + if (!jumped) + (*count)++; /* Terminating zero length */ + + name[p]= '\0'; //string complete + + if(p > 0) + name[p-1]= '\0'; //remove the last dot + return name; +} + +int dns_namelen(unsigned char *base, size_t offset, size_t size) +{ + size_t start_offset; + unsigned int len; + + start_offset= offset; + + //figure out the length of a name in 3www6google3com format + while(offset < size) + { + len= base[offset]; + if (len & 0xc0) + { + if ((len & 0xc0) != 0xc0) + { + /* Bad format */ + return -1; + } + + offset++; + break; + } + if (offset+len+1 > size) + { + 
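/*
 * Sketch of the compression rule that ReadName() and dns_namelen()
 * above both implement: a length byte with the two top bits set
 * (value >= 0xc0) is not a label length but the first byte of a
 * two-byte pointer, whose lower 14 bits give an offset from the start
 * of the message where the name continues (RFC 1035, section 4.1.4).
 * Illustrative helper, not part of the patch:
 */
#include <stddef.h>

static int is_compression_pointer(const unsigned char *msg, size_t off,
	size_t msglen, size_t *targetp)
{
	if (off+1 >= msglen)
		return -1;		/* truncated */
	if ((msg[off] & 0xc0) != 0xc0)
		return 0;		/* ordinary label length (or root) */
	*targetp= ((msg[off] & 0x3f) << 8) | msg[off+1];
	return 1;			/* continue reading at *targetp */
}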
return -1; + } + + offset += len+1; + + if (len == 0) + break; + } + + return offset-start_offset; +} + +/* get 4 bytes from memory + * eg. used to extract serial number from soa packet + */ + u_int32_t +get32b (unsigned char *p) +{ + u_int32_t var; + + var = (0x000000ff & *(p)) << 24; + var |= (0x000000ff & *(p+1)) << 16; + var |= (0x000000ff & *(p+2)) << 8; + var |= (0x000000ff & *(p+3)); + + return (var); +} + +/* + * Copy data allowing for unaligned accesses in network byte order + * (big endian). + */ +void ldns_write_uint16(void *dst, uint16_t data) +{ +#ifdef ALLOW_UNALIGNED_ACCESSES + * (uint16_t *) dst = htons(data); +#else + uint8_t *p = (uint8_t *) dst; + p[0] = (uint8_t) ((data >> 8) & 0xff); + p[1] = (uint8_t) (data & 0xff); +#endif +} + +uint16_t +ldns_read_uint16(const void *src) +{ +#ifdef ALLOW_UNALIGNED_ACCESSES + return ntohs(*(uint16_t *) src); +#else + uint8_t *p = (uint8_t *) src; + return ((uint16_t) p[0] << 8) | (uint16_t) p[1]; +#endif +} + +struct testops tdig_ops = { tdig_init, tdig_start, tdig_delete }; diff --git a/probe-busybox/eperd/evtlsscan.c b/probe-busybox/eperd/evtlsscan.c new file mode 100644 index 00000000..4bf5913b --- /dev/null +++ b/probe-busybox/eperd/evtlsscan.c @@ -0,0 +1,1477 @@ +/* + * Copyright (c) 2014 - 2016 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +//config:config EVTLSGETCERT +//config: bool "evtlsgetcert" +//config: default n +//config: help +//config: standalone version of event-driven TLS getcert + +//config:config EVTLSSCAN +//config: bool "evtlsscan" +//config: default n +//config: help +//config: Scan TLS server with various TLS paramenters and summarize them. + +//applet:IF_EVTLSSCAN(APPLET(evtlsscan, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVTLSSCAN) += evtlsscan.o + +//usage:#define evtlsscan_trivial_usage +//usage: "todo" +//usage:#define evtlsscan_full_usage "\n\n" +//usage: "todo" + +#include "json-macros.h" +#include "libbb.h" +#include "atlas_bb64.h" +#include "atlas_probe.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "tls_hostname_validation.h" + +#define SAFE_PREFIX ATLAS_DATA_NEW + +#define DEFAULT_LINE_LENGTH 1024 +#define DEFAULT_NOREPLY_TIMEOUT 5000 +#define O_RETRY 200 + +#define STATUS_FREE 0 +#define STATUS_START 1001 + +enum output_format {OUTPUT_FMT_NO_CERTS, OUTPUT_FMT_CERTS_ARRAY, OUTPUT_FMT_CERTS_FULL}; + +enum readstate { READ_FIRST, READ_STATUS, READ_HEADER, READ_BODY, READ_SIMPLE, + READ_CHUNKED, READ_CHUNK_BODY, READ_CHUNK_END, + READ_CHUNKED_TRAILER, + READ_DONE }; +enum writestate { WRITE_FIRST, WRITE_HEADER, WRITE_POST_HEADER, + WRITE_POST_FILE, WRITE_POST_FOOTER, WRITE_DONE }; + +/* struct common for all quries */ +struct tls_base { + struct event_base *event_base; +}; + +static void crondlog_aa(const char *ctl, char *fmt, ...); + +/* + * this user query one per user input aka pqry + * Child query and Grand children are the actual queries + */ +struct tls_state { + char *host; + int state; + int q_serial; /* on the instance, keep count of queries sent */ + int q_success; + + /* all children share same result structure with the parent */ + struct buf err; + struct buf result; + + struct evutil_addrinfo *addr; /* there is only one on the pqry */ + struct timeval start_time; /* start time of the parent query */ + struct event free_inst_ev; /* event to free the parent 
query */ + + char *port; /* port as character string "443"*/ + char do_get; + char do_head; + char do_http10; + char *user_agent; + char *path; + + int dns_count; /* resolved addresses to pquery */ + + int active; /* pending additional quries per user query */ + int retry; + + int opt_out_format; + int opt_retry_max; + int opt_ignore_cert; + + int opt_v4; + int opt_v6; + struct evutil_addrinfo hints; + + int opt_max_con; /* maximum concurrent queries per destination */ + int opt_max_bytes; /* max size of output buffer */ + + bool opt_all_tests; + + struct timeval timeout_tv; /* time out per child query in TV */ + int opt_timeout; /* user input in seconds */ + int opt_ssl_v3; + int opt_tls_v1; + int opt_tls_v11; + int opt_tls_v12; + char *out_filename; + char *str_Atlas; /* option but without opt_ prefix. Historic */ + + struct tls_qry *c; + struct event done_ev; + void (*done)(void *state); /* call back when all queries are done */ +}; + +struct cert_fp { + unsigned char fp[EVP_MAX_MD_SIZE]; + struct cert_fp *next; +}; + +struct tls_qry { + /* per instance variables. Unshared after duplicate */ + int serial; /* serial number of this query. Start at zero on pqry */ + + struct tls_state *ui; /* pqry instace */ + struct buf *result; /* points to pqry.result */ + struct buf *cc; /* certificate chain, shared by grand children, appended to the result */ + struct buf *ciphers_s_buf; /*list of ciphers that succeded shared with children */ + struct buf *ciphers_e_buf; /* ciphers that succeded */ + struct buf *certs; /* ciphers that succeded */ + struct buf err; + + struct cert_fp *cfps; + + SSL_CTX *ssl_ctx; + SSL *ssl; + int sslv; /* version of child query from parent opt_ */ + const char *sslv_str; /* string for sslv */ + const char *cipher_q; /* for this child query, what we are going to query */ + const char *cipher_r; /* for this child query, what the response was */ + struct bufferevent *bev; + struct evutil_addrinfo *addr_curr; + + struct timeval start_time; + double triptime; + double ttc; + int retry; + + struct event timeout_ev; + struct event free_child_ev; + bool is_c; /* is children? same destination (IP) with different ssl option */ + int active_c; /* count of active children. 
delete parent when zero */ + struct tls_qry *p ; /* parent query to same IP with all ciphers */ + bool tls_incomplete; + enum readstate readstate; /* httpget */ + enum writestate writestate; /* httpget */ + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + char addrstr[INET6_ADDRSTRLEN]; +}; + +int tlsscan_delete (void *st); +void tlsscan_start (struct tls_state *pqry); +static void event_cb(struct bufferevent *bev, short events, void *ptr); +static void write_cb(struct bufferevent *bev, void *ptr); +static void http_read_cb(struct bufferevent *bev UNUSED_PARAM, void *ptr); +static void timeout_cb(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h); + +static struct tls_base *tls_base = NULL; +static char line[(DEFAULT_LINE_LENGTH+1)]; +static struct option longopts[]= +{ + { "retry", required_argument, NULL, O_RETRY }, + { "timeout", required_argument, NULL, 'T' }, + { "port", required_argument, NULL, 'p'}, +}; + +static void done_cb(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) { + struct tls_state *pqry = h; + pqry->done(pqry); +} + +/* free ephemeral data for the this instance of run; pqry */ +static void free_pqry_inst_cb (int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) +{ + struct tls_state *pqry = h; + if(pqry->err.size) + { + buf_cleanup(&pqry->err); + } + + if (pqry->result.size > 0) + { + buf_cleanup(&pqry->result); + } + + if (pqry->addr != NULL) { + evutil_freeaddrinfo(pqry->addr); + pqry->addr = NULL; + } +} + +static void free_child_cb(int unused UNUSED_PARAM, const short event UNUSED_PARAM, void *h) +{ + + struct tls_qry *qry = h; + /* only free ephemeral data of the child query */ + if(qry->err.size) + { + buf_cleanup(&qry->err); + } + + + if (qry->bev != NULL && qry->tls_incomplete){ + bufferevent_free(qry->bev); /* this will call SSL_free; + SSL_free(qry->ssl); */ + qry->bev = NULL; + } + + if(qry->ssl_ctx != NULL) { + SSL_CTX_free(qry->ssl_ctx); + qry->ssl_ctx = NULL; + } +} + +/* Initialize a struct timeval by converting milliseconds */ +static void msecstotv(time_t msecs, struct timeval *tv) +{ + tv->tv_sec = msecs / 1000; + tv->tv_usec = msecs % 1000 * 1000; +} + +static bool tls_inst_start (struct tls_qry *qry, const char *cipher_q) +{ + /* OpenSSL is initialized, SSL_library_init() should be called already */ + + /* + ssl_ctx are not shared between quries. It could but not sure how to + set structures with specific versions and algorithms. Instead using + one ctx per query. + */ + + switch(qry->sslv) + { + case SSL3_VERSION: + qry->ssl_ctx = SSL_CTX_new(SSLv3_client_method()); + qry->sslv_str = SSL_TXT_SSLV3; + break; + case TLS1_VERSION: + qry->ssl_ctx = SSL_CTX_new(TLSv1_client_method()); + qry->sslv_str = SSL_TXT_TLSV1; + break; + case TLS1_1_VERSION: + qry->ssl_ctx = SSL_CTX_new(TLSv1_1_client_method()); + qry->sslv_str = SSL_TXT_TLSV1_1; + break; + case TLS1_2_VERSION: + qry->ssl_ctx = SSL_CTX_new(TLSv1_2_client_method()); + qry->sslv_str = SSL_TXT_TLSV1_2; + break; + default: + qry->ssl_ctx = SSL_CTX_new(SSLv23_client_method()); + qry->sslv_str = "TLSv1/SSL2/SSL3"; + break; + + } + + qry->cipher_q = cipher_q; + + /* Do we want to do any sort of vericiation the probe? */ + /* if we don't we might be hitting a proxy server in the way */ + // verify_ssl_cert(qry); + + + /* this cipher per context . 
we are setting per connection */ + // SSL_CTX_set_cipher_list(qry->ssl_ctx, "ALL:COMPLEMENTOFALL"); + // SSL_CTX_set_cipher_list(qry->ssl_ctx, "HIGH"); + + if (!qry->ssl_ctx) { + crondlog_aa(LVL9, "SSL_CTX_new %s", __func__); + return TRUE; + } + + qry->ssl = SSL_new(qry->ssl_ctx); + if (qry->ssl == NULL) { + crondlog_aa(LVL9, "SSL_new() %s", __func__); + return TRUE; + } + SSL_set_cipher_list(qry->ssl, cipher_q); + + /* Set hostname for SNI extension */ + SSL_set_tlsext_host_name(qry->ssl, qry->ui->host); + + msecstotv(DEFAULT_NOREPLY_TIMEOUT, &qry->ui->timeout_tv); + evtimer_add(&qry->timeout_ev, &qry->ui->timeout_tv); + + qry->bev = bufferevent_openssl_socket_new(EventBase, -1, qry->ssl, + BUFFEREVENT_SSL_CONNECTING, + BEV_OPT_CLOSE_ON_FREE); + + //bufferevent_openssl_set_allow_dirty_shutdown(qry->bev, 1); + bufferevent_setcb(qry->bev, http_read_cb, write_cb, event_cb, qry); + + { + void *ptr = NULL; + if (qry->addr_curr->ai_family == AF_INET) { + ptr = &((struct sockaddr_in *) qry->addr_curr->ai_addr)->sin_addr; + } + else if (qry->addr_curr->ai_family == AF_INET6) { + ptr = &((struct sockaddr_in6 *) + qry->addr_curr->ai_addr)->sin6_addr; + } + inet_ntop (qry->addr_curr->ai_family, ptr, qry->addrstr, INET6_ADDRSTRLEN); + crondlog_aa(LVL7, "connect to %s %s active = %d %s %s", + qry->addrstr, qry->ui->host, qry->ui->active, qry->sslv_str, qry->cipher_q); + } + + if (bufferevent_socket_connect(qry->bev, + qry->addr_curr->ai_addr, + qry->addr_curr->ai_addrlen)) { + crondlog_aa(LVL8, "ERROR bufferevent_socket_connect to %s \"%s\"" + "ctive = %d %s %s - %s", qry->addrstr, qry->ui->host, + qry->ui->active, qry->sslv_str, qry->cipher_q, + evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()) + ); + + // warnx("could not connect to %s : %s", qry->ui->host, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); + bufferevent_free(qry->bev); + qry->bev = NULL; + return TRUE; + } + else{ + gettimeofday(&qry->start_time, NULL); + return FALSE; + } + return FALSE; +} +static void ssl_c_init(struct tls_qry *qry) +{ + int i; + const char *p; + SSL *ssl = SSL_new(qry->ssl_ctx); /* this is a local one */ + + if (ssl == NULL) + return; + + for (i=0; ; i++) + { + struct tls_qry *cqry = NULL; /* next child query */ + + p = SSL_get_cipher_list(ssl,i); + if (p == NULL) { + crondlog_aa(LVL7, "%s dst %s active = %d" " active_c = %d %s %s %s created %d children", __func__, + qry->addrstr, qry->ui->active, qry->active_c, qry->sslv_str, p, qry->ui->host, i); + break; + } + /* skip the one that server picked. 
We know that is supported */ + if (strlen(p) && strncmp(p, qry->cipher_r, strlen(p)) == 0) + continue; + + qry->ui->active++; + cqry = xzalloc(sizeof(struct tls_qry)); + + if (cqry == NULL) + break; + + qry->tls_incomplete = TRUE; + cqry->ui = qry->ui; + qry->ui->q_serial++; + qry->active_c++; + cqry->serial = qry->ui->q_serial; + + cqry->addr_curr = qry->addr_curr; + cqry->result = qry->result; + evtimer_assign(&cqry->timeout_ev, EventBase, timeout_cb, cqry); + evtimer_assign(&cqry->free_child_ev, EventBase, free_child_cb, cqry); + cqry->sslv = qry->sslv; + crondlog_aa(LVL7, "%s dst %s active = %d" " active_c = %d %s %s %s", __func__, + qry->addrstr, qry->ui->active, qry->active_c, qry->sslv_str, p, qry->ui->host); + cqry->is_c = TRUE; + cqry->p = qry; + cqry->cc = qry->cc; + cqry->certs = qry->certs; + cqry->cfps = qry->cfps; + cqry->ciphers_s_buf = qry->ciphers_s_buf; + cqry->cipher_q = p; + tls_inst_start(cqry, p); + } + SSL_free(ssl); +} + +static void atlas_cert_char_encode (struct buf *lbuf, BUF_MEM *bptr) +{ + int j; + char *c = (char *)bptr->data; + + AS("\"cert\" : \""); + for (j = 0; j < bptr->length; j++) { + if (*c == '\n') { + AS("\\n"); + } + else { + /* this could be more efficient ? */ + buf_add(lbuf, c, 1); + } + c++; + } + AS("\""); +} + +static char * add_cert_and_fp(unsigned char *md, X509* cert, struct tls_qry *qry, int *id) { + unsigned int n; + struct cert_fp *cfp = qry->cfps; + const EVP_MD *fdig = EVP_sha1(); + int i = 0; + + + if (X509_digest(cert,fdig,md,&n) == 0) + { + return "error in X509_digest"; + } + + *id = -1; + while ( cfp != NULL) { + if (memcmp(md, cfp->fp, EVP_MAX_MD_SIZE) == 0) + { + *id = i; + break; + } + i++; + cfp = cfp->next; + } + + /* this is a new certificate add to chain */ + + if (*id == -1) { + struct buf *lbuf = qry->cc; + BIO *b64 = BIO_new (BIO_s_mem()); + BUF_MEM *bptr; + int k; + char c3[4]; + + cfp = xzalloc(sizeof(struct cert_fp)); + cfp->next = qry->cfps; + qry->cfps = cfp; + memcpy(cfp->fp, md, EVP_MAX_MD_SIZE); + + *id = i; + PEM_write_bio_X509(b64, cert); + BIO_get_mem_ptr(b64, &bptr); + + if (lbuf->size == 0) { + if (i == 0) { + AS(", \"cert_chain\" : ["); + } + } + if ( i > 0 ) + AS(", "); + AS("{"); + JD(id , i); + atlas_cert_char_encode(lbuf, bptr); + BIO_free(b64); + + AS(", \"fp\":\""); + for (k=0; k<(int)n; k++) + { + snprintf(c3, sizeof(c3),"%02X%c",md[k], + (k+1 == (int)n) ?'"':':'); + AS(c3); + + } + AS("}"); + } + return NULL; +} + +static void add_cert_chain( STACK_OF(X509) *sk, struct tls_qry *qry) { + int i; + int cert_id = -1; + + for (i=0; ierr; + if(lbuf->size > 0) + AS (","); + JS_NC(X509cert, err_s); + } + } +} + +static void get_cert_chain(struct tls_qry *qry) +{ + int i; + struct buf *lbuf = qry->cc; /* careful with lbuf it is used by JSON Macros */ + STACK_OF(X509) *sk = NULL; + // X509 *peer = NULL; + + if(qry->ui->opt_out_format & OUTPUT_FMT_NO_CERTS) + return; + + sk = SSL_get_peer_cert_chain(qry->ssl); + if(sk == NULL) { + lbuf = &qry->err; + if(lbuf->size > 0) + AS (","); + JS_NC(X509cert_chain, "no cert chain found after handshake"); + + } else { + add_cert_chain(sk, qry); + } + + /* + peer=SSL_get_peer_certificate(qry->ssl); + if(peer != NULL) { + + } + */ +} + +static void add_certs_to_result (struct tls_qry *qry) +{ + int i; + STACK_OF(X509) *sk; + + struct buf *lbuf = qry->result; + + if(!(qry->ui->opt_out_format & OUTPUT_FMT_NO_CERTS)) + return; + + if((sk = SSL_get_peer_cert_chain(qry->ssl)) == NULL) + return; + + for (i=0; ilength > 0) { + if (i == 0) { + AS(", \"certs\" : ["); + } + if ( i > 
0 ) + AS(", "); + atlas_cert_char_encode(qry->result, bptr); + AS("}"); + } + } + if ( i > 0) { + AS("]"); /* certs [] */ + } +} + +static void fmt_ssl_time(struct tls_qry *qry) +{ + int lts = -1 ; /* get_timesync(); */ /* AA_FIXME */ + struct buf *lbuf = qry->result; + + JS1(time, %ld, qry->start_time.tv_sec); + JD(lts,lts); + +} + +static void fmt_ssl_host(struct tls_qry *qry, bool is_err) +{ + char addrstr[INET6_ADDRSTRLEN]; + struct buf *lbuf = qry->result; + + if ((qry->addr_curr != NULL) && (qry->addrstr[0] != '\0')) { + if(strcmp(qry->addrstr, qry->ui->host)) { + JS(dst_name, qry->ui->host); + } + JS(dst_addr , qry->addrstr); + + if(qry->loc_sin6.sin6_family) { + getnameinfo((struct sockaddr *)&qry->loc_sin6, + qry->loc_socklen, addrstr, INET6_ADDRSTRLEN, + NULL, 0, NI_NUMERICHOST); + if(strlen(addrstr)) + JS(src_addr, addrstr); + } + JD_NC(af, qry->addr_curr->ai_family == PF_INET6 ? 6 : 4); + } + else if (qry->ui->host) { + JS_NC(dst_name, qry->ui->host); + } +} + +static void fmt_ssl_ui_result(struct tls_qry *qry) +{ + int lts = -1 ; /* get_timesync(); */ /* AA_FIXME */ + int fw = get_atlas_fw_version(); + struct buf *lbuf = &qry->ui->result; + + AS("RESULT { "); + if(qry->ui->str_Atlas != NULL) + { + JS(id, qry->ui->str_Atlas); + } + JD(fw, fw); + JD(dnscount, qry->ui->dns_count); + JS1(time, %ld, qry->ui->start_time.tv_sec); + JD(lts,lts); // fix me take lts when I create start time. + if (qry->addr_curr == NULL) { + JS_NC(dst_name, qry->ui->host); + } else { + AS("\"resultset\" : ["); + } +} + +static void fmt_ssl_summary(struct tls_qry *qry, bool is_err) +{ + int size = qry->result->size ; + struct buf *lbuf = qry->result; + + if (qry->addr_curr == NULL) + return; + + if (size == 0){ + AS ("{"); + fmt_ssl_time(qry); + fmt_ssl_host(qry, is_err); + + if(qry->retry) { + JD(retry, qry->retry); + } + if (qry->sslv_str != NULL) { + AS (", "); + JS_NC(version, qry->sslv_str); + } + + } + if ( !is_err && (qry->ssl_ctx != NULL) && (qry->ssl != NULL) && + (!qry->tls_incomplete)) { + qry->ui->q_success++; + lbuf = qry->ciphers_s_buf; /* note lbuf changed */ + if (qry->ciphers_s_buf->size == 0) { + AS (", \"ciphers\": ["); + } else { + AS(" ,"); + } + + AS ("\""); + qry->cipher_r = SSL_CIPHER_get_name(SSL_get_current_cipher(qry->ssl)); + AS (qry->cipher_r); + AS("\""); + get_cert_chain(qry); + + if ((qry->is_c == FALSE) && (qry->ui->opt_all_tests == TRUE)) { + /* this is a successful child. + * create children with cipher algorithm varients + */ + ssl_c_init(qry); + } + } +} + +static void fmt_ssl_cert_full_resp(struct tls_qry *qry, bool is_err) +{ + + struct buf *lbuf = qry->result; + /* if it is a failed grand child qury nothing to print */ + if (qry->is_c && qry->tls_incomplete ){ + if(qry->err.size) + { + buf_cleanup(&qry->err); + } + return; + } + + if (qry->result->size == 0){ /* initialze the first parts RESULT */ + fmt_ssl_ui_result(qry); + } + else { + AS (",{"); + } + + if(qry->retry) { + JD(retry, qry->retry); + } + + fmt_ssl_time(qry); + fmt_ssl_host(qry, is_err); + + if (qry->cipher_r != NULL) { + AS (", "); + JS_NC(ciphers, qry->cipher_r); + } + + if (qry->sslv_str != NULL) { + AS (", "); + JS_NC(version, qry->sslv_str); + } + + if ( !is_err && (qry->ssl_ctx != NULL) && (qry->ssl != NULL) && + (qry->tls_incomplete == FALSE)) { + int i; + qry->ui->q_success++; + AS(","); + JS_NC(cipher, SSL_CIPHER_get_name(SSL_get_current_cipher(qry->ssl))); + + add_certs_to_result(qry); + + if ((qry->is_c == FALSE) && (qry->ui->opt_all_tests == TRUE)) { + /* this is a successful child. 
+ * create children with cipher algorithm varients + */ + ssl_c_init(qry); + } + } + + if ((qry->err.size > 0) || (qry->ui->err.size > 0)) + { + AS(", \"error\" : {"); + if (qry->err.size > 0) { + buf_add(qry->result, qry->err.buf, qry->err.size); + } + if (qry->ui->err.size > 0) { + buf_add(qry->result, qry->ui->err.buf, qry->ui->err.size); + } + AS("}"); + } + AS (" }"); //result +} + + +static void write_results(struct tls_qry *qry) +{ + FILE *fh; + struct buf *lbuf = &qry->ui->result; + /* end of result only JSON closing brackets from here on */ + if (qry->addr_curr != NULL) { + AS("]"); /* resultset : [{}..] */ + } + + AS (", "); + JD(queries, qry->ui->q_serial); + JD_NC(success, qry->ui->q_success); + + if (qry->ui->out_filename) + { + fh= fopen(qry->ui->out_filename, "a"); + if (!fh) { + crondlog(LVL8 "unable to append to '%s'", + qry->ui->out_filename); + } + } + else + fh = stdout; + + if (fh) { + char *closing = " }\n"; /* RESULT { } . end of RESULT line */ + fwrite(qry->ui->result.buf, qry->ui->result.size, 1 , fh); + /* adds the certs directly fh, not to results to save doubling string memory */ + fwrite(closing, strlen(closing), 1 , fh); + + } + buf_cleanup(qry->result); + + if (qry->ui->out_filename) + fclose(fh); + + qry->ui->state = STATUS_FREE; + qry->retry = 0; +} + +static void print_tls_resp(struct tls_qry *qry, bool is_err) { + + struct timeval asap = { 0, 1 }; + struct tls_state *pqry = qry->ui; + int active_c = 0; + + if (qry->ui->active > 0) + qry->ui->active--; + + if ((qry->p != NULL ) && (qry->p->active_c > 0)) { + qry->p->active_c--; + active_c = qry->p->active_c; + } + + if (qry->ui->opt_out_format & OUTPUT_FMT_CERTS_FULL) { + fmt_ssl_cert_full_resp(qry, is_err); + } else { + fmt_ssl_summary(qry, is_err); + } + + if (((qry->p == NULL ) && (qry->active_c == 0)) || + ((qry->p != NULL ) && (qry->p->active_c == 0))) + { + struct buf *lbuf = &qry->ui->result; + evtimer_add(&qry->free_child_ev, &asap); + if (qry->ui->result.size == 0) { /* initialze the first parts RESULT */ + fmt_ssl_ui_result(qry); + } else { + buf_add(&qry->ui->result, ", ", 1); + } + + if(qry->addr_curr != NULL) { + buf_add(&qry->ui->result, qry->result->buf, qry->result->size); + buf_cleanup(qry->result); + if (qry->err.size > 0) { + AS(", \"error\" : {"); + buf_add(&qry->ui->result, qry->err.buf, qry->err.size); + AS("}"); + buf_cleanup(&qry->err); + } + + if (qry->cc->size > 0) { + buf_add(&qry->ui->result, qry->cc->buf, qry->cc->size); + buf_cleanup(qry->cc); + } + + if (qry->ciphers_s_buf->size > 0) { + buf_add(qry->ciphers_s_buf, "]", 1); + buf_add(&qry->ui->result, qry->ciphers_s_buf->buf, qry->ciphers_s_buf->size); + buf_cleanup(qry->ciphers_s_buf); + } + + AS("}"); + } + if (qry->ui->err.size > 0) { + AS(", \"error\" : {"); + buf_add(&qry->ui->result, qry->ui->err.buf, qry->ui->err.size); + AS("}"); + buf_cleanup(&qry->ui->err); + } + + } + + if (qry->ui->active < 1) { + write_results(qry); + evtimer_add(&pqry->free_inst_ev, &asap); + + if (qry->ui->done) /* call the done function */ + evtimer_add(&qry->ui->done_ev, &asap); + } + else { + crondlog_aa(LVL7, "%s no output yet, dst %s active = %d" + " active_c = %d %s %s %s", __func__, + qry->addrstr, qry->ui->active, active_c, qry->sslv_str, qry->cipher_q, qry->ui->host); + } +} + +//#define FREE_NN(p) (if ((p) != NULL) {free((p)); (p) = NULL;}) + + +int tlsscan_delete (void *st) +{ + struct tls_state *pqry = st; + if (pqry == NULL) + return 0; + + if (pqry->state ) + return 0; + + if(pqry->out_filename != NULL) + { + 
free(pqry->out_filename);
+		pqry->out_filename = NULL;
+	}
+
+	if( pqry->str_Atlas != NULL)
+	{
+		free(pqry->str_Atlas);
+		pqry->str_Atlas = NULL;
+	}
+
+	if (pqry->host != NULL)
+	{
+		free(pqry->host);
+		pqry->host = NULL;
+	}
+
+	if (pqry->port != NULL)
+	{
+		free(pqry->port);
+		pqry->port = NULL;
+	}
+	return 1;
+}
+
+static void timeout_cb(int unused UNUSED_PARAM, const short event
+		UNUSED_PARAM, void *h)
+{
+	struct tls_qry *qry = (struct tls_qry *)h;
+
+	if(qry->addr_curr == NULL) {
+		crondlog_aa(LVL7, "%s %s", __func__, qry->ui->host);
+	} else {
+		int active_c = 0;
+		if (qry->p != NULL)
+			active_c = qry->active_c;
+
+		crondlog_aa(LVL7, "%s no output yet, dst %s active = %d"
+				" active_c = %d %s %s %s", __func__,
+				qry->addrstr, qry->ui->active, active_c, qry->sslv_str, qry->cipher_q, qry->ui->host);
+
+		snprintf(line, DEFAULT_LINE_LENGTH, "%s \"timeout\" : %d", qry->err.size ? ", " : "", DEFAULT_NOREPLY_TIMEOUT);
+		buf_add(&qry->err, line, strlen(line));
+	}
+	print_tls_resp(qry, TRUE);
+}
+
+
+
+/* See http://archives.seul.org/libevent/users/Jan-2013/msg00039.html */
+static int cert_verify_callback(X509_STORE_CTX *x509_ctx, void *arg)
+{
+	char cert_str[256];
+	const char *host = (const char *) arg;
+	const char *res_str = "X509_verify_cert failed";
+	HostnameValidationResult res = Error;
+
+	/* This is the function that OpenSSL would call if we hadn't called
+	 * SSL_CTX_set_cert_verify_callback().  Therefore, we are "wrapping"
+	 * the default functionality, rather than replacing it. */
+	int ok_so_far = 0;
+
+	X509 *server_cert = NULL;
+
+	/* AA fixme
+	if (qry->opt_ignore_cert) { */
+		return 1;
+	/*
+	}
+	*/
+
+	ok_so_far = X509_verify_cert(x509_ctx);
+
+	server_cert = X509_STORE_CTX_get_current_cert(x509_ctx);
+
+	if (server_cert == NULL)
+		return 0;
+
+	if (ok_so_far) {
+		res = validate_hostname(host, server_cert);
+
+		switch (res) {
+		case MatchFound:
+			res_str = "MatchFound";
+			break;
+		case MatchNotFound:
+			res_str = "MatchNotFound";
+			break;
+		case NoSANPresent:
+			res_str = "NoSANPresent";
+			break;
+		case MalformedCertificate:
+			res_str = "MalformedCertificate";
+			break;
+		case Error:
+			res_str = "Error";
+			break;
+		default:
+			res_str = "WTF!";
+			break;
+		}
+	}
+
+	X509_NAME_oneline(X509_get_subject_name (server_cert),
+			cert_str, sizeof (cert_str));
+	X509_free(server_cert);
+
+	if (res == MatchFound) {
+		printf("https server '%s' has this certificate, "
+				"which looks good to me:\n%s\n",
+				host, cert_str);
+		return 1;
+	}
+	else {
+		printf("Got '%s' for hostname '%s' and certificate:\n%s\n",
+				res_str, host, cert_str);
+		return 1;
+	}
+}
+
+static bool verify_ssl_cert (struct tls_qry *qry) {
+
+	/* Attempt to use the system's trusted root certificates.
+	 * (This path is only valid for Debian-based systems.) */
+	//if (1 != SSL_CTX_load_verify_locations(qry->ssl_ctx, "/etc/ssl/certs/ca-certificates.crt", NULL)) crondlog(LVL7,"SSL_CTX_load_verify_locations");
+
+	/* Ask OpenSSL to verify the server certificate.  Note that this
+	 * does NOT include verifying that the hostname is correct.
+	 * So, by itself, this means anyone with any legitimate
+	 * CA-issued certificate for any website, can impersonate any
+	 * other website in the world.  This is not good.  See "The
+	 * Most Dangerous Code in the World" article at
+	 * https://crypto.stanford.edu/~dabo/pubs/abstracts/ssl-client-bugs.html
+	 */
+
+	SSL_CTX_set_verify(qry->ssl_ctx, SSL_VERIFY_PEER, NULL);
+
+	/* This is how we solve the problem mentioned in the previous
+	 * comment.
We "wrap" OpenSSL's validation routine in our + * own routine, which also validates the hostname by calling + * the code provided by iSECPartners. Note that even though + * the "Everything You've Always Wanted to Know About + * Certificate Validation With OpenSSL (But Were Afraid to + * Ask)" paper from iSECPartners says very explicitly not to + * call SSL_CTX_set_cert_verify_callback (at the bottom of + * page 2), what we're doing here is safe because our + * cert_verify_callback() calls X509_verify_cert(), which is + * OpenSSL's built-in routine which would have been called if + * we hadn't set the callback. Therefore, we're just + * "wrapping" OpenSSL's routine, not replacing it. */ + + SSL_CTX_set_cert_verify_callback (qry->ssl_ctx, cert_verify_callback, (void *) qry->ui->host); +} + + + +static void event_cb(struct bufferevent *bev, short events, void *ptr) +{ + struct tls_qry *qry = ptr; + struct timeval rectime ; + if (events & BEV_EVENT_ERROR) + { + crondlog_aa(LVL7, "BEV_EVENT_ERROR %s %s %s active = %d %s %s | %s", __func__, + qry->ui->host, qry->addrstr, qry->ui->active, qry->sslv_str, qry->cipher_q, + evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); + + evtimer_del(&qry->timeout_ev); + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"connect\" : \"%s\"", + qry->err.size ? ", " : "", + evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); + buf_add(&qry->err, line, strlen(line)); + print_tls_resp(qry, TRUE); + qry->ssl = NULL; /* I think SSL object is cleaned up after the error ??? */ + return; + } + + if (events & BEV_EVENT_CONNECTED) + { + int active_c = 0; + if (qry->loc_socklen == 0) { + qry->loc_socklen= sizeof(qry->loc_sin6); + getsockname(bufferevent_getfd(bev), &qry->loc_sin6, &qry->loc_socklen); + } + + gettimeofday(&rectime, NULL); + + qry->triptime = (rectime.tv_sec - qry->start_time.tv_sec)*1000 + + (rectime.tv_usec - qry->start_time.tv_usec)/1e3; + + if (qry->p != NULL) { + active_c = qry->p->active_c; + } + + crondlog_aa(LVL7, "%s BEV_EVENT_CONNECTED active = %d active_c = %d %s %s %s %s", __func__, + qry->ui->active, active_c, qry->sslv_str, qry->cipher_q, qry->addrstr, qry->ui->host); + write_cb(qry->bev, qry); + return; + } + else { + printf (" called %s unknown event 0x%x\n", __func__, events); + } +} + +static void http_read_cb(struct bufferevent *bev UNUSED_PARAM, void *ptr) +{ + struct tls_qry *qry = ptr; + int active_c = 0; + + if (qry->p != NULL) + active_c = qry->p->active_c; + + crondlog_aa(LVL7, "%s BEV_EVENT_CONNECTED active = %d active_c = %d %s %s %s %s", __func__, + qry->ui->active, active_c, qry->sslv_str, qry->cipher_q, qry->addrstr, qry->ui->host); + evtimer_del(&qry->timeout_ev); + qry->tls_incomplete = FALSE; + print_tls_resp(qry, FALSE); + bufferevent_free(qry->bev); + qry->bev = NULL; +} +static void write_cb(struct bufferevent *bev, void *ptr) +{ + struct evbuffer *output; + struct timeval endtime; + struct tls_qry *qry = ptr; + + // printf("%s: start:\n", __func__); + + for(;;) + { + switch(qry->writestate) + { + case WRITE_FIRST: + gettimeofday(&endtime, NULL); + qry->ttc= (endtime.tv_sec- + qry->start_time.tv_sec)*1e3 + + (endtime.tv_usec - qry->start_time.tv_usec)/1e3; + qry->writestate= WRITE_HEADER; + continue; + case WRITE_HEADER: + output= bufferevent_get_output(bev); + evbuffer_add_printf(output, "%s %s HTTP/1.%c\r\n", + qry->ui->do_get ? "GET" : + qry->ui->do_head ? "HEAD" : "POST", qry->ui->path, + qry->ui->do_http10 ? 
'0' : '1');
+			evbuffer_add_printf(output, "Host: %s\r\n",
+				qry->ui->host);
+			evbuffer_add_printf(output, "Connection: close\r\n");
+			evbuffer_add_printf(output, "User-Agent: %s\r\n",
+				qry->ui->user_agent);
+			evbuffer_add_printf(output, "\r\n");
+
+			qry->writestate = WRITE_DONE;
+			// printf("%s: done: \n", __func__);
+			return;
+
+		case WRITE_DONE:
+			return;
+		default:
+			printf("writecb: unknown write state: %d\n",
+				qry->writestate);
+			return;
+		}
+	}
+}
+
+static void local_exit(void *state UNUSED_PARAM)
+{
+
+	struct timeval asap = { 0, 2 };
+	event_base_loopexit (EventBase, &asap);
+	return;
+}
+
+/* called only once. Initialize tls_base variables here */
+static void tls_base_new(struct event_base *event_base)
+{
+	tls_base = xzalloc(sizeof( struct tls_base));
+}
+
+static bool tls_arg_validate (int argc, char *argv[], struct tls_state *pqry )
+{
+	if (optind != argc-1) {
+		crondlog(LVL9 "ERROR no server IP address in input");
+		tlsscan_delete(pqry);
+		return TRUE;
+	}
+	else {
+		pqry->host = strdup(argv[optind]);
+	}
+	if (pqry->opt_all_tests ) {
+		// pqry->opt_ssl_v3 = SSL3_VERSION;
+		// pqry->opt_tls_v1 = TLS1_VERSION;
+		// pqry->opt_tls_v11 = TLS1_1_VERSION;
+		pqry->opt_tls_v12 = TLS1_2_VERSION;
+	}
+
+	if ( pqry->opt_timeout > 0)
+		pqry->timeout_tv.tv_sec = pqry->opt_timeout;
+
+	if(pqry->port == NULL)
+		pqry->port = strdup("443");
+
+	return FALSE;
+}
+
+/* eperd call this to initialize */
+static struct tls_state * tlsscan_init (int argc, char *argv[], void (*done)(void *state))
+{
+	int c;
+	struct tls_state *pqry = NULL;
+	LogFile = "/dev/tty";
+
+	if (tls_base == NULL) {
+		tls_base_new(EventBase);
+		RAND_poll();
+		SSL_library_init(); /* call only once this is not reentrant. */
+		ERR_load_crypto_strings();
+		SSL_load_error_strings();
+		OpenSSL_add_all_algorithms();
+	}
+
+	if (tls_base == NULL) {
+		crondlog(LVL8 "tls_base_new failed");
+		return NULL;
+	}
+
+	/* initialize a query object */
+	pqry = xzalloc(sizeof(struct tls_state));
+	pqry->opt_retry_max = 0;
+	pqry->port = NULL;	/* default "443" is strdup'ed in tls_arg_validate() */
+	pqry->opt_ignore_cert = 0;
+	buf_init(&pqry->err, -1);
+	buf_init(&pqry->result, -1);
+	pqry->do_http10= 0;
+	pqry->do_get= 0;
+	pqry->do_head= 1;
+	pqry->user_agent= "httpget for atlas.ripe.net";
+	pqry->path = "/";
+	pqry->done = done;
+	pqry->opt_all_tests = TRUE;
+	pqry->timeout_tv.tv_sec = 5;
+	pqry->opt_out_format = OUTPUT_FMT_CERTS_ARRAY;
+//	pqry->opt_out_format = OUTPUT_FMT_CERTS_FULL;
+
+	if (done != NULL)
+		evtimer_assign(&pqry->done_ev, EventBase, done_cb, pqry);
+
+	optind = 0;
+	while (c= getopt_long(argc, argv, "46O:p:T:A?", longopts, NULL), c != -1) {
+		switch (c) {
+		case '4':
+			pqry->opt_v4 = 1;
+			break;
+
+		case '6':
+			pqry->opt_v6 = 1;
+			break;
+
+		case 'A':
+			pqry->opt_all_tests = TRUE;
+			break;
+
+		case 'O':
+			pqry->out_filename = strdup(optarg);
+			break;
+
+		case 'p':
+			pqry->port = strdup(optarg);
+			break;
+
+		case 'T' :
+			pqry->opt_timeout = strtoul(optarg, NULL, 10);
+			if ((pqry->opt_timeout <= 0) || (pqry->opt_timeout > 3600)) {
+				fprintf(stderr, "ERROR invalid timeout "
+						"-T %s ??.
1 - 3600 seconds\n", optarg); + tlsscan_delete(pqry); + return (0); + } + break; + } + } + + if (tls_arg_validate(argc, argv, pqry)) + { + crondlog(LVL8 "tls_arg_validate failed"); + return NULL; + } + + return pqry; +} + +static bool tls_inst_init(struct tls_state *pqry, struct evutil_addrinfo *addr_curr, int sslv) +{ + struct tls_qry *qry = xzalloc(sizeof(struct tls_qry)); + + qry->cc = xzalloc(sizeof(struct buf)); + qry->certs = xzalloc(sizeof(struct buf)); + qry->ciphers_s_buf = xzalloc(sizeof(struct buf)); + qry->result = xzalloc(sizeof(struct buf)); + qry->addr_curr = addr_curr; + qry->ui = pqry; + + evtimer_assign(&qry->free_child_ev, EventBase, free_child_cb, qry); + evtimer_assign(&qry->timeout_ev, EventBase, timeout_cb, qry); + + if (addr_curr == NULL) { /* there was dns error */ + struct timeval asap = { 0, 0}; + evtimer_add(&qry->timeout_ev, &asap); + return FALSE; + } + + pqry->active++; + qry->ui->q_serial++; + qry->serial = qry->ui->q_serial; + + qry->sslv = sslv; + + qry->tls_incomplete = TRUE; + tls_inst_start(qry, "ALL:COMPLEMENTOFALL"); + + return FALSE; +} + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx) +{ + struct tls_state *pqry = (struct tls_state *) ctx; + struct evutil_addrinfo *cur = NULL; + + pqry->addr = res; + pqry->dns_count = 0; + + if (result != 0) + { + snprintf(line, DEFAULT_LINE_LENGTH, "%s \"EVDNS\" : \"%s\"", + pqry->err.size ? ", " : "", + evutil_gai_strerror(result)); + buf_add(&pqry->err, line, strlen(line)); + pqry->addr = NULL; + tls_inst_init(pqry, cur, 0); /* initialize qry and print error */ + return; + } + + for (cur = res; cur != NULL; cur = cur->ai_next) { + pqry->dns_count++; + if (pqry->opt_all_tests) { + // tls_inst_init(pqry, cur, pqry->opt_ssl_v3); + // tls_inst_init(pqry, cur, pqry->opt_tls_v1); + // tls_inst_init(pqry, cur, pqry->opt_tls_v11); + tls_inst_init(pqry, cur, pqry->opt_tls_v12); + } + else { + tls_inst_init(pqry, cur, 0); + } + } +} + +static void printErrorQuick (struct tls_state *pqry) +{ + FILE *fh; + + /* careful not to use json macros they will write over real results */ + + struct timeval now; + if (pqry->out_filename) + { + fh= fopen(pqry->out_filename, "a"); + if (!fh){ + crondlog(LVL8 "unable to append to '%s'", + pqry->out_filename); + return; + } + } + else + fh = stdout; + + fprintf(fh, "RESULT { "); + fprintf(fh, "\"fw\" : \"%d\",", get_atlas_fw_version()); + fprintf(fh, "\"id\" : 9203 ,"); + gettimeofday(&now, NULL); + fprintf(fh, "\"time\" : %ld ,", now.tv_sec); + + fprintf(fh, "\"error\" : [{ "); + fprintf(fh, "\"query busy\": \"not starting a new one. previous one is not done yet\"}"); + if(pqry->str_Atlas) + { + fprintf(fh, ",{"); + fprintf(fh, "\"id\" : \"%s\"", pqry->str_Atlas); + fprintf(fh, ",\"start time\" : %ld", pqry->start_time.tv_sec); + if(pqry->retry) { + fprintf(fh, ",\"retry\": %d", pqry->retry); + + } + if(pqry->opt_retry_max) { + fprintf(fh, ",\"retry max\": %d", pqry->opt_retry_max); + } + fprintf(fh, "}"); + } + fprintf(fh,"]}"); + + if (pqry->out_filename) + fclose(fh); +} + +void tlsscan_start (struct tls_state *pqry) +{ + switch(pqry->state) + { + case STATUS_FREE: + pqry->state = STATUS_START; + break; + default: + printErrorQuick(pqry); + /* this query is still active. 
can't start another one */
+			return;
+	}
+
+	gettimeofday(&pqry->start_time, NULL);
+
+	pqry->hints.ai_family = AF_UNSPEC;
+
+	if(pqry->opt_v6 && !pqry->opt_v4)
+		pqry->hints.ai_family = AF_INET6;
+
+	if(pqry->opt_v4 && !pqry->opt_v6)
+		pqry->hints.ai_family = AF_INET;
+
+	pqry->hints.ai_flags = 0;
+	pqry->hints.ai_socktype = SOCK_STREAM;
+
+	pqry->q_serial = 0;
+	pqry->q_success = 0;
+	pqry->active = 0;
+	pqry->retry = 0;
+	pqry->dns_count = 0;
+
+	(void) evdns_getaddrinfo(DnsBase, pqry->host, "443", &pqry->hints,
+			dns_cb, pqry);
+	evtimer_assign(&pqry->free_inst_ev, EventBase, free_pqry_inst_cb, pqry);
+}
+
+int evtlsscan_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+int evtlsscan_main(int argc, char **argv)
+{
+	struct tls_state *pqry = NULL; /* instance per host(user input) */
+
+	EventBase = event_base_new();
+	if (!EventBase)
+	{
+		crondlog(LVL9 "ERROR: critical event_base_new failed"); /* exits */
+		return 1;
+	}
+
+	DnsBase = evdns_base_new(EventBase, 1);
+	if (!DnsBase) {
+		crondlog(DIE9 "ERROR: critical evdns_base_new failed"); /* exits */
+		event_base_free (EventBase);
+		return 1;
+	}
+
+	pqry = tlsscan_init(argc, argv, local_exit);
+
+	if(pqry == NULL) {
+		crondlog(DIE9 "ERROR: critical tlsscan_init failed"); /* exits */
+		event_base_free (EventBase);
+		return 1;
+	}
+
+	tlsscan_start(pqry);
+
+	event_base_dispatch(EventBase);
+	event_base_loopbreak (EventBase);
+
+	if(EventBase)
+		event_base_free(EventBase);
+
+	return 0;
+}
+
+static void crondlog_aa(const char *ctl, char *fmt, ...)
+{
+	va_list va;
+	char buff[1000];
+	int level = (ctl[0] & 0x1f);
+
+	va_start(va, fmt);
+	vsnprintf(buff, 1000 - 1, fmt, va);
+	va_end(va);
+	printf("%s\n", buff);
+}
+
+struct testops tlsscan_ops = {tlsscan_init, tlsscan_start, tlsscan_delete};
diff --git a/probe-busybox/eperd/evtraceroute.c b/probe-busybox/eperd/evtraceroute.c
new file mode 100644
index 00000000..5deef6c0
--- /dev/null
+++ b/probe-busybox/eperd/evtraceroute.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2013 RIPE NCC
+ * Licensed under GPLv2 or later, see file LICENSE in this tarball for details.
+ * Standalone version of the event-based traceroute.
+ */ +//config:config EVTRACEROUTE +//config: bool "evtraceroute" +//config: default n +//config: help +//config: standalone version of event-driven traceroute + +//applet:IF_EVTRACEROUTE(APPLET(evtraceroute, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-$(CONFIG_EVTRACEROUTE) += evtraceroute.o + +//usage:#define evtraceroute_trivial_usage +//usage: "-[46FIrTU] [-a ] [-b ] [-c ]" +//usage: "\n\t[-f ] [-g ] [-i ] [-m ] " +//usage: "[-p ]\n\t[-t ] [-w ] [-z ] [-A ] " +//usage: "[-B ] [-O ]\n\t[-S ] [-H ] " +//usage: "[-D ] [-R ]\n\t[-W Enables Paris-traceroute" +//usage: "\n -b Base for Paris ID" +//usage: "\n -c #packets per hop" +//usage: "\n -f Starting hop" +//usage: "\n -g Gap limit" +//usage: "\n -i Use this interface" +//usage: "\n -m Max hops" +//usage: "\n -p Destination port" +//usage: "\n -t TOS field" +//usage: "\n -w No reply timeout (ms)" +//usage: "\n -z Dup timeout (ms)" +//usage: "\n -A Atlas measurement ID" +//usage: "\n -B Atlas bundle ID" +//usage: "\n -O Name of output file" +//usage: "\n -S Size of packet" +//usage: "\n -H Add IPv6 Hop-by-hop Option this size" +//usage: "\n -D Add IPv6 Destination Option this size" +//usage: "\n -R Response in file" +//usage: "\n -W Response out file" + +#include "libbb.h" +#include +#include +#include +#include + +#include "eperd.h" + +static void done(void *state UNUSED_PARAM, int error) +{ + exit(error); +} + +int evtraceroute_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int evtraceroute_main(int argc UNUSED_PARAM, char **argv) +{ + int r; + void *state; + + /* Create libevent event base */ + EventBase= event_base_new(); + if (!EventBase) + { + fprintf(stderr, "evtraceroute: event_base_new failed\n"); + exit(1); + } + DnsBase= evdns_base_new(EventBase, 1 /*initialize*/); + if (!DnsBase) + { + fprintf(stderr, "evdns_base_new failed\n"); + exit(1); + } + + state= traceroute_ops.init(argc, argv, done); + if (!state) + { + fprintf(stderr, "evtraceroute: traceroute_ops.init failed\n"); + exit(1); + } + traceroute_ops.start(state); + + r= event_base_loop(EventBase, 0); + if (r != 0) + { + fprintf(stderr, "evtraceroute: event_base_loop failed\n"); + exit(1); + } + return 0; /* not reached */ +} + diff --git a/probe-busybox/eperd/http2.c b/probe-busybox/eperd/http2.c new file mode 100644 index 00000000..20bc3fec --- /dev/null +++ b/probe-busybox/eperd/http2.c @@ -0,0 +1,3150 @@ +/* + * Copyright (c) 2021 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ * http2.c + */ + +#include "libbb.h" +#include "eperd.h" +#include "http2.h" +#include +#include +#include +#include +#include +#include + +struct http2_hdr +{ + uint8_t length[3]; + uint8_t type; + uint8_t flags; + uint8_t stream_id[4]; +}; + +#define HTTP2_HDR_TYPE_DATA 0 +#define HTTP2_HDR_TYPE_HEADERS 1 +#define HTTP2_HDR_TYPE_RST_STREAM 3 +#define HTTP2_HDR_TYPE_SETTINGS 4 +#define HTTP2_HDR_TYPE_PING 6 +#define HTTP2_HDR_TYPE_GOAWAY 7 +#define HTTP2_HDR_TYPE_WINDOW_UPDATE 8 + +#define HTTP2_HDR_DATA_END_STREAM 0x01 +#define HTTP2_HDR_DATA_PADDED 0x08 + +#define HTTP2_HDR_HEADERS_END_STREAM 0x01 +#define HTTP2_HDR_HEADERS_END_HEADERS 0x04 +#define HTTP2_HDR_HEADERS_PADDED 0x08 +#define HTTP2_HDR_HEADERS_PRIORITY 0x20 + +#define HTTP2_HDR_R 0x80000000 +#define HTTP2_HDR_SETTINGS_ACK 0x01 + +#define HTTP2_HDR_SETTINGS_HEADER_TABLE_SIZE 0x01 +#define HTTP2_HDR_SETTINGS_ENABLE_PUSH 0x02 +#define HTTP2_HDR_SETTINGS_MAX_CONCURRENT_STREAMS 0x03 +#define HTTP2_HDR_SETTINGS_INITIAL_WINDOW_SIZE 0x04 +#define HTTP2_HDR_SETTINGS_MAX_FRAME_SIZE 0x05 +#define HTTP2_HDR_SETTINGS_MAX_HEADER_LIST_SIZE 0x06 + +#define HTTP2_HDR_GOAWAY_R 0x80000000 +#define HTTP2_HDR_WINDOW_UPDATE_R 0x80000000 + +#define HTTP2_DEFAULT_WINDOW 65535 +#define HTTP2_MAX_WINDOW 0x7fffffff + +#define HTTP2_PROTOCOL_ERROR 0x1 +#define HTTP2_INTERNAL_ERROR 0x2 +#define HTTP2_FLOW_CONTROL_ERROR 0x3 +#define HTTP2_FRAME_SIZE_ERROR 0x6 +#define HTTP2_COMPRESSION_ERROR 0x9 + +#define HTTP2_HEADER_STATUS ":status" +#define HTTP2_CONTENT_TYPE "content-type" +#define TEXT_HTML "text/html" +#define APPLICATION_DNS_MESSAGE "application/dns-message" + +#define HPACK_STATIC_NR 61 + +#define HPACK_IHF 0x80 /* Indexed Header Field */ +#define HPACK_IHF_MASK 0x80 +#define HPACK_IHF_PREFIX_LEN 1 + +#define HPACK_LHFII 0x40 /* Literal Header Field with Incr. 
Indexing */ +#define HPACK_LHFII_MASK 0xC0 +#define HPACK_LHFII_PREFIX_LEN 2 + +#define HPACK_LHFwI 0x00 /* Literal Header Field without Indexing */ +#define HPACK_LHFwI_MASK 0xF0 +#define HPACK_LHFwI_PREFIX_LEN 4 + +#define HPACK_LHFNI 0x10 + +#define HPACK_INT_MORE 0x80 /* More bytes follow */ + +#define HPACK_H 0x80 /* Huffman encoding */ +#define HPACK_H_MASK 0x80 +#define HPACK_H_PREFIX_LEN 1 + +#define HPACK_HUFF_ENC_NO 256 /* Number of entries in huffman table */ + +static struct table_ent +{ + const char *name; + const char *value; +} static_table[]= +{ +/* 0 */ { NULL, NULL }, +/* 1 */ { ":authority", NULL }, +/* 2 */ { ":method", "GET" }, +/* 3 */ { ":method", "POST" }, +/* 4 */ { ":path", "/" }, +/* 5 */ { ":path", "/index.html" }, +/* 6 */ { ":scheme", "http" }, +/* 7 */ { ":scheme", "https" }, +/* 8 */ { ":status", "200" }, +/* 9 */ { ":status", "204" }, +/* 10 */ { ":status", "206" }, +/* 11 */ { ":status", "304" }, +/* 12 */ { ":status", "400" }, +/* 13 */ { ":status", "404" }, +/* 14 */ { ":status", "500" }, +/* 15 */ { "accept-charset", NULL }, +/* 16 */ { "accept-encoding", "gzip, deflate" }, +/* 17 */ { "accept-language", NULL }, +/* 18 */ { "accept-ranges", NULL }, +/* 19 */ { "accept", NULL }, +/* 20 */ { "accept-control-allow-origin", NULL }, +/* 21 */ { "age", NULL }, +/* 22 */ { "allow", NULL }, +/* 23 */ { "authorization", NULL }, +/* 24 */ { "cache-control", NULL }, +/* 25 */ { "content-disposition", NULL }, +/* 26 */ { "content-encoding", NULL }, +/* 27 */ { "content-language", NULL }, +/* 28 */ { "content-length", NULL }, +/* 29 */ { "content-location", NULL }, +/* 30 */ { "content-range", NULL }, +/* 31 */ { "content-type", NULL }, +/* 32 */ { "cookie", NULL }, +/* 33 */ { "date", NULL }, +/* 34 */ { "etag", NULL }, +/* 35 */ { "expect", NULL }, +/* 36 */ { "expires", NULL }, +/* 37 */ { "from", NULL }, +/* 38 */ { "host", NULL }, +/* 39 */ { "if-match", NULL }, +/* 40 */ { "if-modified-since", NULL }, +/* 41 */ { "if-none-match", NULL }, +/* 42 */ { "if-range", NULL }, +/* 43 */ { "if-unmodified-since", NULL }, +/* 44 */ { "last-modified", NULL }, +/* 45 */ { "link", NULL }, +/* 46 */ { "location", NULL }, +/* 47 */ { "max-forwards", NULL }, +/* 48 */ { "proxy-authentication", NULL }, +/* 49 */ { "proxy-authorization", NULL }, +/* 50 */ { "range", NULL }, +/* 51 */ { "referer", NULL }, +/* 52 */ { "refresh", NULL }, +/* 53 */ { "retry-after", NULL }, +/* 54 */ { "server", NULL }, +/* 55 */ { "set-cookie", NULL }, +/* 56 */ { "strict-transport-security", NULL }, +/* 57 */ { "transfer-encoding", NULL }, +/* 58 */ { "user-agent", NULL }, +/* 59 */ { "vary", NULL }, +/* 60 */ { "via", NULL }, +/* 61 */ { "www-authenticate", NULL }, +}; + +struct huffman_encode +{ + uint32_t encoding; + uint8_t bits; + uint8_t value; +} huffman_encode[]= +{ +/* 0 */ { 0x1ff8, 13, 0 }, +/* 1 */ { 0x7fffd8, 23, 1 }, +/* 2 */ { 0xfffffe2, 28, 2 }, +/* 3 */ { 0xfffffe3, 28, 3 }, +/* 4 */ { 0xfffffe4, 28, 4 }, +/* 5 */ { 0xfffffe5, 28, 5 }, +/* 6 */ { 0xfffffe6, 28, 6 }, +/* 7 */ { 0xfffffe7, 28, 7 }, +/* 8 */ { 0xfffffe8, 28, 8 }, +/* 9 */ { 0xffffea, 24, 9 }, +/* 10 */ { 0x3ffffffc, 30, 10 }, +/* 11 */ { 0xfffffe9, 28, 11 }, +/* 12 */ { 0xfffffea, 28, 12 }, +/* 13 */ { 0x3ffffffd, 30, 13 }, +/* 14 */ { 0xfffffeb, 28, 14 }, +/* 15 */ { 0xfffffec, 28, 15 }, +/* 16 */ { 0xfffffed, 28, 16 }, +/* 17 */ { 0xfffffee, 28, 17 }, +/* 18 */ { 0xfffffef, 28, 18 }, +/* 19 */ { 0xffffff0, 28, 19 }, +/* 20 */ { 0xffffff1, 28, 20 }, +/* 21 */ { 0xffffff2, 28, 21 }, +/* 22 */ { 0x3ffffffe, 30, 22 }, 
+/* 23 */ { 0xffffff3, 28, 23 }, +/* 24 */ { 0xffffff4, 28, 24 }, +/* 25 */ { 0xffffff5, 28, 25 }, +/* 26 */ { 0xffffff6, 28, 26 }, +/* 27 */ { 0xffffff7, 28, 27 }, +/* 28 */ { 0xffffff8, 28, 28 }, +/* 29 */ { 0xffffff9, 28, 29 }, +/* 30 */ { 0xffffffa, 28, 30 }, +/* 31 */ { 0xffffffb, 28, 31 }, +/* 32 */ { 0x14, 6, ' ' }, +/* 33 */ { 0x3f8, 10, '!' }, +/* 34 */ { 0x3f9, 10, '"' }, +/* 35 */ { 0xffa, 12, '#' }, +/* 36 */ { 0x1ff9, 13, '$' }, +/* 37 */ { 0x15, 6, '%' }, +/* 38 */ { 0xf8, 8, '&' }, +/* 39 */ { 0x7fa, 11, '\'' }, +/* 40 */ { 0x3fa, 10, '(' }, +/* 41 */ { 0x3fb, 10, ')' }, +/* 42 */ { 0xf9, 8, '*' }, +/* 43 */ { 0x7fb, 11, '+' }, +/* 44 */ { 0xfa, 8, ',' }, +/* 45 */ { 0x16, 6, '-' }, +/* 46 */ { 0x17, 6, '.' }, +/* 47 */ { 0x18, 6, '/' }, +/* 48 */ { 0x0, 5, '0' }, +/* 49 */ { 0x1, 5, '1' }, +/* 50 */ { 0x2, 5, '2' }, +/* 51 */ { 0x19, 6, '3' }, +/* 52 */ { 0x1a, 6, '4' }, +/* 53 */ { 0x1b, 6, '5' }, +/* 54 */ { 0x1c, 6, '6' }, +/* 55 */ { 0x1d, 6, '7' }, +/* 56 */ { 0x1e, 6, '8' }, +/* 57 */ { 0x1f, 6, '9' }, +/* 58 */ { 0x5c, 7, ':' }, +/* 59 */ { 0xfb, 8, ';' }, +/* 60 */ { 0x7ffc, 15, '<' }, +/* 61 */ { 0x20, 6, '=' }, +/* 62 */ { 0xffb, 12, '>' }, +/* 63 */ { 0x3fc, 10, '?' }, +/* 64 */ { 0x1ffa, 13, '@' }, +/* 65 */ { 0x21, 6, 'A' }, +/* 66 */ { 0x5d, 7, 'B' }, +/* 67 */ { 0x5e, 7, 'C' }, +/* 68 */ { 0x5f, 7, 'D' }, +/* 69 */ { 0x60, 7, 'E' }, +/* 70 */ { 0x61, 7, 'F' }, +/* 71 */ { 0x62, 7, 'G' }, +/* 72 */ { 0x63, 7, 'H' }, +/* 73 */ { 0x64, 7, 'I' }, +/* 74 */ { 0x65, 7, 'J' }, +/* 75 */ { 0x66, 7, 'K' }, +/* 76 */ { 0x67, 7, 'L' }, +/* 77 */ { 0x68, 7, 'M' }, +/* 78 */ { 0x69, 7, 'N' }, +/* 79 */ { 0x6a, 7, 'O' }, +/* 80 */ { 0x6b, 7, 'P' }, +/* 81 */ { 0x6c, 7, 'Q' }, +/* 82 */ { 0x6d, 7, 'R' }, +/* 83 */ { 0x6e, 7, 'S' }, +/* 84 */ { 0x6f, 7, 'T' }, +/* 85 */ { 0x70, 7, 'U' }, +/* 86 */ { 0x71, 7, 'V' }, +/* 87 */ { 0x72, 7, 'W' }, +/* 88 */ { 0xfc, 8, 'X' }, +/* 89 */ { 0xf3, 7, 'Y' }, +/* 90 */ { 0xfd, 8, 'Z' }, +/* 91 */ { 0x1ffb, 13, '[' }, +/* 92 */ { 0x7fff0, 19, '\\' }, +/* 93 */ { 0x1ffc, 13, ']' }, +/* 94 */ { 0x3ffc, 14, '^' }, +/* 95 */ { 0x22, 6, '_' }, +/* 96 */ { 0x7ffd, 15, '`' }, +/* 97 */ { 0x3, 5, 'a' }, +/* 98 */ { 0x23, 6, 'b' }, +/* 99 */ { 0x4, 5, 'c' }, +/* 100 */ { 0x24, 6, 'd' }, +/* 101 */ { 0x5, 5, 'e' }, +/* 102 */ { 0x25, 6, 'f' }, +/* 103 */ { 0x26, 6, 'g' }, +/* 104 */ { 0x27, 6, 'h' }, +/* 105 */ { 0x6, 5, 'i' }, +/* 106 */ { 0x74, 7, 'j' }, +/* 107 */ { 0x75, 7, 'k' }, +/* 108 */ { 0x28, 6, 'l' }, +/* 109 */ { 0x29, 6, 'm' }, +/* 110 */ { 0x2a, 6, 'n' }, +/* 111 */ { 0x7, 5, 'o' }, +/* 112 */ { 0x2b, 6, 'p' }, +/* 113 */ { 0x76, 7, 'q' }, +/* 114 */ { 0x2c, 6, 'r' }, +/* 115 */ { 0x8, 5, 's' }, +/* 116 */ { 0x9, 5, 't' }, +/* 117 */ { 0x2d, 6, 'u' }, +/* 118 */ { 0x77, 7, 'v' }, +/* 119 */ { 0x78, 7, 'w' }, +/* 120 */ { 0x79, 7, 'x' }, +/* 121 */ { 0x7a, 7, 'y' }, +/* 122 */ { 0x7b, 7, 'z' }, +/* 123 */ { 0x7ffe, 15, '{' }, +/* 124 */ { 0x7fc, 11, '|' }, +/* 125 */ { 0x3ffd, 14, '}' }, +/* 126 */ { 0x1ffd, 13, '~' }, +/* 127 */ { 0xffffffc, 28, 127 }, +/* 128 */ { 0xfffe6, 20, 128 }, +/* 129 */ { 0x3fffd2, 22, 129 }, +/* 130 */ { 0xfffe7, 20, 130 }, +/* 131 */ { 0xfffe8, 20, 131 }, +/* 132 */ { 0x3fffd3, 22, 132 }, +/* 133 */ { 0x3fffd4, 22, 133 }, +/* 134 */ { 0x3fffd5, 22, 134 }, +/* 135 */ { 0x7fffd9, 23, 135 }, +/* 136 */ { 0x3fffd6, 22, 136 }, +/* 137 */ { 0x7fffda, 23, 137 }, +/* 138 */ { 0x7fffdb, 23, 138 }, +/* 139 */ { 0x7fffdc, 23, 139 }, +/* 140 */ { 0x7fffdd, 23, 140 }, +/* 141 */ { 0x7fffde, 23, 141 }, +/* 142 */ { 
0xffffeb, 24, 142 }, +/* 143 */ { 0x7fffdf, 23, 143 }, +/* 144 */ { 0xffffec, 24, 144 }, +/* 145 */ { 0xffffed, 24, 145 }, +/* 146 */ { 0x3fffd7, 22, 146 }, +/* 147 */ { 0x7fffe0, 23, 147 }, +/* 148 */ { 0xffffee, 24, 148 }, +/* 149 */ { 0x7fffe1, 23, 149 }, +/* 150 */ { 0x7fffe2, 23, 150 }, +/* 151 */ { 0x7fffe3, 23, 151 }, +/* 152 */ { 0x7fffe4, 23, 152 }, +/* 153 */ { 0x1fffdc, 21, 153 }, +/* 154 */ { 0x3fffd8, 22, 154 }, +/* 155 */ { 0x7fffe5, 23, 155 }, +/* 156 */ { 0x3fffd9, 22, 156 }, +/* 157 */ { 0x7fffe6, 23, 157 }, +/* 158 */ { 0x7fffe7, 23, 158 }, +/* 159 */ { 0xffffef, 24, 159 }, +/* 160 */ { 0x3fffda, 22, 160 }, +/* 161 */ { 0x1fffdd, 21, 161 }, +/* 162 */ { 0xfffe9, 20, 162 }, +/* 163 */ { 0x3fffdb, 22, 163 }, +/* 164 */ { 0x3fffdc, 22, 164 }, +/* 165 */ { 0x7fffe8, 23, 165 }, +/* 166 */ { 0x7fffe9, 23, 166 }, +/* 167 */ { 0x1fffde, 21, 167 }, +/* 168 */ { 0x7fffea, 23, 168 }, +/* 169 */ { 0x3fffdd, 22, 169 }, +/* 170 */ { 0x3fffde, 22, 170 }, +/* 171 */ { 0xfffff0, 24, 171 }, +/* 172 */ { 0x1fffdf, 21, 172 }, +/* 173 */ { 0x3fffdf, 22, 173 }, +/* 174 */ { 0x7fffeb, 23, 174 }, +/* 175 */ { 0x7fffec, 23, 175 }, +/* 176 */ { 0x1fffe0, 21, 176 }, +/* 177 */ { 0x1fffe1, 21, 177 }, +/* 178 */ { 0x3fffe0, 22, 178 }, +/* 179 */ { 0x1fffe2, 21, 179 }, +/* 180 */ { 0x7fffed, 23, 180 }, +/* 181 */ { 0x3fffe1, 22, 181 }, +/* 182 */ { 0x7fffee, 23, 182 }, +/* 183 */ { 0x7fffef, 23, 183 }, +/* 184 */ { 0xfffea, 20, 184 }, +/* 185 */ { 0x3fffe2, 22, 185 }, +/* 186 */ { 0x3fffe3, 22, 186 }, +/* 187 */ { 0x3fffe4, 22, 187 }, +/* 188 */ { 0x7ffff0, 23, 188 }, +/* 189 */ { 0x3fffe5, 22, 189 }, +/* 190 */ { 0x3fffe6, 22, 190 }, +/* 191 */ { 0x7ffff1, 23, 191 }, +/* 192 */ { 0x3ffffe0, 26, 192 }, +/* 193 */ { 0x3ffffe1, 26, 193 }, +/* 194 */ { 0xfffeb, 20, 194 }, +/* 195 */ { 0x7fff1, 19, 195 }, +/* 196 */ { 0x3fffe7, 22, 196 }, +/* 197 */ { 0x7ffff2, 23, 197 }, +/* 198 */ { 0x3fffe8, 22, 198 }, +/* 199 */ { 0x1ffffec, 25, 199 }, +/* 200 */ { 0x3ffffe2, 26, 200 }, +/* 201 */ { 0x3ffffe3, 26, 201 }, +/* 202 */ { 0x3ffffe4, 26, 202 }, +/* 203 */ { 0x7ffffde, 27, 203 }, +/* 204 */ { 0x7ffffdf, 27, 204 }, +/* 205 */ { 0x3ffffe5, 26, 205 }, +/* 206 */ { 0xfffff1, 24, 206 }, +/* 207 */ { 0x1ffffed, 25, 207 }, +/* 208 */ { 0x7fff2, 19, 208 }, +/* 209 */ { 0x1fffe3, 21, 209 }, +/* 210 */ { 0x3ffffe6, 26, 210 }, +/* 211 */ { 0x7ffffe0, 27, 211 }, +/* 212 */ { 0x7ffffe1, 27, 212 }, +/* 213 */ { 0x3ffffe7, 26, 213 }, +/* 214 */ { 0x7ffffe2, 27, 214 }, +/* 215 */ { 0xfffff2, 24, 215 }, +/* 216 */ { 0x1fffe4, 21, 216 }, +/* 217 */ { 0x1fffe5, 21, 217 }, +/* 218 */ { 0x3ffffe8, 26, 218 }, +/* 219 */ { 0x3ffffe9, 26, 219 }, +/* 220 */ { 0xffffffd, 28, 220 }, +/* 221 */ { 0x7ffffe3, 27, 221 }, +/* 222 */ { 0x7ffffe4, 27, 222 }, +/* 223 */ { 0x7ffffe5, 27, 223 }, +/* 224 */ { 0xfffec, 20, 224 }, +/* 225 */ { 0xfffff3, 24, 225 }, +/* 226 */ { 0xfffed, 20, 226 }, +/* 227 */ { 0x1fffe6, 21, 227 }, +/* 228 */ { 0x3fffe9, 22, 228 }, +/* 229 */ { 0x1fffe7, 21, 229 }, +/* 230 */ { 0x1fffe8, 21, 230 }, +/* 231 */ { 0x7ffff3, 23, 231 }, +/* 232 */ { 0x3fffea, 22, 232 }, +/* 233 */ { 0x3fffeb, 22, 233 }, +/* 234 */ { 0x1ffffee, 25, 234 }, +/* 235 */ { 0x1ffffef, 25, 235 }, +/* 236 */ { 0xfffff4, 24, 236 }, +/* 237 */ { 0xfffff5, 24, 237 }, +/* 238 */ { 0x3ffffea, 26, 238 }, +/* 239 */ { 0x7ffff4, 23, 239 }, +/* 240 */ { 0x3ffffeb, 26, 240 }, +/* 241 */ { 0x7ffffe6, 27, 241 }, +/* 242 */ { 0x3ffffec, 26, 242 }, +/* 243 */ { 0x3ffffed, 26, 243 }, +/* 244 */ { 0x7ffffe7, 27, 244 }, +/* 245 */ { 0x7ffffe8, 27, 245 }, +/* 246 
*/ { 0x7ffffe9, 27, 246 }, +/* 247 */ { 0x7ffffea, 27, 247 }, +/* 248 */ { 0x7ffffeb, 27, 248 }, +/* 249 */ { 0xffffffe, 28, 249 }, +/* 250 */ { 0x7ffffec, 27, 250 }, +/* 251 */ { 0x7ffffed, 27, 251 }, +/* 252 */ { 0x7ffffee, 27, 252 }, +/* 253 */ { 0x7ffffef, 27, 253 }, +/* 254 */ { 0x7fffff0, 27, 254 }, +/* 255 */ { 0x3ffffee, 26, 255 }, +}; + +struct huffman_decode +{ + uint8_t bits; + uint8_t value; +}; + +static struct huffman_decode huffman_decode0[256]; +static struct huffman_decode huffman_decode_fe[256]; +static struct huffman_decode huffman_decode_ff[256]; +static struct huffman_decode huffman_decode_fffe[256]; +static struct huffman_decode huffman_decode_ffff[256]; +static struct huffman_decode huffman_decode_fffff[256]; +static struct huffman_decode huffman_decode_fffffe[16]; +static struct huffman_decode huffman_decode_ffffff[256]; + +struct dyn +{ + char *key; + size_t keylen; + char *value; + size_t valuelen; +}; + +struct http2_env +{ + uint32_t http2_send_global_credits; + uint32_t http2_send_stream1_credits; + uint32_t http2_connection_window; + uint32_t http2_stream1_window; + uint8_t *http2_headers; + size_t http2_headers_max; + size_t http2_headers_offset; + struct dyn *http2_dyn; + size_t http2_dyn_max; + size_t http2_dyn_idx; + u_char *http2_input; + size_t http2_input_max; + size_t http2_input_len; + u_char *http2_data; + size_t http2_data_max; + size_t http2_data_len; + bool type_html; + bool type_dns; + bool done; + unsigned status; + + struct evbuffer *outbuf; +}; + +static void send_request2(struct http2_env *env, struct evbuffer *outbuf, + char *hostname_port, const char *path, u_char *data, size_t datalen); +static void send_settings2(struct evbuffer *outbuf); +static void send_headers2(struct http2_env *env, struct evbuffer *outbuf, + uint32_t stream_id); +static void send_data2(struct evbuffer *outbuf, u_char *data, size_t datalen, + uint32_t stream_id); +static void send_goaway2(struct evbuffer *outbuf, uint32_t error_code, + uint32_t last_stream_id, const char *reason); +static void add_header2(struct http2_env *env, + const char *name, const char *value); +static void add_byte2(struct http2_env *env, uint8_t byte); +static void add_length2(struct http2_env *env, int prefix_len, + uint8_t prefix_value, uint32_t value); +static void add_str2(struct http2_env *env, const char *str); +static int Xhttp2_dns_input1(struct http2_env *env, struct evbuffer *inbuf, + void *ref, http2_write_response_cb_t write_response_cb, + http2_read_response_cb_t read_response_cb); +static int Xreceive_data2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, u_char *buf); +static int Xreceive_headers2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, int verbose, u_char *buf); +static int Xreceive_rst_stream2(struct http2_env *env, uint32_t length, + uint32_t stream_id, u_char *buf); +static int Xreceive_settings2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, u_char *buf); +static int Xreceive_ping2(struct http2_env *env, + uint32_t length, uint32_t stream_id, u_char *data); +static int Xreceive_goaway2(struct http2_env *env, + uint32_t length, uint32_t stream_id, u_char *data); +static int Xreceive_window_update2(struct http2_env *env, + uint32_t length, uint32_t stream_id, u_char *buf); +static size_t Xdecode_header2(struct http2_env *env, + uint8_t *buf, size_t len, int verbose); +static void add_dyn(struct http2_env *env, const char *key, size_t keylen, + const char 
*value, size_t valuelen); +static int Xget_dyn(struct http2_env *env, uint32_t value, + const char **kp, size_t *klen, const char **vp, size_t *vlen); +static size_t Xdecode_int2(struct http2_env *env, uint8_t *buf, size_t len, + int prefix_len, uint32_t *valuep); +static void init_huffman(void); +static size_t Xdecode_huffman2(struct http2_env *env, uint8_t *buf, size_t len, + char *outbuf, size_t outbuflen); +static void add_credits2(struct http2_env *env, + uint32_t stream_id, uint32_t credits); +static int Xreport_header(struct http2_env *env, int verbose, + const char *key, size_t keylen, const char *value, size_t valuelen); +static int Xparse_status_code2(struct http2_env *env, + const char *value, size_t valuelen, int *statusp); +static int memcasecmp(const void *p1, const void *p2, size_t len); +static void fatal(const char *fmt, ...); + +struct http2_env *http2_init(void) +{ + struct http2_env *env; + + env= malloc(sizeof(*env)); + env->http2_send_global_credits= 0; + env->http2_send_stream1_credits= 0; + env->http2_connection_window= HTTP2_DEFAULT_WINDOW; + env->http2_stream1_window= HTTP2_DEFAULT_WINDOW; + env->http2_headers= NULL; + env->http2_headers_max= 0; + env->http2_headers_offset= 0; + env->http2_dyn= NULL; + env->http2_dyn_max= 0; + env->http2_dyn_idx= 0; + env->http2_input= NULL; + env->http2_input_max= 0; + env->http2_input_len= 0; + env->http2_data= NULL; + env->http2_data_max= 0; + env->http2_data_len= 0; + env->type_html= false; + env->type_dns= false; + env->done= false; + env->outbuf= NULL; + + return env; +} + +void http2_free(struct http2_env *env) +{ + int i; + + free(env->http2_headers); + env->http2_headers= NULL; + for (i= 0; ihttp2_dyn_idx; i++) + { + free(env->http2_dyn[i].key); + env->http2_dyn[i].key= NULL; + free(env->http2_dyn[i].value); + env->http2_dyn[i].value= NULL; + } + free(env->http2_dyn); + env->http2_dyn= NULL; + free(env->http2_input); + env->http2_input= NULL; + free(env->http2_data); + env->http2_data= NULL; + free(env); +} + +void http2_dns(struct http2_env *env, struct bufferevent *bev, + const char *hostname, const char *port, const char *path, + u_char *req, size_t reqlen) +{ + static int first= 1; + + char hostname_port[300]; + + if (first) + { + first= 0; + init_huffman(); + } + + if (strcmp(port, "443") == 0) + { + strlcpy(hostname_port, hostname, sizeof(hostname_port)); + } + else + { + snprintf(hostname_port, sizeof(hostname_port), "%s:%s", + hostname, port); + } + + if (bev) + { + send_request2(env, bufferevent_get_output(bev), + hostname_port, path, req, reqlen); + } +} + +int http2_dns_input(struct http2_env *env, struct bufferevent *bev, + http2_reply_cb_t reply_cb, void *ref, + http2_write_response_cb_t write_response_cb, + http2_read_response_cb_t read_response_cb) +{ + int r; + size_t avail; + struct evbuffer *inbuf; + + if (env->done) + return -1; + + if (bev) + { + inbuf= bufferevent_get_input(bev); + env->outbuf= bufferevent_get_output(bev); + } + else + { + inbuf= NULL; + env->outbuf= NULL; + } + + for(;;) + { + if (inbuf) + avail= evbuffer_get_length(inbuf); + else + avail= 1; + + if (avail == 0) + { + env->outbuf= NULL; + return 1; + } + + r= Xhttp2_dns_input1(env, inbuf, ref, write_response_cb, + read_response_cb); + if (r == -1) + { + env->outbuf= NULL; + return -1; + } + if (env->done) + { + reply_cb(ref, env->status, + env->http2_data, env->http2_data_len); + break; + } + } + env->outbuf= NULL; + return 0; +} + +static int Xhttp2_dns_input1(struct http2_env *env, struct evbuffer *inbuf, + void *ref, 
http2_write_response_cb_t write_response_cb, + http2_read_response_cb_t read_response_cb) +{ + int r; + ssize_t n; + uint32_t length, stream_id; + size_t len, needed; + struct http2_hdr *http2_hdr; + uint8_t *data; + + needed= sizeof(*http2_hdr); + if (env->http2_input_max < needed) + { + env->http2_input= realloc(env->http2_input, needed); + env->http2_input_max= needed; + } + if (env->http2_input_len < needed) + { + len= needed-env->http2_input_len; + if (read_response_cb) + { + n= read_response_cb(ref, env->http2_input + + env->http2_input_len, len); + } + else + { + n = evbuffer_remove(inbuf, env->http2_input+ + env->http2_input_len, len); + if (n < 0) + fatal("http2_dns_input1: evbuffer_remove failed"); + } + + if (write_response_cb) + { + write_response_cb(ref, env->http2_input+ + env->http2_input_len, n); + } + + env->http2_input_len += n; + if (env->http2_input_len < needed) + return 0; + } +#if 0 + fprintf(stderr, "http2_dns_input1: http2_input_len = %lu\n", + env->http2_input_len); +#endif + + http2_hdr= (struct http2_hdr *)env->http2_input; + + length= (http2_hdr->length[0] << 16) | + (http2_hdr->length[1] << 8) | + http2_hdr->length[2]; +#if 0 + fprintf(stderr, "http2_dns_input1: length %u\n", length); +#endif + + needed= sizeof(*http2_hdr) + length; + if (env->http2_input_max < needed) + { + env->http2_input= realloc(env->http2_input, needed); + env->http2_input_max= needed; + } + if (env->http2_input_len < needed) + { + len= needed-env->http2_input_len; + if (read_response_cb) + { + n= read_response_cb(ref, env->http2_input + + env->http2_input_len, len); + } + else + { + n = evbuffer_remove(inbuf, env->http2_input+ + env->http2_input_len, len); + if (n < 0) + fatal("http2_dns_input1: evbuffer_remove failed"); + } + + if (write_response_cb) + { + write_response_cb(ref, env->http2_input+ + env->http2_input_len, n); + } + + env->http2_input_len += n; + if (env->http2_input_len < needed) + return 0; + } + + /* Get http2_hdr again because the buffer may have moved */ + http2_hdr= (struct http2_hdr *)env->http2_input; + stream_id= (http2_hdr->stream_id[0] << 24) | + (http2_hdr->stream_id[1] << 16) | + (http2_hdr->stream_id[2] << 8) | + http2_hdr->stream_id[3]; + stream_id &= ~HTTP2_HDR_R; + +#if 0 + fprintf(stderr, "http2_dns_input1: stream ID %u\n", stream_id); + fprintf(stderr, "http2_dns_input1: type %u\n", http2_hdr->type); +#endif + + data= env->http2_input+sizeof(*http2_hdr); + switch(http2_hdr->type) + { + case HTTP2_HDR_TYPE_DATA: + r= Xreceive_data2(env, http2_hdr, length, stream_id, data); + if (r == -1) + return -1; + if (http2_hdr->flags & HTTP2_HDR_DATA_END_STREAM) + env->done= true; + break; + case HTTP2_HDR_TYPE_HEADERS: + r= Xreceive_headers2(env, http2_hdr, length, stream_id, + 0/*verbose*/, data); + if (r == -1) + return -1; + break; + case HTTP2_HDR_TYPE_RST_STREAM: + r= Xreceive_rst_stream2(env, length, stream_id, data); + if (r == -1) + return -1; + break; + case HTTP2_HDR_TYPE_SETTINGS: + r= Xreceive_settings2(env, http2_hdr, length, stream_id, data); + if (r == -1) + return -1; + break; + case HTTP2_HDR_TYPE_PING: + r= Xreceive_ping2(env, length, stream_id, data); + if (r == -1) + return -1; + break; + case HTTP2_HDR_TYPE_GOAWAY: + r= Xreceive_goaway2(env, length, stream_id, data); + if (r == -1) + return -1; + break; + case HTTP2_HDR_TYPE_WINDOW_UPDATE: + r= Xreceive_window_update2(env, length, stream_id, data); + if (r == -1) + return -1; + break; + default: + fprintf(stderr, "receive_reply2: got unknown type %d\n", + http2_hdr->type); + 
send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "unknown type"); + return -1; + } + env->http2_input_len= 0; + return 0; +} + +static void send_request2(struct http2_env *env, struct evbuffer *outbuf, + char *hostname_port, const char *path, u_char *data, size_t datalen) +{ + const char *start= "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + + char str[20]; + + evbuffer_add(outbuf, start, strlen(start)); + + send_settings2(outbuf); + + snprintf(str, sizeof(str), "%lu", datalen); + add_header2(env, ":method", "POST"); + add_header2(env, ":scheme", "https"); /* We only do HTTP2 over TLS */ + add_header2(env, ":authority", hostname_port); + add_header2(env, ":path", path); + add_header2(env, "user-agent", "RIPE Atlas"); + add_header2(env, "accept", "application/dns-message"); + add_header2(env, "content-type", "application/dns-message"); + add_header2(env, "content-length", str); + + send_headers2(env, outbuf, 1); + send_data2(outbuf, data, datalen, 1); +} + +static void send_settings2(struct evbuffer *outbuf) +{ + uint16_t id; + uint32_t length, value; + struct http2_hdr http2_hdr; + + /* Send settings frame. */ + + length= sizeof(id) + sizeof(value); + http2_hdr.length[0]= (length >> 16) & 0xff; + http2_hdr.length[1]= (length >> 8) & 0xff; + http2_hdr.length[2]= length & 0xff; + http2_hdr.type= HTTP2_HDR_TYPE_SETTINGS; + http2_hdr.flags= 0; + http2_hdr.stream_id[0]= + http2_hdr.stream_id[1]= + http2_hdr.stream_id[2]= + http2_hdr.stream_id[3]= 0; + + evbuffer_add(outbuf, &http2_hdr, sizeof(http2_hdr)); + + id= htons(HTTP2_HDR_SETTINGS_ENABLE_PUSH); + value= htonl(0); + + evbuffer_add(outbuf, &id, sizeof(id)); + evbuffer_add(outbuf, &value, sizeof(value)); +} + +static void send_headers2(struct http2_env *env, struct evbuffer *outbuf, + uint32_t stream_id) +{ + size_t length; + struct http2_hdr http2_hdr; + + length= env->http2_headers_offset; + + http2_hdr.length[0]= (length >> 16) & 0xff; + http2_hdr.length[1]= (length >> 8) & 0xff; + http2_hdr.length[2]= length & 0xff; + http2_hdr.type= HTTP2_HDR_TYPE_HEADERS; + http2_hdr.flags= HTTP2_HDR_HEADERS_END_HEADERS; + http2_hdr.stream_id[0]= (stream_id >> 24) & 0x7f; + http2_hdr.stream_id[1]= (stream_id >> 16) & 0xff; + http2_hdr.stream_id[2]= (stream_id >> 8) & 0xff; + http2_hdr.stream_id[3]= stream_id & 0xff; + + evbuffer_add(outbuf, &http2_hdr, sizeof(http2_hdr)); + evbuffer_add(outbuf, env->http2_headers, + env->http2_headers_offset); + + free(env->http2_headers); + env->http2_headers= NULL; + env->http2_headers_max= 0; + env->http2_headers_offset= 0; +} + +static void send_data2(struct evbuffer *outbuf, u_char *data, size_t datalen, + uint32_t stream_id) +{ + size_t length; + struct http2_hdr http2_hdr; + + length= datalen; + + http2_hdr.length[0]= (length >> 16) & 0xff; + http2_hdr.length[1]= (length >> 8) & 0xff; + http2_hdr.length[2]= length & 0xff; + http2_hdr.type= HTTP2_HDR_TYPE_DATA; + http2_hdr.flags= HTTP2_HDR_DATA_END_STREAM; + http2_hdr.stream_id[0]= (stream_id >> 24) & 0x7f; + http2_hdr.stream_id[1]= (stream_id >> 16) & 0xff; + http2_hdr.stream_id[2]= (stream_id >> 8) & 0xff; + http2_hdr.stream_id[3]= stream_id & 0xff; + + evbuffer_add(outbuf, &http2_hdr, sizeof(http2_hdr)); + evbuffer_add(outbuf, data, datalen); +} + +static void send_goaway2(struct evbuffer *outbuf, uint32_t error_code, + uint32_t last_stream_id, const char *reason) +{ + size_t length; + struct http2_hdr http2_hdr; + uint32_t data[2]; + + fprintf(stderr, "send_goaway2: error %d, lat stream id %d, reason %s\n", + error_code, last_stream_id, reason); + + data[0]= 
htonl(last_stream_id); + data[1]= htonl(error_code); + + length= sizeof(data); + if (reason) + length += strlen(reason); + + http2_hdr.length[0]= (length >> 16) & 0xff; + http2_hdr.length[1]= (length >> 8) & 0xff; + http2_hdr.length[2]= length & 0xff; + http2_hdr.type= HTTP2_HDR_TYPE_GOAWAY; + http2_hdr.flags= 0; + http2_hdr.stream_id[0]= 0; + http2_hdr.stream_id[1]= 0; + http2_hdr.stream_id[2]= 0; + http2_hdr.stream_id[3]= 0; + + evbuffer_add(outbuf, &http2_hdr, sizeof(http2_hdr)); + evbuffer_add(outbuf, data, sizeof(data)); + if (reason) + evbuffer_add(outbuf, reason, strlen(reason)); +} + +static void add_header2(struct http2_env *env, + const char *name, const char *value) +{ +#if 0 + fprintf(stderr, "add_header2: %s = %s\n", + name, value); +#endif + + /* Simple version, no compression */ + add_byte2(env, HPACK_LHFNI); + add_length2(env, 1, 0x00, strlen(name)); + add_str2(env, name); + add_length2(env, 1, 0x00, strlen(value)); + add_str2(env, value); +} + +static void add_byte2(struct http2_env *env, uint8_t byte) +{ + size_t newsize; + + if (env->http2_headers_max < env->http2_headers_offset+1) + { + newsize= env->http2_headers_offset+64; + env->http2_headers= realloc(env->http2_headers, newsize); + env->http2_headers_max= newsize; + } + env->http2_headers[env->http2_headers_offset]= byte; + env->http2_headers_offset++; +} + +static void add_length2(struct http2_env *env, int prefix_len, + uint8_t prefix_value, uint32_t value) +{ + int first_bits; + unsigned first_max; + + assert(prefix_len < 8); + first_bits= 8-prefix_len; + first_max= (1 << first_bits)-1; + + if (value < first_max) + { + /* Value fits in the first byte */ + add_byte2(env, prefix_value | value); + return; + } + add_byte2(env, prefix_value | first_max); + value -= first_max; + + while (value > 0x7f) + { + add_byte2(env, 0x80 | (value & 0x7f)); + value >>= 7; + } + add_byte2(env, value); +} + +static void add_str2(struct http2_env *env, const char *str) +{ + size_t len, newsize; + + len= strlen(str); + if (env->http2_headers_max < env->http2_headers_offset+len) + { + newsize= env->http2_headers_offset+len+64; + env->http2_headers= realloc(env->http2_headers, newsize); + env->http2_headers_max= newsize; + } + memcpy(env->http2_headers+env->http2_headers_offset, str, len); + env->http2_headers_offset += len; +} + +#if 0 +static size_t encode_huffman2(char *buf, size_t len, + u8_t *outbuf, size_t outbuflen) +{ + u8_t byte; + u32_t value; + int bits, encbits; + size_t o, o_out; + struct huffman_encode *ent; + + bits= 0; + o= 0; + o_out= 0; + + for(;;) + { +#if 0 + fprintf(stderr, "encode_huffman2: len %d, value 0x%x\n", + bits, value & ((1 << bits)-1)); +#endif + if (bits >= 8) + { + if (o_out >= outbuflen) + fatal("encode_huffman2: output buffer full"); + outbuf[o_out]= (value >> (bits-8)) & 0xff; +#if 0 + fprintf(stderr, "encode_huffman2: byte %d: 0x%x\n", + o_out, outbuf[o_out]); +#endif + o_out++; + bits -= 8; + continue; + } + +#if 0 + fprintf(stderr, "encode_huffman2: o %d, len %d\n", o, len); +#endif + + if (o >= len) + break; + + byte= buf[o] & 0xff; + o++; + ent= &huffman_encode[byte]; + encbits= ent->bits; + if (encbits + bits > 32) + { + /* Get the first 8 bits */ + value= (value << 8) | + ((ent->encoding >> (encbits-8)) & 0xff); + bits += 8; + assert(bits >= 8); + if (o_out >= outbuflen) + fatal("encode_huffman2: output buffer full"); + outbuf[o_out]= (value >> (bits-8)) & 0xff; + o_out++; + bits -= 8; + + encbits -= 8; + value= (value << encbits) | + (ent->encoding & ((1 << encbits)-1)); + bits += encbits; + 
+ continue; + } + + value= (value << encbits) | + (ent->encoding & ((1 << encbits)-1)); + bits += encbits; + } + + assert(o == len); + + if (bits != 0) + { + assert(bits < 8); + value= (value << (8-bits)) | ((1 << (8-bits))-1); + bits= 8; + if (o_out >= outbuflen) + fatal("encode_huffman2: output buffer full"); + outbuf[o_out]= (value >> (bits-8)) & 0xff; +#if 0 + fprintf(stderr, "encode_huffman2: byte %d: 0x%x\n", + o_out, outbuf[o_out]); +#endif + o_out++; + bits -= 8; + + } + assert(bits == 0); + return o_out; + fatal("should do trailer"); +} + +static void send_credits2(FILE *file) +{ + u32_t length, window, stream_id; + struct http2_hdr http2_hdr; + + length= sizeof(window); + http2_hdr.length[0]= (length >> 16) & 0xff; + http2_hdr.length[1]= (length >> 8) & 0xff; + http2_hdr.length[2]= length & 0xff; + http2_hdr.type= HTTP2_HDR_TYPE_WINDOW_UPDATE; + http2_hdr.flags= 0; + http2_hdr.stream_id[0]= + http2_hdr.stream_id[1]= + http2_hdr.stream_id[2]= + http2_hdr.stream_id[3]= 0; + window= htonl(http2_send_global_credits); + http2_send_global_credits= 0; + assert(window); + + fwrite(&http2_hdr, sizeof(http2_hdr), 1, file); + fwrite(&window, sizeof(window), 1, file); + + if (http2_send_stream1_credits) + { + stream_id= 1; + http2_hdr.stream_id[0]= (stream_id >> 24) & 0x7f; + http2_hdr.stream_id[1]= (stream_id >> 16) & 0xff; + http2_hdr.stream_id[2]= (stream_id >> 8) & 0xff; + http2_hdr.stream_id[3]= stream_id & 0xff; + + window= htonl(http2_send_stream1_credits); + http2_send_stream1_credits= 0; + assert(window); + + fwrite(&http2_hdr, sizeof(http2_hdr), 1, file); + fwrite(&window, sizeof(window), 1, file); + } +} + +#endif + +static int Xreceive_data2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, u_char *buf) +{ + size_t data_len, needed, pad_offset, pad_length; + + if (hdrp->flags & HTTP2_HDR_DATA_PADDED) + { + pad_offset= 1; + pad_length= buf[0]; + if (pad_length >= length) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, + 0, "padding error"); + return -1; + } + } + else + { + pad_offset= 0; + pad_length= 0; + } + + add_credits2(env, stream_id, length); + + data_len= length-(pad_offset+pad_length); + if (env->type_html) + { + fprintf(stderr, "%.*s", (int)data_len, buf+pad_offset); + } + if (env->type_dns) + { + needed= env->http2_data_len+data_len; + if (env->http2_data_max < needed) + { + needed += 4096; + env->http2_data= realloc(env->http2_data, needed); + env->http2_data_max= needed; + } + memcpy(env->http2_data+env->http2_data_len, + buf+pad_offset, data_len); + env->http2_data_len += data_len; + } + + return 0; +} + +static int Xreceive_headers2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, int verbose, u_char *buf) +{ + size_t o, headers_len, len, pad_offset, pad_length, prio_offset; + + if (stream_id == 0) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "0 stream ID"); + return -1; + } + if (stream_id != 1) + { + /* We create and support only one stream */ + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "stream not open"); + return -1; + } + + if (hdrp->flags & HTTP2_HDR_HEADERS_PADDED) + { + pad_offset= 1; + pad_length= buf[0]; + } + else + { + pad_offset= 0; + pad_length= 0; + } + + if (hdrp->flags & HTTP2_HDR_HEADERS_PRIORITY) + prio_offset= 5; + else + prio_offset= 0; + + if (pad_offset + prio_offset + pad_length > length) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, + 0, "too much padding"); + return -1; + } + + o= pad_offset+prio_offset; + headers_len= 
length-pad_length; + while (o < length) + { + len= Xdecode_header2(env, buf+o, headers_len-o, verbose); + if (len == 0) + return -1; + o += len; + } + return 0; +} + +static int Xreceive_rst_stream2(struct http2_env *env, uint32_t length, + uint32_t stream_id, u_char *buf) +{ + uint32_t value; + + if (length != 4) + { + send_goaway2(env->outbuf, HTTP2_FRAME_SIZE_ERROR, 0, "length"); + return -1; + } + memcpy(&value, buf, sizeof(value)); + value= ntohl(value); + + if (stream_id == 0) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "zero stream ID"); + return -1; + } + + fprintf(stderr, "Got RST on stream %u with error %u\n", + stream_id, value); + return 0; +} + +static int Xreceive_settings2(struct http2_env *env, struct http2_hdr *hdrp, + uint32_t length, uint32_t stream_id, u_char *buf) +{ + size_t o; + uint16_t type; + uint32_t value; + uint8_t *setting; + + if (length % 6 != 0) + { + send_goaway2(env->outbuf, HTTP2_FRAME_SIZE_ERROR, + 0, "frame"); + return -1; + } + + if (hdrp->flags & HTTP2_HDR_SETTINGS_ACK) + { +#if 0 + fprintf(stderr, "receive_settings2: got ACK\n"); +#endif + if (length != 0) + { + send_goaway2(env->outbuf, HTTP2_FRAME_SIZE_ERROR, + 0, "non-empty ACK settings frame"); + return -1; + } + } + + if (stream_id != 0) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, + 0, "non-zero stream ID"); + return -1; + } + + for (o= 0, setting= buf; ooutbuf, HTTP2_PROTOCOL_ERROR, + 0, "non-zero stream ID"); + return -1; + } + if (length != 8) + { + send_goaway2(env->outbuf, HTTP2_FRAME_SIZE_ERROR, + 0, "ping frame not 8 octets"); + return -1; + } + fprintf(stderr, "receive_ping2: value: "); + for (i= 0; ioutbuf, HTTP2_PROTOCOL_ERROR, + 0, "non-zero stream ID"); + return -1; + } + if (length < 8) + { + send_goaway2(env->outbuf, HTTP2_FRAME_SIZE_ERROR, + 0, "goaway frame not at least 8 octets"); + return -1; + } + memcpy(&value, data, sizeof(value)); + value= ntohl(value) & ~HTTP2_HDR_GOAWAY_R; + + fprintf(stderr, "receive_goaway2: last-stream-ID: %u\n", value); + + memcpy(&value, data+4, sizeof(value)); + value= ntohl(value); + + fprintf(stderr, "receive_goaway2: error: %u\n", value); + + fprintf(stderr, "receive_goaway2: debug 0x%x: ", length-8); + for (i= 8; ioutbuf, HTTP2_FRAME_SIZE_ERROR, + 0, "window update frame not 4 octets"); + return -1; + } + memcpy(&value, buf, sizeof(value)); + value= ntohl(value) & ~HTTP2_HDR_WINDOW_UPDATE_R; + + if (value == 0) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, + 0, "zero increment"); + return -1; + } + + if (stream_id == 0) + windowp= &env->http2_connection_window; + else if (stream_id == 1) + windowp= &env->http2_stream1_window; + else + { + send_goaway2(env->outbuf, HTTP2_INTERNAL_ERROR, + 0, "window increment for unknown stream"); + return -1; + } + + /* Update connection window */ + *windowp += value; + if (*windowp > HTTP2_MAX_WINDOW) + { + send_goaway2(env->outbuf, HTTP2_FLOW_CONTROL_ERROR, + 0, "window overflow"); + return -1; + } + return 0; +} + +static size_t Xdecode_header2(struct http2_env *env, + uint8_t *buf, size_t len, int verbose) +{ + int r; + uint8_t byte; + uint32_t value; + size_t o, klen, vlen, namelen, tmplen; + struct table_ent *ent; + const char *kp, *vp, *namep; + char namebuf[256]; + char strbuf[1024]; + + assert(sizeof(static_table)/sizeof(static_table[0]) == + HPACK_STATIC_NR+1); + + byte= buf[0]; +#if 0 + fprintf(stderr, "decode_header2: byte 0x%x\n", byte); +#endif + if ((byte & HPACK_IHF_MASK) == HPACK_IHF) + { + tmplen= Xdecode_int2(env, buf, len, HPACK_IHF_PREFIX_LEN, &value); + if 
(tmplen == 0) + return 0; + if (value == 0) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "bad index"); + return 0; + } + if (value > HPACK_STATIC_NR) + { + r= Xget_dyn(env, value, &kp, &klen, &vp, &vlen); + if (r == -1) + return 0; + r= Xreport_header(env, verbose, kp, klen, vp, vlen); + if (r == -1) + return 0; + return tmplen; + } + ent= &static_table[value]; + if (!ent->value) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "entry has no value"); + return 0; + } + add_dyn(env, ent->name, strlen(ent->name), + ent->value, strlen(ent->value)); + r= Xreport_header(env, verbose, ent->name, strlen(ent->name), + ent->value, strlen(ent->value)); + if (r == -1) + return 0; + return tmplen; + } + if (byte == HPACK_LHFII || byte == HPACK_LHFwI) + { + /* Literal Header Field without Indexing -- + * New Name + * or + * Literal Header Field with Incremental Indexing -- + * New Name + */ + o= 1; + if (o >= len) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "out of space"); + return 0; + } + + byte= buf[o]; + tmplen= Xdecode_int2(env, buf+o, len-o, HPACK_H_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + o += tmplen; + assert(o <= len); + if (value > len-o) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "string too large for space"); + return 0; + } + if ((byte & HPACK_H_MASK) == HPACK_H) + { + tmplen= Xdecode_huffman2(env, buf+o, value, + namebuf, sizeof(namebuf)-1); + if (tmplen == 0) + return 0; + o += value; + namebuf[tmplen]= '\0'; + } + else + { + if (value > sizeof(namebuf)-1) + { + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "name too long"); + return 0; + } + memcpy(namebuf, buf+o, value); + namebuf[value]= '\0'; + o += value; + } + + if (o >= len) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "out of space"); + return 0; + } + + byte= buf[o]; + tmplen= Xdecode_int2(env, buf+o, len-o, HPACK_H_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + o += tmplen; + assert(o <= len); + if (value > len-o) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "string too large for space"); + return 0; + } + if ((byte & HPACK_H_MASK) == HPACK_H) + { + tmplen= Xdecode_huffman2(env, buf+o, value, + strbuf, sizeof(strbuf)); + if (tmplen == 0) + return 0; + o += value; + add_dyn(env, namebuf, strlen(namebuf), + strbuf, tmplen); + r= Xreport_header(env, verbose, namebuf, strlen(namebuf), + strbuf, tmplen); + if (r == -1) + return 0; + return o; + } + + add_dyn(env, namebuf, strlen(namebuf), + (char *)buf+o, value); + r= Xreport_header(env, verbose, namebuf, strlen(namebuf), + (char *)buf+o, value); + if (r == -1) + return 0; + o += value; + return o; + } + if ((byte & HPACK_LHFII_MASK) == HPACK_LHFII) + { + /* Literal Header Field with Incremental Indexing -- + * Indexed Name + */ + o= 0; + tmplen= Xdecode_int2(env, buf, len, HPACK_LHFII_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + if (value == 0) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "bad index"); + return 0; + } + if (value > HPACK_STATIC_NR) + { + r= Xget_dyn(env, value, &namep, &namelen, &vp, &vlen); + if (r == -1) + return 0; + ent= NULL; + } + else + { + ent= &static_table[value]; + namep= ent->name; + namelen= strlen(namep); + } + + o += tmplen; + if (o >= len) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "out of space"); + return 0; + } + + byte= buf[o]; + tmplen= Xdecode_int2(env, buf+o, len-o, HPACK_H_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + o += tmplen; + assert(o <= len); 
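+		/*
+		 * Overview of the HPACK dispatch in this function
+		 * (RFC 7541, section 6): the first octet of a header
+		 * field selects its representation --
+		 *
+		 *	1xxxxxxx  indexed field: name and value by index
+		 *	01xxxxxx  literal with incremental indexing (an
+		 *	          index of 0 means a literal new name)
+		 *	0000xxxx  literal without indexing
+		 *
+		 * Indices 1..HPACK_STATIC_NR refer to static_table[];
+		 * larger indices are resolved by Xget_dyn() from the
+		 * entries that add_dyn() appended earlier.  String
+		 * lengths and indices are prefixed integers read by
+		 * Xdecode_int2(): a value that does not fit in the prefix
+		 * bits continues in 7-bit groups with a continuation bit.
+		 * The H bit on a length selects Huffman coding.  For
+		 * example the single octet 0x88 is an indexed field,
+		 * static index 8, i.e. ":status: 200".
+		 */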
+ if (value > len-o) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "string too large for space"); + return 0; + } + if ((byte & HPACK_H_MASK) == HPACK_H) + { + tmplen= Xdecode_huffman2(env, buf+o, value, + strbuf, sizeof(strbuf)); + if (tmplen == 0) + return 0; + o += value; + add_dyn(env, namep, namelen, strbuf, tmplen); + r= Xreport_header(env, verbose, namep, namelen, + strbuf, tmplen); + if (r == -1) + return 0; + return o; + } + + add_dyn(env, namep, namelen, (char *)buf+o, value); + r= Xreport_header(env, verbose, namep, namelen, + (char *)buf+o, value); + if (r == -1) + return 0; + o += value; + return o; + } + if ((byte & HPACK_LHFwI_MASK) == HPACK_LHFwI) + { + /* Literal Header Field without Indexing -- + * Indexed Name + */ + o= 0; + tmplen= Xdecode_int2(env, buf, len, HPACK_LHFwI_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + if (value == 0) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "bad index"); + return 0; + } + if (value > HPACK_STATIC_NR) + { + r= Xget_dyn(env, value, &namep, &namelen, &vp, &vlen); + if (r == -1) + return 0; + ent= NULL; + } + else + { + ent= &static_table[value]; + namep= ent->name; + namelen= strlen(namep); + } + + o += tmplen; + if (o >= len) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "out of space"); + return 0; + } + + byte= buf[o]; + tmplen= Xdecode_int2(env, buf+o, len-o, HPACK_H_PREFIX_LEN, + &value); + if (tmplen == 0) + return 0; + o += tmplen; + assert(o <= len); + if (value > len-o) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "string too large for space"); + return 0; + } + if ((byte & HPACK_H_MASK) == HPACK_H) + { + tmplen= Xdecode_huffman2(env, buf+o, value, + strbuf, sizeof(strbuf)); + if (tmplen == 0) + return 0; + o += value; + r= Xreport_header(env, verbose, namep, namelen, + strbuf, tmplen); + if (r == -1) + return 0; + return o; + } + + r= Xreport_header(env, verbose, namep, namelen, + (char *)buf+o, value); + if (r == -1) + return 0; + o += value; + return o; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "bad first byte"); + return 0; +} + +static void add_dyn(struct http2_env *env, const char *key, size_t keylen, + const char *value, size_t valuelen) +{ + size_t needed; + void *k1, *v1; + struct dyn *dyn; + + if (env->http2_dyn_max < env->http2_dyn_idx+1) + { + needed= env->http2_dyn_idx+16; + env->http2_dyn= realloc(env->http2_dyn, needed*sizeof(*dyn)); + env->http2_input_max= needed; + } + dyn= &env->http2_dyn[env->http2_dyn_idx]; + k1= malloc(keylen); + memcpy(k1, key, keylen); + v1= malloc(valuelen); + memcpy(v1, value, valuelen); + dyn->key= k1; + dyn->keylen= keylen; + dyn->value= v1; + dyn->valuelen= valuelen; + env->http2_dyn_idx++; +} + +static int Xget_dyn(struct http2_env *env, uint32_t value, + const char **kp, size_t *klen, const char **vp, size_t *vlen) +{ + struct dyn *dyn; + + if (value < HPACK_STATIC_NR) + fatal("bad value for get_dyn"); + value -= HPACK_STATIC_NR; + if (value >= env->http2_dyn_idx) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "bad dynamic index"); + return -1; + } + dyn= &env->http2_dyn[env->http2_dyn_idx-1-value]; + + *kp= dyn->key; + *klen= dyn->keylen; + *vp= dyn->value; + *vlen= dyn->valuelen; + + return 0; +} + +static size_t Xdecode_int2(struct http2_env *env, uint8_t *buf, size_t len, + int prefix_len, uint32_t *valuep) +{ + uint8_t byte; + uint32_t v, new_bits, new_value; + int o, bits, mask, max_value, shift; + + assert(prefix_len < 8); + bits= 8-prefix_len; + mask= (1 << 
bits)-1; + max_value= mask; + + assert(len >= 1); + byte= buf[0]; + byte &= mask; + if (byte < max_value) + { + *valuep= byte; + return 1; + } + v= 0; + for (o= 1; o> shift != new_bits) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "overflow in multi-byte integer"); + return 0; + } + v |= new_value; + if (byte & HPACK_INT_MORE) + { + /* More bytes will follow */ + continue; + } + + /* This is the last byte. Add max_value */ + v += max_value; + if (v < max_value) + { + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "overflow in multi-byte integer"); + return 0; + } + + *valuep= v; + return o+1; + } + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "multi-byte int runs beyond buffer"); + return 0; +} + +static void init_huffman(void) +{ + uint8_t byte, mask, nibble; + int i, j, len; + + /* Check consitency of huffman_encode */ + assert(sizeof(huffman_encode)/sizeof(huffman_encode[0]) == + HPACK_HUFF_ENC_NO); + + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_encode[i].value != i) + { + fatal( + "init_huffman: bad value in huffman_encode: row %d, found %d", + i, huffman_encode[i].value); + } + } + + /* Fill in huffman_decode0 table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode0[byte].bits == 0) + { + /* New entry */ + huffman_decode0[byte].bits= len; + huffman_decode0[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. Old and new bits need + * to be more than 8 + */ + if (huffman_decode0[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode0[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode0[byte+j].bits, + huffman_decode0[byte+j].value); + } + + /* New entry */ + huffman_decode0[byte+j].bits= len; + huffman_decode0[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + assert(huffman_decode0[i].bits > 0); + } + + /* Fill in huffman_decode_fe table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 8) + continue; + + if (((huffman_encode[i].encoding >> (len-8)) & 0xff) != 0xfe) + continue; /* No FE prefix */ + + len -= 8; + assert(len < 8); + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_fe[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_fe[byte+j].bits, + huffman_decode_fe[byte+j].value); + } + + /* New entry */ + huffman_decode_fe[byte+j].bits= len; + huffman_decode_fe[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_fe[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_fe", + i); + } + } + + /* Fill in huffman_decode_ff table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 8) + continue; + + if (((huffman_encode[i].encoding >> (len-8)) & 0xff) != 0xff) + 
continue; /* No FF prefix */ + + len -= 8; + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode_ff[byte].bits == 0) + { + /* New entry */ + huffman_decode_ff[byte].bits= len; + huffman_decode_ff[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. Old and new bits need + * to be more than 8 + */ + if (huffman_decode_ff[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_ff[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_ff[byte+j].bits, + huffman_decode_ff[byte+j].value); + } + + /* New entry */ + huffman_decode_ff[byte+j].bits= len; + huffman_decode_ff[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_ff[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_ff", + i); + } + } + + /* Fill in huffman_decode_fffe table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 16) + continue; + + if (((huffman_encode[i].encoding >> (len-16)) & 0xffff) != + 0xfffe) + { + continue; /* No FFFE prefix */ + } + + len -= 16; +#if 0 + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode_ff[byte].bits == 0) + { + /* New entry */ + huffman_decode_ff[byte].bits= len; + huffman_decode_ff[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. Old and new bits need + * to be more than 8 + */ + if (huffman_decode_ff[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } +#endif + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_fffe[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_fffe[byte+j].bits, + huffman_decode_fffe[byte+j].value); + } + + /* New entry */ + huffman_decode_fffe[byte+j].bits= len; + huffman_decode_fffe[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_fffe[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_fffe", + i); + } + } + + /* Fill in huffman_decode_ffff table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 16) + continue; + + if (((huffman_encode[i].encoding >> (len-16)) & 0xffff) != + 0xffff) + { + continue; /* No FFFF prefix */ + } + + len -= 16; + + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode_ffff[byte].bits == 0) + { + /* New entry */ + huffman_decode_ffff[byte].bits= len; + huffman_decode_ffff[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. 
Old and new bits need + * to be more than 8 + */ + if (huffman_decode_ffff[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_ffff[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_ffff[byte+j].bits, + huffman_decode_ffff[byte+j].value); + } + + /* New entry */ + huffman_decode_ffff[byte+j].bits= len; + huffman_decode_ffff[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_ffff[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_ffff", + i); + } + } + + /* Fill in huffman_decode_fffff table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 20) + continue; + + if (((huffman_encode[i].encoding >> (len-20)) & 0xfffff) != + 0xfffff) + { + continue; /* No FFFFF prefix */ + } + + len -= 20; + + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode_fffff[byte].bits == 0) + { + /* New entry */ + huffman_decode_fffff[byte].bits= len; + huffman_decode_fffff[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. Old and new bits need + * to be more than 8 + */ + if (huffman_decode_fffff[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_fffff[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_fffff[byte+j].bits, + huffman_decode_fffff[byte+j].value); + } + + /* New entry */ + huffman_decode_fffff[byte+j].bits= len; + huffman_decode_fffff[byte+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_fffff[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_fffff", + i); + } + } + + /* Fill in huffman_decode_fffffe table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 24) + continue; + + if (((huffman_encode[i].encoding >> (len-24)) & 0xffffff) != + 0xfffffe) + { + continue; /* No FFFFFE prefix */ + } + + len -= 24; + + if (len >= 4) + { + nibble= (huffman_encode[i].encoding >> (len-4)) & 0xf; + assert(nibble < 16); + if (huffman_decode_fffffe[nibble].bits == 0) + { + /* New entry */ + huffman_decode_fffffe[nibble].bits= len; + huffman_decode_fffffe[nibble].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. 
Old and new bits need + * to be more than 8 + */ + if (huffman_decode_fffffe[byte].bits <= 4 || + len <= 4) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } + + nibble= (huffman_encode[i].encoding << (4-len)) & 0xf; + assert(nibble < 16); + mask= (1 << (4-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_fffffe[nibble+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_fffffe[nibble+j].bits, + huffman_decode_fffffe[nibble+j].value); + } + + /* New entry */ + huffman_decode_fffffe[nibble+j].bits= len; + huffman_decode_fffffe[nibble+j].value= + huffman_encode[i].value; + } + } + + /* Check that all entries have been filled-in */ + for (i= 0; i<16; i++) + { + if (huffman_decode_fffffe[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_fffffe", + i); + } + } + + /* Fill in huffman_decode_ffffff table */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + len= huffman_encode[i].bits; + if (len <= 24) + continue; + + if (((huffman_encode[i].encoding >> (len-24)) & 0xffffff) != + 0xffffff) + { + continue; /* No FFFFFF prefix */ + } + + len -= 24; +#if 0 + if (len >= 8) + { + byte= huffman_encode[i].encoding >> (len-8); + assert(byte < 256); + if (huffman_decode_ff[byte].bits == 0) + { + /* New entry */ + huffman_decode_ff[byte].bits= len; + huffman_decode_ff[byte].value= + huffman_encode[i].value; + } + else + { + /* Existing entry. Old and new bits need + * to be more than 8 + */ + if (huffman_decode_ff[byte].bits <= 8 || + len <= 8) + { + fatal( + "init_huffman: inconsistent entry"); + } + } + continue; + } +#endif + + byte= huffman_encode[i].encoding << (8-len); + assert(byte < 256); + mask= (1 << (8-len))-1; + for (j= 0; j<= mask; j++) + { + if (huffman_decode_ffffff[byte+j].bits != 0) + { + fatal( +"init_huffman: duplicate entry: adding row %d, byte 0x%x, offset %d, found bits %d, value %d", + i, byte, j, + huffman_decode_ffffff[byte+j].bits, + huffman_decode_ffffff[byte+j].value); + } + + /* New entry */ + huffman_decode_ffffff[byte+j].bits= len; + huffman_decode_ffffff[byte+j].value= + huffman_encode[i].value; + } + } + + /* Claim unused entries */ + for (j= 252; j<256; j++) + { + huffman_decode_ffffff[j].bits= 42; + huffman_decode_ffffff[j].value= 42; + } + + /* Check that all entries have been filled-in */ + for (i= 0; i< HPACK_HUFF_ENC_NO; i++) + { + if (huffman_decode_ffffff[i].bits == 0) + { + fatal( + "init_huffman: empty entry %d in huffman_decode_ffffff", + i); + } + } +} + +static size_t Xdecode_huffman2(struct http2_env *env, uint8_t *buf, size_t len, + char *outbuf, size_t outbuflen) +{ + uint8_t byte, decval, mask, nibble; + size_t o, o_out; + uint32_t bits; + int bitslen, declen; + + o= 0; + o_out= 0; + bits= 0; + bitslen= 0; + + for(;;) + { + /* Try to have at least 8 bits. At the end of the string that + * may not work. 
+ */ + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + +#if 0 + fprintf(stderr, "decode_huffman2: got bits 0x%x, len %d\n", + bits & ((1 << bitslen)-1), bitslen); +#endif + + /* Extract first byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, "decode_huffman2: got first byte 0x%02x\n", + byte); +#endif + + if (bitslen >= 8 && byte == 0xfe) + { + /* Use huffman_decode_fe table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract second byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, + "decode_huffman2: got 2nd byte 0x%02x\n", + byte); +#endif + + declen= huffman_decode_fe[byte].bits; + decval= huffman_decode_fe[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + if (declen <= bitslen) + { + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "not enough bits"); + return 0; + } + + if (byte != 0xff) + { + declen= huffman_decode0[byte].bits; + decval= huffman_decode0[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + if (declen <= bitslen) + { + assert(declen <= 8); + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + /* An incomplete symbol marks the end the string. + * Perform some checks. + */ + assert(bitslen < 8); + assert(o == len); + + /* The remaining bits should be a prefix of the EOS + * symbol, basically all bits have to be one. 
+ */ + mask= (1 << bitslen)-1; + if ((bits & mask) == mask) + break; + + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "garbage at end of string"); + return 0; + } + + assert(bitslen >= 8); + assert(byte == 0xff); + + /* Use huffman_decode_ff table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract second byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, + "decode_huffman2: got 2nd byte 0x%02x\n", + byte); +#endif + + if (bitslen >= 8 && byte == 0xfe) + { + /* Use huffman_decode_fffe table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract third byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, + "decode_huffman2: got 3rd byte 0x%02x\n", + byte); +#endif + + declen= huffman_decode_fffe[byte].bits; + decval= huffman_decode_fffe[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + if (declen <= bitslen) + { + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "not enough bits"); + return 0; + } + + + if (byte != 0xff) + { + declen= huffman_decode_ff[byte].bits; + decval= huffman_decode_ff[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + + if (declen <= bitslen) + { + assert(declen <= 8); + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "garbage at end of string"); + return 0; + } + + assert(bitslen >= 8); + assert(byte == 0xff); + + /* Use huffman_decode_ffff table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract third byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, + "decode_huffman2: got 3rd byte 0x%02x\n", + byte); +#endif + + if (bitslen >= 8 && byte == 0xfe) + { + /* Use huffman_decode_fffffe table. 
This table + * is index using 4 bits + */ + bitslen -= 8; + if (bitslen < 4 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract high nibble of fourth byte */ + if (bitslen < 4) + nibble= bits << (4-bitslen); + else + nibble= (bits >> (bitslen-4)) & 0xf; + +#if 0 + fprintf(stderr, + "decode_huffman2: got high nibble of 4th byte 0x%x\n", + nibble); +#endif + + declen= huffman_decode_fffffe[nibble].bits; + decval= huffman_decode_fffffe[nibble].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 4); + if (declen <= bitslen) + { + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "not enough bits"); + return 0; + } + + if (bitslen >= 8 && (byte & 0xf0) == 0xf0 && byte != 0xff) + { + /* Use huffman_decode_fffff table. The prefix is + * 20 bits. + */ + bitslen -= 4; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract low nibble of the third byte and the + * high nibble of fourth byte + */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= bits >> (bitslen-8); + +#if 0 + fprintf(stderr, + "decode_huffman2: got low 3rd/high 4th 0x%x\n", + byte); +#endif + + declen= huffman_decode_fffff[byte].bits; + decval= huffman_decode_fffff[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + if (declen <= bitslen) + { + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "not enough bits"); + return 0; + } + + if (byte != 0xff) + { + declen= huffman_decode_ffff[byte].bits; + decval= huffman_decode_ffff[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + + if (declen <= bitslen) + { + assert(declen <= 8); + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "garbage at end of string"); + return 0; + } + + assert(bitslen >= 8); + assert(byte == 0xff); + + /* Use huffman_decode_ffffff table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract fourth byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + +#if 0 + fprintf(stderr, + "decode_huffman2: got 4th byte 0x%02x\n", + byte); +#endif + +#if 0 + if (bitslen >= 8 && byte == 0xfe) + { + /* Use huffman_decode_fffe table */ + bitslen -= 8; + if (bitslen < 8 && o < len) + { + bits= (bits << 8) | buf[o]; + bitslen += 8; + o++; + } + + /* Extract third byte */ + if (bitslen < 8) + byte= bits << (8-bitslen); + else + byte= (bits >> (bitslen-8)) & 0xff; + + fprintf(stderr, + "decode_huffman2: got 3rd byte 0x%02x\n", + byte); + + declen= huffman_decode_fffe[byte].bits; + decval= huffman_decode_fffe[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len 
%d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + if (declen <= bitslen) + { + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "not enough bits"); + return 0; + } +#endif + + if (byte != 0xff) + { + declen= huffman_decode_ffffff[byte].bits; + decval= huffman_decode_ffffff[byte].value; + +#if 0 + fprintf(stderr, + "decode_huffman2: got len %d, value '%c'\n", + declen, decval); +#endif + + assert(declen <= 8); + + if (declen <= bitslen) + { + assert(declen <= 8); + bitslen -= declen; + if (o_out >= outbuflen) + { + send_goaway2(env->outbuf, + HTTP2_COMPRESSION_ERROR, + 0, "no space in output buffer"); + return 0; + } + outbuf[o_out]= decval; + o_out++; + continue; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "garbage at end of string"); + return 0; + } + + send_goaway2(env->outbuf, HTTP2_COMPRESSION_ERROR, + 0, "should decode ff"); + return 0; + } + return o_out; +} + +static void add_credits2(struct http2_env *env, + uint32_t stream_id, uint32_t credits) +{ + env->http2_send_global_credits += credits; + if (stream_id == 1) + env->http2_send_stream1_credits += credits; + else + { + fatal("add_credits2: should handle more streams"); + } +} + +static int Xreport_header(struct http2_env *env, int verbose, + const char *key, size_t keylen, const char *value, size_t valuelen) +{ + int r, status; + size_t len; + const u_char *p; + + if (verbose) + { + fprintf(stderr, "'%.*s': '%.*s'\n", (int)keylen, key, + (int)valuelen, value); + } + + if (keylen == strlen(HTTP2_HEADER_STATUS) && + memcmp(key, HTTP2_HEADER_STATUS, keylen) == 0) + { + r= Xparse_status_code2(env, value, valuelen, &status); + if (r == -1) + return -1; + env->status= status; + if (verbose) + fprintf(stderr, "HTTP/2 %.*s\n", (int)valuelen, value); + else if (status != 200) + fprintf(stderr, "Request failed with %03d\n", status); + } + if (keylen == strlen(HTTP2_CONTENT_TYPE) && + memcmp(key, HTTP2_CONTENT_TYPE, keylen) == 0) + { + len= valuelen; + p= memchr(value, ';', len); + if (p != NULL) + len= p-(const u_char *)value; + + /* Trim trailing white space */ + while (len > 0 && isspace(value[len-1])) + len--; + + p= (const u_char *)value; + /* Skip leading white space */ + while (p < (const u_char *)value+len && isspace(*p)) + p++; + len= (const u_char *)value+len-p; + + if (len == strlen(TEXT_HTML) && + memcasecmp(p, TEXT_HTML, len) == 0) + { + env->type_html= true; + } + if (len == strlen(APPLICATION_DNS_MESSAGE) && + memcasecmp(p, APPLICATION_DNS_MESSAGE, len) == 0) + { + env->type_dns= true; + } + + + } + return 0; +} + +static int Xparse_status_code2(struct http2_env *env, + const char *value, size_t valuelen, int *statusp) +{ + int i, status; + char *check; + char buf[4]; + + if (valuelen != sizeof(buf)-1) + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "bad status code"); + return -1; + } + memcpy(buf, value, valuelen); + buf[valuelen]= '\0'; + + for (i= 0; ioutbuf, HTTP2_PROTOCOL_ERROR, 0, + "error parsing status code"); + return -1; + } + } + + status= strtoul(buf, &check, 10); + if (check[0] != '\0') + { + send_goaway2(env->outbuf, HTTP2_PROTOCOL_ERROR, 0, + "garbage after status code"); + return -1; + } + + *statusp= status; + return 0; +} + +static int memcasecmp(const void *p1, const void *p2, size_t len) +{ + const u_char *cp1, *cp2; + int c1, c2, i; + + 
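+	/*
+	 * Case-insensitive memcmp() over exactly len octets.  It is
+	 * needed by Xreport_header() above because HPACK header values
+	 * are not NUL-terminated: the content-type value is stripped of
+	 * any ";..." parameter and surrounding blanks and then compared
+	 * case-insensitively against TEXT_HTML and
+	 * APPLICATION_DNS_MESSAGE to set type_html/type_dns.
+	 */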
cp1= p1; + cp2= p2; + + for (i= 0; i + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * http2.h + */ + +#include +#include +#include +#include +#include + +struct http2_env; + +typedef void (*http2_reply_cb_t)(void *ref, unsigned status, + u_char *data, size_t len); +typedef void (*http2_write_response_cb_t)(void *ref, void *data, size_t len); +typedef size_t (*http2_read_response_cb_t)(void *ref, void *data, size_t len); + +struct http2_env *http2_init(void); +void http2_free(struct http2_env *env); +void http2_dns(struct http2_env *env, struct bufferevent *bev, + const char *hostname, const char *port, const char *path, + u_char *req, size_t reqlen); +int http2_dns_input(struct http2_env *env, struct bufferevent *bev, + http2_reply_cb_t reply_cb, void *ref, + http2_write_response_cb_t write_response_cb, + http2_read_response_cb_t read_response_cb); diff --git a/probe-busybox/eperd/httpget.c b/probe-busybox/eperd/httpget.c new file mode 100644 index 00000000..13c5b861 --- /dev/null +++ b/probe-busybox/eperd/httpget.c @@ -0,0 +1,2298 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * httpget.c -- libevent-based version of httpget + */ + +#include "libbb.h" +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "tcputil.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_IN_REL ATLAS_DATA_OUT_REL +#define SAFE_PREFIX_OUT_REL ATLAS_DATA_NEW_REL + +#define CONN_TO 5000 /* Timeout in milliseconds */ + +#define ENV2STATE(env) \ + ((struct hgstate *)((char *)env - offsetof(struct hgstate, tu_env))) + +#define DBQ(str) "\"" #str "\"" + +#define MAX_LINE_LEN 2048 /* We don't deal with lines longer than this */ +#define POST_BUF_SIZE 2048 /* Big enough to be efficient? */ + +#define RESP_PACKET 1 +#define RESP_SOCKNAME 2 +#define RESP_DSTADDR 3 +#define RESP_READ_ERROR 4 + +static struct option longopts[]= +{ + { "all", no_argument, NULL, 'a' }, + { "combine", no_argument, NULL, 'c' }, + { "get", no_argument, NULL, 'g' }, + { "head", no_argument, NULL, 'E' }, + { "host", required_argument, NULL, 'n' }, + { "post", no_argument, NULL, 'P' }, + { "post-file", required_argument, NULL, 'p' }, + { "post-header", required_argument, NULL, 'h' }, + { "post-footer", required_argument, NULL, 'f' }, + { "read-limit", required_argument, NULL, 'r' }, + { "store-headers", required_argument, NULL, 'H' }, + { "store-body", required_argument, NULL, 'B' }, + { "user-agent", required_argument, NULL, 'u' }, + { "sni", required_argument, NULL, 's' }, + { "timeout", required_argument, NULL, 'S' }, + { "etim", no_argument, NULL, 't' }, + { "eetim", no_argument, NULL, 'T' }, + { NULL, } +}; + +enum readstate { READ_FIRST, READ_STATUS, READ_HEADER, READ_BODY, READ_SIMPLE, + READ_CHUNKED, READ_CHUNK_BODY, READ_CHUNK_END, READ_CHUNKED_TRAILER, + READ_DONE }; +enum writestate { WRITE_FIRST, WRITE_HEADER, WRITE_POST_HEADER, + WRITE_POST_FILE, WRITE_POST_FOOTER, WRITE_DONE }; + +struct hgbase +{ + struct event_base *event_base; + + struct hgstate **table; + int tabsiz; + + /* For standalone httpget. Called when a httpget instance is + * done. Just one pointer for all instances. It is up to the caller + * to keep it consistent. 
+ */ + void (*done)(void *state, int error); +}; + +struct hgstate +{ + /* Parameters */ + char *output_file; + char *atlas; + char *bundle; + char do_all; + char do_combine; + char only_v4; + char only_v6; + char do_get; + char do_head; + char do_post; + bool do_tls; + char do_http10; + char *user_agent; + char *post_header; + char *post_file; + char *post_footer; + int max_headers; + int max_body; + int etim; + size_t read_limit; + unsigned timeout; + char *infname; + char *response_in; /* Fuzzing */ + char *response_out; + + /* State */ + char busy; + struct tu_env tu_env; + char dnserr; + char connecting; + char in_writecb; + char *host; + char *port; + char *hostport; + char *path; + char *sni; + struct bufferevent *bev; + enum readstate readstate; + enum writestate writestate; + int http_result; + char res_major; + char res_minor; + int headers_size; + int tot_headers; + int chunked; + int tot_chunked; + int content_length; + int content_offset; + int subid; + int submax; + time_t gstart; + struct timespec start; + double resptime; + double ttr; /* Time to resolve */ + double ttc; /* Time to connect */ + double ttfb; /* Time to first byte */ + int roffset; + int report_roffset; + int first_connect; + int read_truncated; + FILE *post_fh; + char *post_buf; + + char *line; + size_t linemax; /* Allocated size of line */ + size_t linelen; /* Current amount of data in line */ + size_t lineoffset; /* Offset in line where to start processing */ + + /* Base and index in table */ + struct hgbase *base; + int index; + + struct sockaddr_in6 sin6; + socklen_t socklen; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + + char *result; + size_t reslen; + size_t resmax; + + char *result2; + size_t reslen2; + size_t resmax2; + + FILE *resp_file; /* Fuzzing */ +}; + +static struct hgbase *hg_base; + +static void report(struct hgstate *state); +static void add_str(struct hgstate *state, const char *str); +static void add_str_quoted(struct hgstate *state, char *str); +static void add_str2(struct hgstate *state, const char *str); + +static struct hgbase *httpget_base_new(struct event_base *event_base) +{ + struct hgbase *base; + + base= xzalloc(sizeof(*base)); + + base->event_base= event_base; + + base->tabsiz= 10; + base->table= xzalloc(base->tabsiz * sizeof(*base->table)); + + return base; +} + +static int parse_url(char *url, char **hostp, char **portp, char **hostportp, + char **pathp, bool *do_tls) +{ + char *item; + const char *cp, *np, *prefix; + size_t len; +#if ENABLE_FEATURE_EVHTTPGET_HTTPS + const char *prefixs; + size_t lens; +#endif + + *hostp= NULL; + *portp= NULL; + *hostportp= NULL; + *pathp= NULL; + *do_tls = 0; + + /* the url must start with 'http://' */ + prefix= "http://"; +#if ENABLE_FEATURE_EVHTTPGET_HTTPS + prefixs = "https://"; + lens= strlen(prefixs); +#endif + len= strlen(prefix); + if (strncasecmp(prefix, url, len) == 0) + { + *do_tls = 0; + } +#if ENABLE_FEATURE_EVHTTPGET_HTTPS + else if (strncasecmp(prefixs, url, lens) == 0) + { + len = lens; + *do_tls = 1; + } +#endif + else + { + crondlog(LVL8 "bad prefix in url '%s'", url); + goto fail; + } + + cp= url+len; + + /* Get hostport part */ + np= strchr(cp, '/'); + if (np != NULL) + len= np-cp; + else + { + len= strlen(cp); + np= cp+len; + } + if (len == 0) + { + crondlog(LVL8 "missing host part in url '%s'", url); + return 0; + } + item= xmalloc(len+1); + memcpy(item, cp, len); + item[len]= '\0'; + *hostportp= item; + + /* The remainder is the path */ + cp= np; + if (cp[0] == '\0') + cp= "/"; + len= strlen(cp); + item= 
xmalloc(len+1); + memcpy(item, cp, len); + item[len]= '\0'; + *pathp= item; + + /* Extract the host name from hostport */ + cp= *hostportp; + np= cp; + if (cp[0] == '[') + { + /* IPv6 address literal */ + np= strchr(cp, ']'); + if (np == NULL || np == cp+1) + { + crondlog(LVL8 + "malformed IPv6 address literal in url '%s'", + url); + goto fail; + } + } + + np= strchr(np, ':'); + if (np != NULL) + len= np-cp; + else + { + len= strlen(cp); + np= cp+len; + } + if (len == 0) + { + crondlog(LVL8 "missing host part in url '%s'", url); + goto fail; + } + item= xmalloc(len+1); + if (cp[0] == '[') + { + /* Leave out the square brackets */ + memcpy(item, cp+1, len-2); + item[len-2]= '\0'; + } + else + { + memcpy(item, cp, len); + item[len]= '\0'; + } + *hostp= item; + + /* Port */ + cp= np; + if ((cp[0] == '\0') && *do_tls) + cp= "443"; + else if ((cp[0] == '\0') && !*do_tls) + cp= "80"; + else + cp++; + len= strlen(cp); + item= xmalloc(len+1); + memcpy(item, cp, len); + item[len]= '\0'; + *portp= item; + + return 1; + +fail: + if (*hostp) + { + free(*hostp); + *hostp= NULL; + } + if (*portp) + { + free(*portp); + *portp= NULL; + } + if (*hostportp) + { + free(*hostportp); + *hostportp= NULL; + } + if (*pathp) + { + free(*pathp); + *pathp= NULL; + } + return 0; +} + +static void timeout_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct hgstate *state; + char errline[256]; + + state= ENV2STATE(s); + + if (state->connecting) + { + add_str(state, DBQ(err) ":" DBQ(connect: timeout) ", "); + if (state->do_all) + report(state); + else + tu_restart_connect(&state->tu_env); + return; + } + switch(state->readstate) + { + case READ_FIRST: + case READ_STATUS: + add_str(state, DBQ(err) ":" DBQ(timeout reading status) ", "); + report(state); + break; + case READ_HEADER: + if (state->max_headers) + add_str(state, " ], "); + add_str(state, ", " DBQ(err) ":" DBQ(timeout reading headers)); + report(state); + break; + case READ_SIMPLE: +#if 0 /* Enable when adding storing bodies */ + if (state->max_body) + add_str(s, " ]"); +#endif + add_str(state, DBQ(err) ":" DBQ(timeout reading body) ", "); + report(state); + break; + case READ_CHUNKED: + case READ_CHUNK_BODY: +#if 0 /* Enable when adding storing bodies */ + if (state->max_body) + add_str(s, " ]"); +#endif +#if 1 + snprintf(errline, sizeof(errline), + DBQ(err) ":" + DBQ(timeout reading chunk: state %ld linelen %ld lineoffset %ld) + ", ", + (long)state->readstate, + (long)state->linelen, + (long)state->lineoffset); + add_str(state, errline); +#else + add_str(state, DBQ(err) ":" DBQ(timeout reading chunk) ", "); +#endif + report(state); + break; + default: + printf("in timeout_callback, unhandled cased: %d\n", + state->readstate); + } +} + +static void *httpget_init(int __attribute((unused)) argc, char *argv[], + void (*done)(void *state, int error)) +{ + int c, i, do_combine, do_get, do_head, do_post, + max_headers, max_body, only_v4, only_v6, + do_all, do_http10, do_etim, do_eetim; + bool do_tls; + size_t newsiz, read_limit; + unsigned timeout; + char *url, *check; + char *host_arg, *post_file, *output_file, *post_footer, *post_header, + *A_arg, *b_arg, *store_headers, *store_body, *read_limit_str, + *timeout_str, *infname, *response_in, *response_out; + char *validated_response_in= NULL; + char *validated_response_out= NULL; + char *validated_output_file= NULL; + char *validated_post_header= NULL; + char *validated_post_file= NULL; + char *validated_post_footer= NULL; + const char *sni, *user_agent; + char 
*host, *port, *hostport, *path; + struct hgstate *state; + FILE *fh; + + /* Arguments */ + do_http10= 0; + do_all= 0; + do_combine= 0; + do_get= 1; + do_head= 0; + do_post= 0; + do_tls = 0; + host_arg= NULL; + post_file= NULL; + post_footer=NULL; + post_header=NULL; + output_file= NULL; + store_headers= NULL; + store_body= NULL; + read_limit_str= NULL; + timeout_str= NULL; + A_arg= NULL; + b_arg= NULL; + infname= NULL; + response_in= NULL; + response_out= NULL; + only_v4= 0; + only_v6= 0; + sni= NULL; + do_etim= 0; + do_eetim= 0; + user_agent= "httpget for atlas.ripe.net"; + + if (!hg_base) + { + hg_base= httpget_base_new(EventBase); + if (!hg_base) + crondlog(DIE9 "httpget_base_new failed"); + } + + + /* Allow us to be called directly by another program in busybox */ + optind= 0; + while (c= getopt_long(argc, argv, "01ab:A:cI:O:R:W:46", longopts, NULL), + c != -1) + { + switch(c) + { + case '0': + do_http10= 1; + break; + case '1': + do_http10= 0; + break; + case '4': + only_v4= 1; + only_v6= 0; + break; + case '6': + only_v6= 1; + only_v4= 0; + break; + case 'A': + A_arg= optarg; + break; + case 'R': + response_in= optarg; + break; + case 'W': + response_out= optarg; + break; + case 'a': /* --all */ + do_all= 1; + break; + case 'b': + b_arg= optarg; + break; + case 'B': /* --store-body */ + store_body= optarg; + break; + case 'c': /* --combine */ + do_combine= 1; + break; + case 'E': /* --head */ + do_get = 0; + do_head = 1; + do_post = 0; + break; + case 'g': /* --get */ + do_get = 1; + do_head = 0; + do_post = 0; + break; + case 'f': /* --post-footer */ + post_footer= optarg; + break; + case 'H': /* --store-headers */ + store_headers= optarg; + break; + case 'h': /* --post-header */ + post_header= optarg; + break; + case 'I': + infname= optarg; + break; + case 'n': + host_arg= optarg; /* --host */ + break; + case 'O': + output_file= optarg; + break; + case 'P': /* --post */ + do_get = 0; + do_head = 0; + do_post = 1; + break; + case 'p': /* --post-file */ + post_file= optarg; + break; + case 'r': + read_limit_str= optarg; /* --read-limit */ + break; + case 'T': /* --etim */ + do_eetim= 1; + break; + case 't': /* --etim */ + do_etim= 1; + break; + case 's': + sni= optarg; /* --sni */ + break; + case 'S': + timeout_str= optarg; /* --timeout */ + break; + case 'u': /* --user-agent */ + user_agent= optarg; + break; + default: + crondlog(LVL8 "bad option '%c'", c); + return NULL; + } + } + + if (optind != argc-1) + { + crondlog(LVL8 "exactly one url expected"); + return NULL; + } + url= argv[optind]; + + if (response_in) + { + validated_response_in= rebased_validated_filename(ATLAS_SPOOLDIR, + response_in, ATLAS_FUZZING_REL); + if (validated_response_in == NULL) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_in); + goto err; + } + } + if (response_out) + { + validated_response_out= rebased_validated_filename(ATLAS_SPOOLDIR, + response_out, ATLAS_FUZZING_REL); + if (validated_response_out == NULL) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_out); + goto err; + } + } + + if (output_file) + { + validated_output_file= rebased_validated_filename(ATLAS_SPOOLDIR, + output_file, SAFE_PREFIX_OUT_REL); + if (validated_output_file == NULL) + { + crondlog(LVL8 "insecure file '%s'", output_file); + goto err; + } + fh= fopen(validated_output_file, "a"); + if (!fh) + { + crondlog(LVL8 "httpget: unable to append to '%s'", + validated_output_file); + goto err; + } + fclose(fh); + } + + if (A_arg) + { + if (!validate_atlas_id(A_arg)) + { + crondlog(LVL8 "bad atlas ID '%s'", 
A_arg); + goto err; + } + } + if (b_arg) + { + if (!validate_atlas_id(b_arg)) + { + crondlog(LVL8 "bad bundle ID '%s'", b_arg); + goto err; + } + } + + if (post_header) + { + validated_post_header= rebased_validated_filename(ATLAS_SPOOLDIR, + post_header, SAFE_PREFIX_IN_REL); + if (validated_post_header == NULL) + { + crondlog(LVL8 "insecure file '%s'", post_header); + goto err; + } + } + if (post_file) + { + validated_post_file= rebased_validated_filename(ATLAS_SPOOLDIR, + post_file, SAFE_PREFIX_IN_REL); + if (validated_post_file == NULL) + { + crondlog(LVL8 "insecure file '%s'", post_file); + goto err; + } + } + if (post_footer) + { + validated_post_footer= rebased_validated_filename(ATLAS_SPOOLDIR, + post_footer, SAFE_PREFIX_IN_REL); + if (validated_post_footer == NULL) + { + crondlog(LVL8 "insecure file '%s'", post_footer); + goto err; + } + } + + max_headers= 0; + max_body= UINT_MAX; /* default is to write out the entire body */ + + if (store_headers) + { + max_headers= strtoul(store_headers, &check, 10); + if (check[0] != '\0') + { + crondlog(LVL8 + "unable to parse argument (--store-headers) '%s'", + store_headers); + return NULL; + } + } + + if (store_body) + { + max_body= strtoul(store_body, &check, 10); + if (check[0] != '\0') + { + crondlog(LVL8 + "unable to parse argument (--store-body) '%s'", + store_body); + return NULL; + } + } + + read_limit= 0; + if (read_limit_str) + { + read_limit= strtoul(read_limit_str, &check, 10); + if (check[0] != '\0') + { + crondlog(LVL8 + "unable to parse argument (--read-limit) '%s'", + read_limit_str); + return NULL; + } + } + + timeout= CONN_TO; + if (timeout_str) + { + timeout= strtoul(timeout_str, &check, 10); + if (check[0] != '\0') + { + crondlog(LVL8 + "unable to parse argument (--timeout) '%s'", + timeout_str); + return NULL; + } + } + + if (!parse_url(url, &host, &port, &hostport, &path, &do_tls)) + { + /* Do we need to report an error? */ + return NULL; + } + + if (host_arg) + { + /* Replace hostport from the URL with host_arg */ + free(hostport); + hostport= strdup(host_arg); + } + + //printf("host: %s\n", host); + //printf("port: %s\n", port); + //printf("hostport: %s\n", hostport); + //printf("path: %s\n", path); + + state= xzalloc(sizeof(*state)); + state->base= hg_base; + state->atlas= A_arg ? strdup(A_arg) : NULL; + state->bundle= b_arg ? strdup(b_arg) : NULL; + state->output_file= validated_output_file; + validated_output_file= NULL; + state->response_in= validated_response_in; + validated_response_in= NULL; + state->response_out= validated_response_out; + validated_response_out= NULL; + state->host= host; + state->port= port; + state->hostport= hostport; + state->path= path; + state->do_all= do_all; + state->do_combine= !!do_combine; + state->do_get= do_get; + state->do_head= do_head; + state->do_post= do_post; + state->do_tls= do_tls; + state->post_header= validated_post_header; + validated_post_header= NULL; + state->post_file= validated_post_file; + validated_post_file= NULL; + state->post_footer= validated_post_footer; + validated_post_footer= NULL; + state->do_http10= do_http10; + state->user_agent= user_agent ? strdup(user_agent) : NULL; + state->max_headers= max_headers; + state->max_body= max_body; + state->read_limit= read_limit; + state->sni= sni ? strdup(sni) : strdup(host); + state->timeout= timeout; + state->infname= infname ? strdup(infname) : NULL; + + state->only_v4= 2; + + state->only_v4= !!only_v4; /* Gcc bug? 
*/ + state->only_v6= !!only_v6; + + if (do_eetim) + state->etim= 2; + else if (do_etim) + state->etim= 1; + else + state->etim= 0; + + + //evtimer_assign(&state->timer, state->base->event_base, + // timeout_callback, state); + + state->line= NULL; + state->linemax= 0; + state->linelen= 0; + state->lineoffset= 0; + + for (i= 0; itabsiz; i++) + { + if (hg_base->table[i] == NULL) + break; + } + if (i >= hg_base->tabsiz) + { + newsiz= 2*hg_base->tabsiz; + hg_base->table= xrealloc(hg_base->table, + newsiz*sizeof(*hg_base->table)); + for (i= hg_base->tabsiz; itable[i]= NULL; + i= hg_base->tabsiz; + hg_base->tabsiz= newsiz; + } + state->index= i; + hg_base->table[i]= state; + hg_base->done= done; + + return state; + +err: + if (validated_response_in) free(validated_response_in); + if (validated_response_out) free(validated_response_out); + if (validated_output_file) free(validated_output_file); + if (validated_post_header) free(validated_post_header); + if (validated_post_file) free(validated_post_file); + if (validated_post_footer) free(validated_post_footer); + return NULL; +} + +static void report(struct hgstate *state) +{ + int done, do_output; + FILE *fh; + char namebuf[NI_MAXHOST]; + char line[160]; + + //event_del(&state->timer); + + state->subid++; + + do_output= 1; + if (state->do_all && state->do_combine && state->subidsubmax) + { + do_output= 0; + } + + fh= NULL; + if (do_output) + { + if (state->output_file) + { + fh= fopen(state->output_file, "a"); + if (!fh) + crondlog(DIE9 "httpget: unable to append to '%s'", + state->output_file); + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) ", " + "%s, " + DBQ(lts) ":%d, " + DBQ(time) ":%llu, ", + state->atlas, atlas_get_version_json_str(), + get_timesync(), + (unsigned long long)state->gstart); + if (state->bundle) + { + fprintf(fh, DBQ(bundle) ":%s, ", + state->bundle); + } + if (!state->tu_env.host_is_literal) + { + fprintf(fh, DBQ(ttr) ":%f, ", + state->tu_env.ttr); + } + + } + fprintf(fh, DBQ(result) ":[ "); + } + + if (state->do_all && !state->dnserr) + { + if (state->do_combine) + { + snprintf(line, sizeof(line), DBQ(time) ":%llu, ", + (unsigned long long)state->start.tv_sec); + } + else + { + snprintf(line, sizeof(line), DBQ(subid) ":%d, " + DBQ(submax) ":%d, ", + state->subid, state->submax); + } + add_str(state, line); + } + + if (!state->dnserr) + { + snprintf(line, sizeof(line), + DBQ(method) ":" DBQ(%s) ", " DBQ(af) ": %d", + state->do_get ? "GET" : state->do_head ? "HEAD" : + "POST", + state->sin6.sin6_family == AF_INET6 ? 
6 : 4); + add_str(state, line); + + if (state->read_truncated) + add_str(state, ", " DBQ(read-truncated) ": true"); + + if (state->socklen != 0) + { + getnameinfo((struct sockaddr *)&state->sin6, + state->socklen, namebuf, sizeof(namebuf), + NULL, 0, NI_NUMERICHOST); + + snprintf(line, sizeof(line), + ", " DBQ(dst_addr) ":" DBQ(%s), namebuf); + add_str(state, line); + } + + /* End of readtiming */ + if (state->etim >= 2) + { + add_str2(state, " ]"); + add_str(state, state->result2); + free(state->result2); + state->result2= NULL; + state->resmax2= 0; + state->reslen2= 0; + } + } + + if (!state->connecting && !state->dnserr) + { + namebuf[0]= '\0'; + getnameinfo((struct sockaddr *)&state->loc_sin6, + state->loc_socklen, namebuf, sizeof(namebuf), + NULL, 0, NI_NUMERICHOST); + + snprintf(line, sizeof(line), ", " DBQ(src_addr) ":" DBQ(%s), + namebuf); + add_str(state, line); + } + + done= (state->readstate == READ_DONE); + if (done) + { + snprintf(line, sizeof(line), + ", " DBQ(rt) ":%f" + ", " DBQ(res) ":%d" + ", " DBQ(ver) ":" DBQ(%d.%d) + ", " DBQ(hsize) ":%d" + ", " DBQ(bsize) ":%d", + state->resptime, + state->http_result, + state->res_major, state->res_minor, + state->headers_size, + state->content_offset); + add_str(state, line); + if (state->etim >= 1) + { + snprintf(line, sizeof(line), + ", " DBQ(ttr) ":%f" + ", " DBQ(ttc) ":%f" + ", " DBQ(ttfb) ":%f", + state->ttr, + state->ttc, + state->ttfb); + add_str(state, line); + } + } + + if (!state->dnserr) + { + add_str(state, " }"); + } + if (!do_output) + add_str(state, ", "); + else + add_str(state, " ]"); + + if (do_output) + { + fprintf(fh, "%s }\n", state->result); + free(state->result); + state->result= NULL; + state->resmax= 0; + state->reslen= 0; + + if (state->output_file) + fclose(fh); + } + + free(state->post_buf); + state->post_buf= NULL; + + if (state->do_all && state->subid < state->submax) + { + tu_restart_connect(&state->tu_env); + return; + } + if (state->linemax) + { + state->linemax= 0; + free(state->line); + state->line= NULL; + } + + state->bev= NULL; + + if (!state->response_in) + tu_cleanup(&state->tu_env); + + if (state->resp_file) + { + fclose(state->resp_file); + state->resp_file= NULL; + } + + state->busy= 0; + if (state->base->done) + state->base->done(state, 0); +} + +static int get_input(struct hgstate *state) +{ + int n; + double t; + struct timespec endtime; + char line[80]; + + /* Assume that we always end up with a full buffer anyway */ + if (state->linemax == 0) + { + if (state->line) + crondlog(DIE9 "line is not empty"); + + state->linemax= MAX_LINE_LEN; + state->line= xmalloc(state->linemax); + } + + if (state->lineoffset) + { + if (state->linelen > state->lineoffset) + { + memmove(state->line, &state->line[state->lineoffset], + state->linelen-state->lineoffset); + state->linelen -= state->lineoffset; + } + else + { + state->linelen= 0; + } + state->lineoffset= 0; + } + if (state->linelen >= state->linemax) + { + return -1; /* We cannot get more data */ + } + + if (state->etim >= 2 && state->report_roffset) + { + gettime_mono(&endtime); + t= (endtime.tv_sec-state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + if (state->roffset != 0) + add_str2(state, ","); + snprintf(line, sizeof(line), + " { " DBQ(o) ": %d, " + DBQ(t) ": %f }", state->roffset, t); + add_str2(state, line); + state->report_roffset= 0; + } + + if (state->response_in) + { + size_t tmplen; + + tmplen= state->linemax-state->linelen; + read_response_file(state->resp_file, RESP_PACKET, + &tmplen, 
&state->line[state->linelen]); + n= tmplen; + } + else + { + n= bufferevent_read(state->bev, + &state->line[state->linelen], + state->linemax-state->linelen); + } + if (n < 0) + return -1; + if (state->response_out) + { + write_response(state->resp_file, RESP_PACKET, + n, &state->line[state->linelen]); + } + state->linelen += n; + state->roffset += n; + return 0; +} + +static void skip_spaces(const char *cp, char **ncp) +{ + const unsigned char *ucp; + + ucp= (const unsigned char *)cp; + while (ucp[0] != '\0' && isspace(ucp[0])) + ucp++; + *ncp= (char *)ucp; +} + +static void add_str(struct hgstate *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen + len+1 > state->resmax) + { + state->resmax= state->reslen + len+1 + 80; + state->result= xrealloc(state->result, state->resmax); + } + memcpy(state->result+state->reslen, str, len+1); + state->reslen += len; + //printf("add_str: result = '%s'\n", state->result); +} + +static void add_str_quoted(struct hgstate *state, char *str) +{ + char c; + char *p; + char buf[20]; + + for (p= str; *p; p++) + { + c= *p; + if (c == '"' || c == '\\') + snprintf(buf, sizeof(buf), "\\%c", c); + else if (isprint_asciionly((unsigned char)c)) + { + buf[0]= c; + buf[1]= '\0'; + } + else + { + snprintf(buf, sizeof(buf), "\\u%04x", + (unsigned char)c); + } + add_str(state, buf); + } +} + +static void add_str2(struct hgstate *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen2 + len+1 > state->resmax2) + { + state->resmax2= 2*state->reslen2 + len+1 + 80; + state->result2= xrealloc(state->result2, state->resmax2); + } + memcpy(state->result2+state->reslen2, str, len+1); + state->reslen2 += len; +} + +static void err_status(struct hgstate *state, const char *reason) +{ + char line[80]; + + snprintf(line, sizeof(line), + DBQ(err) ":" DBQ(bad status line: %s) ", ", + reason); + add_str(state, line); + report(state); +} + +static void err_header(struct hgstate *state, const char *reason) +{ + char line[80]; + + if (state->max_headers != 0) + add_str(state, " ], "); + snprintf(line, sizeof(line), + DBQ(err) ":" DBQ(bad header line: %s) ", ", reason); + add_str(state, line); + report(state); +} + +static void err_chunked(struct hgstate *state, const char *reason) +{ + char line[80]; + snprintf(line, sizeof(line), DBQ(err) ":" DBQ(bad chunk line: %s) ", ", + reason); + add_str(state, line); + report(state); +} + +static void readcb(struct bufferevent *bev UNUSED_PARAM, void *ptr) +{ + int r, major, minor, need_line, no_body; + size_t len; + char *cp, *ncp, *check, *line; + const char *prefix, *kw; + struct hgstate *state; + struct timespec endtime; + + state= ENV2STATE(ptr); + + state->report_roffset= 1; + for (;;) + { + if (state->read_limit > 0 && + state->roffset >= state->read_limit) + { + state->read_truncated= 1; + switch(state->readstate) + { + case READ_FIRST: + case READ_STATUS: + err_status(state, "read truncated"); + return; + case READ_HEADER: + err_header(state, "read truncated"); + return; + default: + state->readstate= READ_DONE; + break; + } + } + switch(state->readstate) + { + case READ_FIRST: + gettime_mono(&endtime); + state->ttfb= (endtime.tv_sec- + state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + state->readstate= READ_STATUS; + state->roffset= 0; + if (state->etim >= 2) + add_str2(state, ", " DBQ(readtiming) ": ["); + continue; + case READ_STATUS: + case READ_HEADER: + case READ_CHUNKED: + case READ_CHUNK_END: + case READ_CHUNKED_TRAILER: + need_line= 1; + break; + 
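+			/* The line-oriented states above (status line, header
+			 * lines, chunk-size lines and the chunked trailer) need
+			 * a complete '\n'-terminated line buffered before they
+			 * can be parsed; the remaining states consume raw body
+			 * bytes as they arrive.
+			 */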
default: + need_line= 0; + break; + } + + if (need_line) + { + /* Wait for a complete line */ + if (state->linemax == 0 || + memchr(&state->line[state->lineoffset], '\n', + state->linelen-state->lineoffset) == NULL) + { + r= get_input(state); + if (r == -1) + { + printf( + "readcb: get_input failed, should do something\n"); + return; + } + + /* Did we get what we want? */ + if (memchr(&state->line[state->lineoffset], + '\n', state->linelen-state->lineoffset) + == NULL) + { + /* No */ + if (state->linelen-state->lineoffset >= + MAX_LINE_LEN) + { + add_str(state, DBQ(err) ":" + DBQ(line too long) + ", "); + report(state); + } + return; + } + } + } + + switch(state->readstate) + { + case READ_STATUS: + line= &state->line[state->lineoffset]; + cp= strchr(line, '\n'); + if (cp == NULL) + { + /* Contains nul */ + err_status(state, "contains nul"); + return; + } + + state->lineoffset += (cp-line+1); + + cp[0]= '\0'; + if (cp > line && cp[-1] == '\r') + cp[-1]= '\0'; + + /* Check http version */ + prefix= "http/"; + len= strlen(prefix); + if (strncasecmp(prefix, line, len) != 0) + { + err_status(state, "bad prefix"); + return; + } + cp= line+len; + + major= strtoul(cp, &check, 10); + if (check == cp || check[0] != '.') + { + err_status(state, "bad major"); + return; + } + + cp= check+1; + minor= strtoul(cp, &check, 10); + if (check == cp || check[0] == '\0' || + !isspace(*(unsigned char *)check)) + { + err_status(state, "bad minor"); + return; + } + + skip_spaces(check, &cp); + + if (!isdigit(*(unsigned char *)cp)) + { + err_status(state, "bad status code"); + return; + } + state->http_result= strtoul(cp, NULL, 10); + state->res_major= major; + state->res_minor= minor; + + state->readstate= READ_HEADER; + state->content_length= -1; + + if (state->max_headers) + { + add_str(state, DBQ(header) ": ["); + } + + continue; + + case READ_HEADER: + line= &state->line[state->lineoffset]; + cp= strchr(line, '\n'); + if (cp == NULL) + { + err_header(state, "contains nul"); + return; + } + + len= (cp-line+1); + state->lineoffset += len; + + cp[0]= '\0'; + if (cp > line && cp[-1] == '\r') + cp[-1]= '\0'; + + if (line[0] == '\0') + { + if (state->tot_headers <= state->max_headers && + state->max_headers != 0) + { + if (state->tot_headers != 0) + add_str(state, ","); + add_str(state, " \"\""); + } + if (state->max_headers) + add_str(state, " ], "); + state->readstate= READ_BODY; + continue; + } + + state->headers_size += len; + + len= strlen(line); + if (state->tot_headers+len+1 <= state->max_headers) + { + if (state->tot_headers != 0) + add_str(state, ","); + add_str(state, " \""); + add_str_quoted(state, line); + add_str(state, "\""); + state->tot_headers += len; + } else if (state->tot_headers <= state->max_headers && + state->max_headers != 0) + { + /* Fill up remaining space and report + * truncation */ + if (state->tot_headers != 0) + add_str(state, ","); + add_str(state, " \""); + if (state->tot_headers < state->max_headers) + { + line[state->max_headers- + state->tot_headers]= '\0'; + add_str_quoted(state, line); + } + add_str(state, "[...]\""); + + state->tot_headers += len+1; + } + + cp= line; + skip_spaces(cp, &ncp); + if (ncp != line) + continue; /* Continuation line */ + + cp= ncp; + while (ncp[0] != '\0' && ncp[0] != ':' && + !isspace((unsigned char)ncp[0])) + { + ncp++; + } + + kw= "Transfer-Encoding"; + len= strlen(kw); + if (strncasecmp(cp, kw, len) == 0) + { + /* Skip optional white space */ + cp= ncp; + skip_spaces(cp, &cp); + + if (cp[0] != ':') + { + err_header(state, + "malformed 
transfer-encoding"); + return; + } + cp++; + + /* Skip more white space */ + skip_spaces(cp, &cp); + + /* Should have the value by now */ + kw= "chunked"; + len= strlen(kw); + if (strncasecmp(cp, kw, len) != 0) + continue; + /* make sure we have end of line or white + * space */ + if (cp[len] != '\0' && + isspace((unsigned char)cp[len])) + { + continue; + } + state->chunked= 1; + continue; + } + + kw= "Content-length"; + len= strlen(kw); + if (strncasecmp(cp, kw, len) != 0) + continue; + + /* Skip optional white space */ + cp= ncp; + skip_spaces(cp, &cp); + + if (cp[0] != ':') + { + err_header(state, + "malformed content-length"); + return; + } + cp++; + + /* Skip more white space */ + skip_spaces(cp, &cp); + + /* Should have the value by now */ + state->content_length= strtoul(cp, &check, 10); + if (check == cp) + { + err_header(state, + "malformed content-length"); + return; + } + + /* And after that we should have just white space */ + cp= check; + skip_spaces(cp, &cp); + + if (cp[0] != '\0') + { + err_header(state, + "malformed content-length"); + return; + } + continue; + + case READ_BODY: + no_body= (state->do_head || state->http_result == 204 || + state->http_result == 304 || + state->http_result/100 == 1); + + if (no_body) + { + /* This reply will not have a body even if + * there is a content-length line. + */ + state->readstate= READ_DONE; + } + else if (state->chunked) + { + state->readstate= READ_CHUNKED; + state->content_offset= 0; + state->tot_chunked= 0; + } + else + { + state->readstate= READ_SIMPLE; + state->content_offset= 0; + } + + continue; + + case READ_CHUNKED: + line= &state->line[state->lineoffset]; + cp= strchr(line, '\n'); + if (cp == NULL) + { + err_chunked(state, "contains nul"); + return; + } + + len= (cp-line+1); + state->lineoffset += len; + + cp[0]= '\0'; + if (cp > line && cp[-1] == '\r') + cp[-1]= '\0'; + + len= strtoul(line, &check, 16); + if (check == line || (check[0] != '\0' && + !isspace(*(unsigned char *)check))) + { + err_chunked(state, "not a number"); + return; + } + + if (!len) + { + state->readstate= READ_CHUNKED_TRAILER; + continue; + } + + state->tot_chunked += len; + state->readstate= READ_CHUNK_BODY; + continue; + + case READ_CHUNK_BODY: + if (state->content_offset >= state->tot_chunked) + { + state->readstate= READ_CHUNK_END; + continue; + } + + /* Do we need more input? */ + if (state->linemax == 0 || + state->lineoffset >= state->linelen) + { + r= get_input(state); + if (r == -1) + { + printf( + "readcb: get_input failed, should do something\n"); + return; + } + + /* Did we get what we want? 
*/ + if (state->lineoffset >= state->linelen) + { + /* No */ + return; + } + } + + len= state->linelen-state->lineoffset; + if (state->content_offset+len > state->tot_chunked) + len= state->tot_chunked-state->content_offset; + + if (state->content_offset+len <= state->max_body) + { +#if 0 + printf( + "readcb: should report %ld bytes worth of content\n", + len); +#endif + } + else if (state->content_offset <= state->max_body && + state->max_body != 0) + { + /* Fill up remaining space and report + * truncation */ + if (state->content_offset < state->max_body) + { + len= state->max_body - + state->content_offset; +#if 0 + printf( + "readcb: should report %ld bytes worth of content\n", + len); +#endif + + } + fprintf(stderr, + "readcb: should add truncation indicator\n"); + } + + state->content_offset += len; + state->lineoffset += len; + + continue; + + case READ_CHUNK_END: + line= &state->line[state->lineoffset]; + cp= strchr(line, '\n'); + if (cp == NULL) + { + err_chunked(state, "contains nul"); + return; + } + + len= (cp-line+1); + state->lineoffset += len; + + cp[0]= '\0'; + if (cp > line && cp[-1] == '\r') + cp[-1]= '\0'; + + if (strlen(line) != 0) + { + err_chunked(state, + "garbage at the end of chunk"); + return; + } + + state->readstate= READ_CHUNKED; + continue; + + case READ_CHUNKED_TRAILER: + line= &state->line[state->lineoffset]; + cp= strchr(line, '\n'); + if (cp == NULL) + { + err_chunked(state, "contains nul"); + return; + } + + len= (cp-line+1); + state->lineoffset += len; + + cp[0]= '\0'; + if (cp > line && cp[-1] == '\r') + cp[-1]= '\0'; + + if (line[0] == '\0') + { + state->readstate= READ_DONE; + continue; + } + continue; + + case READ_SIMPLE: + if (state->content_length >= 0 && + state->content_offset >= state->content_length) + { + state->readstate= READ_DONE; + continue; + } + + /* Do we need more input? */ + if (state->linemax == 0 || + state->lineoffset >= state->linelen) + { + r= get_input(state); + if (r == -1) + { + printf( + "readcb: get_input failed, should do something\n"); + return; + } + + /* Did we get what we want? 
*/ + if (state->lineoffset >= state->linelen) + { + /* No */ + return; + } + } + + len= state->linelen-state->lineoffset; + if (state->content_offset+len <= state->max_body) + { +#if 0 + printf( + "readcb: should report %ld bytes worth of content\n", + len); +#endif + } + else if (state->content_offset <= state->max_body && + state->max_body != 0) + { + /* Fill up remaining space and report + * truncation */ + if (state->content_offset < state->max_body) + { + len= state->max_body - + state->content_offset; +#if 0 + printf( + "readcb: should report %ld bytes worth of content\n", + len); +#endif + + } + fprintf(stderr, + "readcb: should add truncation indicator\n"); + } + + state->content_offset += len; + state->lineoffset += len; + + continue; + + case READ_DONE: + if (state->bev || state->response_in) + { + state->bev= NULL; + gettime_mono(&endtime); + state->resptime= + (endtime.tv_sec- + state->start.tv_sec)*1e3 + + (endtime.tv_nsec- + state->start.tv_nsec)/1e6; + report(state); + } + return; + default: + printf("readcb: readstate = %d\n", state->readstate); + return; + } + } +} + +static int post_file(struct hgstate *state, const char *filename) +{ + int r; + FILE *fh; + + if (!state->post_fh) + { + fh= fopen(filename, "r"); + if (fh == NULL) + { + printf("post_file: unable to open '%s': %s\n", + filename, strerror(errno)); + return -1; + } + state->post_fh= fh; + } + if (!state->post_buf) + state->post_buf= xmalloc(POST_BUF_SIZE); + r= fread(state->post_buf, 1, POST_BUF_SIZE, state->post_fh); + if (r == -1) + { + printf("post_file: error reading from '%s': %s\n", + filename, strerror(errno)); + return -1; + } + if (r == 0) + { + fclose(state->post_fh); + state->post_fh= NULL; + return 1; + } + r= bufferevent_write(state->bev, state->post_buf, r); + if (r == -1) + { + printf("post_file: bufferevent_write failed\n"); + } + return r; +} + +static void writecb(struct bufferevent *bev, void *ptr) +{ + int r; + struct hgstate *state; + struct evbuffer *output; + off_t cLength; + struct stat sb; + struct timespec endtime; + + state= ENV2STATE(ptr); + + if (state->in_writecb) + return; /* Recursive call */ + state->in_writecb= 1; + + for(;;) + { + switch(state->writestate) + { + case WRITE_FIRST: + gettime_mono(&endtime); + state->ttc= (endtime.tv_sec- + state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + state->writestate= WRITE_HEADER; + continue; + case WRITE_HEADER: + if (state->response_in) + { + state->writestate = WRITE_DONE; + continue; + } + output= bufferevent_get_output(bev); + evbuffer_add_printf(output, "%s %s HTTP/1.%c\r\n", + state->do_get ? "GET" : + state->do_head ? "HEAD" : "POST", state->path, + state->do_http10 ? 
'0' : '1'); + evbuffer_add_printf(output, "Host: %s\r\n", + state->hostport); + evbuffer_add_printf(output, "Connection: close\r\n"); + evbuffer_add_printf(output, "User-Agent: %s\r\n", + state->user_agent); + if (state->do_post) + { + evbuffer_add_printf(output, + "Content-Type: application/x-www-form-urlencoded\r\n"); + } + + cLength= 0; + if (state->do_post) + { + if (state->post_header) + { + if (stat(state->post_header, &sb) == 0) + cLength += sb.st_size; + } + if (state->post_file) + { + if (stat(state->post_file, &sb) == 0) + cLength += sb.st_size; + } + if (state->post_footer) + { + if (stat(state->post_footer, &sb) == 0) + cLength += sb.st_size; + } + evbuffer_add_printf(output, + "Content-Length: %lu\r\n", + (unsigned long)cLength); + } + + evbuffer_add_printf(output, "\r\n"); + if (state->do_post) + state->writestate = WRITE_POST_HEADER; + else + state->writestate = WRITE_DONE; + goto out; + case WRITE_POST_HEADER: + if (!state->post_header) + { + state->writestate= WRITE_POST_FILE; + continue; + } + r= post_file(state, state->post_header); + if (r != 1) + goto out; + + /* Done */ + state->writestate= WRITE_POST_FILE; + continue; + + case WRITE_POST_FILE: + if (!state->post_file) + { + state->writestate= WRITE_POST_FOOTER; + continue; + } + r= post_file(state, state->post_file); + if (r != 1) + goto out; + + /* Done */ + state->writestate= WRITE_POST_FOOTER; + continue; + case WRITE_POST_FOOTER: + if (!state->post_footer) + { + state->writestate= WRITE_DONE; + continue; + } + r= post_file(state, state->post_footer); + if (r != 1) + goto out; + + /* Done */ + state->writestate= WRITE_DONE; + continue; + case WRITE_DONE: + goto out; + default: + printf("writecb: unknown write state: %d\n", + state->writestate); + goto out; + } + } + +out: + state->in_writecb= 0; +} + +static void err_reading(struct hgstate *state) +{ + struct timespec endtime; + + write_response(state->resp_file, RESP_READ_ERROR, 0, NULL); + + switch(state->readstate) + { + case READ_STATUS: + add_str(state, ", " DBQ(err) ":" DBQ(error reading status)); + report(state); + break; + case READ_HEADER: + if (state->max_headers) + add_str(state, " ], "); + add_str(state, DBQ(err) ":" DBQ(error reading headers) ", "); + report(state); + break; + case READ_SIMPLE: +#if 0 + if (state->max_body) + add_str(state, " ]"); +#endif + if (state->content_length == -1) + { + /* EOF is normal */ + state->readstate= READ_DONE; + } + else + { + add_str(state, DBQ(err) ":" DBQ(error reading body) + ", "); + } + gettime_mono(&endtime); + state->resptime= (endtime.tv_sec-state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + report(state); + break; + default: + printf("in err_reading, unhandled case %d\n", state->readstate); + } +} + +static void dnscount(struct tu_env *env, int count) +{ + struct hgstate *state; + + state= ENV2STATE(env); + state->subid= 0; + state->submax= count; +} + +static void beforeconnect(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen) +{ + struct hgstate *state; + struct timespec endtime; + + state= ENV2STATE(env); + + state->socklen= addrlen; + memcpy(&state->sin6, addr, state->socklen); + if (state->response_out) + { + write_response(state->resp_file, RESP_DSTADDR, + addrlen, addr); + } + + state->connecting= 1; + state->in_writecb= 0; + state->readstate= READ_FIRST; + state->writestate= WRITE_FIRST; + + state->linelen= 0; + state->lineoffset= 0; + state->headers_size= 0; + state->tot_headers= 0; + state->roffset= 0; + state->read_truncated= 0; + + /* Clear result */ + if 
(!state->do_all || !state->do_combine) + state->reslen= 0; + + add_str(state, "{ "); + + if (state->first_connect) + { + gettime_mono(&endtime); + state->ttr= (endtime.tv_sec-state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + state->first_connect= 0; + } + gettime_mono(&state->start); +} + + +static void reporterr(struct tu_env *env, enum tu_err cause, + const char *str) +{ + struct hgstate *state; + char namebuf[NI_MAXHOST]; + char line[160]; + + state= ENV2STATE(env); + + if (env != &state->tu_env) abort(); + + switch(cause) + { + case TU_DNS_ERR: + snprintf(line, sizeof(line), + "{ " DBQ(dnserr) ":" DBQ(%s) " }", str); + add_str(state, line); + state->dnserr= 1; + report(state); + break; + + case TU_READ_ERR: + err_reading(state); + break; + + case TU_SOCKET_ERR: + snprintf(line, sizeof(line), + "{ " DBQ(sockerr) ":" DBQ(%s) ", ", str); + add_str(state, line); + report(state); + break; + + case TU_CONNECT_ERR: + fprintf(stderr, "reporterr: str %s\n", str); + snprintf(line, sizeof(line), + DBQ(err) ":" DBQ(connect: %s) ", ", str); + add_str(state, line); + + if (state->do_all) + report(state); + else + tu_restart_connect(&state->tu_env); + break; + + case TU_OUT_OF_ADDRS: + report(state); + break; + + case TU_BAD_ADDR: + add_str(state, "{ " DBQ(error) ": " + DBQ(address not allowed)); + + getnameinfo(env->dns_curr->ai_addr, + env->dns_curr->ai_addrlen, namebuf, sizeof(namebuf), + NULL, 0, NI_NUMERICHOST); + + snprintf(line, sizeof(line), + ", " DBQ(dst_addr) ":" DBQ(%s) " }", namebuf); + add_str(state, line); + + state->dnserr= 1; + report(state); + break; + + default: + crondlog(DIE9 "reporterr: bad cause %d", cause); + } +} + +static void connected(struct tu_env *env, struct bufferevent *bev) +{ + struct hgstate *state; + + state= ENV2STATE(env); + + if (env != &state->tu_env) abort(); + + state->connecting= 0; + state->bev= bev; + + state->loc_socklen= sizeof(state->loc_sin6); + if (state->response_in) + { + size_t len; + + len= state->loc_socklen; + read_response_file(state->resp_file, RESP_SOCKNAME, + &len, &state->loc_sin6); + state->loc_socklen= len; + } + else + { + getsockname(bufferevent_getfd(bev), + (struct sockaddr *)&state->loc_sin6, + &state->loc_socklen); + if (state->response_out) + { + write_response(state->resp_file, RESP_SOCKNAME, + state->loc_socklen, &state->loc_sin6); + } + } +} + +static void httpget_start(void *state) +{ + int type; + size_t len; + struct hgstate *hgstate; + struct evutil_addrinfo hints; + struct timeval interval; + struct timespec endtime; + + hgstate= state; + + if (hgstate->busy) + { + printf("httget_start: busy\n"); + return; + } + hgstate->busy= 1; + + hgstate->dnserr= 0; + hgstate->connecting= 0; + hgstate->readstate= READ_STATUS; + hgstate->writestate= WRITE_HEADER; + hgstate->gstart= atlas_time(); + gettime_mono(&hgstate->start); + hgstate->first_connect= 1; + + if (hgstate->response_out) + { + hgstate->resp_file= fopen(hgstate->response_out, "w"); + if (!hgstate->resp_file) + { + crondlog(DIE9 "unable to write to '%s'", + hgstate->response_out); + } + } + + memset(&hints, '\0', sizeof(hints)); + hints.ai_socktype= SOCK_STREAM; + if (hgstate->only_v4) + hints.ai_family= AF_INET; + else if (hgstate->only_v6) + hints.ai_family= AF_INET6; + interval.tv_sec= hgstate->timeout / 1000; + interval.tv_usec= (hgstate->timeout % 1000) * 1000; + + if (hgstate->response_in) + { + hgstate->resp_file= fopen(hgstate->response_in, "r"); + if (!hgstate->resp_file) + { + crondlog(DIE9 "unable to read from '%s'", + 
hgstate->response_in); + } + /* Emulate ttr */ + tu_fake_ttr(&hgstate->tu_env, hgstate->host); + + len= sizeof(hgstate->sin6); + read_response_file(hgstate->resp_file, RESP_DSTADDR, + &len, &hgstate->sin6); + hgstate->socklen= len; + + add_str(hgstate, "{ "); + if (hgstate->first_connect) + { + gettime_mono(&endtime); + hgstate->ttr= (endtime.tv_sec-hgstate->start.tv_sec)* + 1e3 + (endtime.tv_nsec-hgstate->start.tv_nsec)/ + 1e6; + hgstate->first_connect= 0; + } + + /* Start time */ + gettime_mono(&hgstate->start); + + gettime_mono(&endtime); + hgstate->ttc= (endtime.tv_sec- hgstate->start.tv_sec)*1e3 + + (endtime.tv_nsec-hgstate->start.tv_nsec)/1e6; + + hgstate->readstate= READ_FIRST; + + connected(&hgstate->tu_env, NULL); + + writecb(NULL, &hgstate->tu_env); + while(hgstate->resp_file != NULL) + { + peek_response_file(hgstate->resp_file, &type); + if (type == RESP_READ_ERROR) + { + len= 0; + read_response_file(hgstate->resp_file, + RESP_READ_ERROR, &len, NULL); + err_reading(hgstate); + break; + } + readcb(NULL, &hgstate->tu_env); + } + report(hgstate); + } + else + { + tu_connect_to_name(&hgstate->tu_env, hgstate->host, + hgstate->do_tls, 0, hgstate->port, + &interval, &hints, hgstate->infname, + hgstate->sni, NULL, + timeout_callback, + reporterr, dnscount, beforeconnect, + connected, readcb, writecb); + } +} + +static int httpget_delete(void *state) +{ + int ind; + struct hgstate *hgstate; + struct hgbase *base; + + hgstate= state; + + printf("httpget_delete: state %p, index %d, busy %d\n", + state, hgstate->index, hgstate->busy); + + if (hgstate->busy) + return 0; + + if (hgstate->line) + crondlog(DIE9 "line is not empty"); + + base= hgstate->base; + ind= hgstate->index; + + if (base->table[ind] != hgstate) + crondlog(DIE9 "strange, state not in table"); + base->table[ind]= NULL; + + //event_del(&hgstate->timer); + + free(hgstate->atlas); + hgstate->atlas= NULL; + free(hgstate->bundle); + hgstate->bundle= NULL; + free(hgstate->output_file); + hgstate->output_file= NULL; + free(hgstate->infname); + hgstate->infname= NULL; + free(hgstate->host); + hgstate->host= NULL; + free(hgstate->hostport); + hgstate->hostport= NULL; + free(hgstate->port); + hgstate->port= NULL; + free(hgstate->path); + hgstate->path= NULL; + free(hgstate->sni); + hgstate->sni= NULL; + free(hgstate->user_agent); + hgstate->user_agent= NULL; + free(hgstate->post_header); + hgstate->post_header= NULL; + free(hgstate->post_file); + hgstate->post_file= NULL; + free(hgstate->post_footer); + hgstate->post_footer= NULL; + + free(hgstate); + + return 1; +} + +struct testops httpget_ops = { httpget_init, httpget_start, + httpget_delete }; + diff --git a/probe-busybox/eperd/json-macros.h b/probe-busybox/eperd/json-macros.h new file mode 100644 index 00000000..36d76736 --- /dev/null +++ b/probe-busybox/eperd/json-macros.h @@ -0,0 +1,14 @@ +#define DQ(str) "\"" #str "\"" +#define DQC(str) "\"" #str "\" : " +#define LBUF lbuf +#define ADDRESULT buf_add(LBUF, line, strlen(line)); +#define AS(val) buf_add(LBUF, val, strlen (val)); +#define JS(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : \"%s\" , ", val); ADDRESULT +#define JS_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH,"\"" #key"\" : \"%s\" ", val); ADDRESULT +#define JSDOT(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : \"%s.\" , ", val); ADDRESULT +#define JS1(key, fmt, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : "#fmt" , ", val); ADDRESULT +#define JD(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %d , ", val); 
ADDRESULT +#define JD_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %d ", val); ADDRESULT +#define JU(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %u , ", val); ADDRESULT +#define JU_NC(key, val) snprintf(line, DEFAULT_LINE_LENGTH, "\"" #key"\" : %u", val); ADDRESULT +#define JC snprintf(line, DEFAULT_LINE_LENGTH, ","); ADDRESULT diff --git a/probe-busybox/eperd/ntp.c b/probe-busybox/eperd/ntp.c new file mode 100644 index 00000000..b5084dfe --- /dev/null +++ b/probe-busybox/eperd/ntp.c @@ -0,0 +1,1527 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * ntp.c + */ + +#include "libbb.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +#define DBQ(str) "\"" #str "\"" + +#ifndef STANDALONE_BUSYBOX +#define uh_sport source +#define uh_dport dest +#define uh_ulen len +#define uh_sum check +#endif + +#define NTP_PORT 123 + +#define NTP_OPT_STRING ("!46c:i:s:w:A:B:O:R:W:") + +#define OPT_4 (1 << 0) +#define OPT_6 (1 << 1) + +#define IPHDR 20 + +#define SRC_BASE_PORT (20480) +#define MAX_DATA_SIZE (4096) + +#define DBQ(str) "\"" #str "\"" + +#define RESP_PACKET 1 +#define RESP_SOCKNAME 2 +#define RESP_DSTADDR 3 +#define RESP_TIMEOFDAY 4 +#define RESP_ADDRINFO 5 +#define RESP_ADDRINFO_SA 6 + +struct ntp_ts +{ + uint32_t ntp_seconds; + uint32_t ntp_fraction; +}; + +struct ntpbase +{ + struct event_base *event_base; + + int my_pid; + + struct ntpstate **table; + int tabsiz; + + /* For standalone ntp. Called when a ntp instance is + * done. Just one pointer for all instances. It is up to the caller + * to keep it consistent. + */ + void (*done)(void *state, int error); + + u_char packet[MAX_DATA_SIZE]; +}; + +struct ntpstate +{ + /* Parameters */ + char *atlas; + char *bundle; + char *hostname; + char *destportstr; + char *out_filename; + char *interface; + char do_v6; + char count; + uint16_t size; + unsigned timeout; + char *response_in; /* Fuzzing */ + char *response_out; + + /* Base and index in table */ + struct ntpbase *base; + int index; + + struct sockaddr_in6 sin6; + socklen_t socklen; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + + int sent; + uint16_t seq; + int socket; /* Socket for sending and receiving */ + struct event event_socket; /* Event for this socket */ + unsigned first:1; /* Waiting for first response */ + unsigned done:1; /* We got something from the target + * host or a destination unreachable. + */ + unsigned not_done:1; /* Not got something else */ + unsigned busy:1; /* Busy, do not start another one */ + unsigned gotresp:1; /* Got a response to the last packet + * we sent. For dup detection. 
+ */ + unsigned dnsip:1; /* Busy with dns name resolution */ + unsigned report_dst:1; /* Report dst anyhow */ + struct evutil_addrinfo *dns_res; + struct evutil_addrinfo *dns_curr; + + time_t starttime; + struct timeval xmit_time; + + struct timespec start_time; /* At the moment only for + * DNS resolution + */ + double ttr; /* Time to resolve a name, in ms */ + + + uint8_t ntp_flags; + uint8_t ntp_stratum; + int8_t ntp_poll; + int8_t ntp_precision; + uint32_t ntp_root_delay; + uint32_t ntp_root_dispersion; + uint32_t ntp_reference_id; + struct ntp_ts ntp_reference_ts; + + struct event timer; + + unsigned long min; + unsigned long max; + unsigned long sum; + int sentpkts; + int rcvdpkts; + int duppkts; + + char *result; + size_t reslen; + size_t resmax; + char open_result; + + FILE *resp_file_out; /* Fuzzing */ +}; + +static struct ntpbase *ntp_base; + +struct ntphdr +{ + uint8_t ntp_flags; + uint8_t ntp_stratum; + int8_t ntp_poll; + int8_t ntp_precision; + uint32_t ntp_root_delay; + uint32_t ntp_root_dispersion; + uint32_t ntp_reference_id; + struct ntp_ts ntp_reference_ts; + struct ntp_ts ntp_origin_ts; + struct ntp_ts ntp_receive_ts; + struct ntp_ts ntp_transmit_ts; +}; + +struct ntpextension +{ + uint16_t ext_type; + uint16_t ext_length; +}; + +/* RFC 5906 NTP Autokey extensions */ +#define NTP_EXT_REQUEST 0x0000 +#define NTP_EXT_MESSAGE 0x0002 +#define NTP_EXT_ERROR 0x4000 +#define NTP_EXT_RESPONSE 0x8000 +#define NTP_EXT_NOOP 0x0000 +#define NTP_EXT_ASSOC 0x0100 +#define NTP_EXT_CERT 0x0200 +#define NTP_EXT_COOKIE 0x0300 +#define NTP_EXT_AUTOKEY 0x0400 +#define NTP_EXT_LEAPSECS 0x0500 +#define NTP_EXT_SIGN 0x0600 +#define NTP_EXT_IFF_IDENT 0x0700 +#define NTP_EXT_GQ_IDENT 0x0800 +#define NTP_EXT_MV_IDENT 0x0900 + +#define NTP_LI_MASK 0xC0 +#define NTP_LI_SHIFT 6 +#define LI_NO_WARNING 0 +#define LI_61 1 +#define LI_59 2 +#define LI_UNKNOWN 3 +#define NTP_VERSION 4 +#define NTP_VERSION_MASK 0x38 +#define NTP_VERSION_SHIFT 3 +#define NTP_MODE_CLIENT 3 +#define NTP_MODE_MASK 0x7 +#define MODE_RESERVED 0 +#define MODE_SYM_ACT 1 +#define MODE_SYM_PASS 2 +#define MODE_CLIENT 3 +#define MODE_SERVER 4 +#define MODE_BROADCAST 5 +#define MODE_CONTROL 6 +#define MODE_PRIVATE 7 + +#define STRATUM_INVALID 0 +#define STRATUM_UNSYNCHRONIZED 16 + +#define NTP_1970 2208988800UL /* 1970 - 1900 in seconds */ + +#define NTP_4G 4294967296.0 + + +static void ready_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); +static int create_socket(struct ntpstate *state); + +static void add_str(struct ntpstate *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen + len+1 > state->resmax) + { + state->resmax= state->reslen + len+1 + 80; + state->result= xrealloc(state->result, state->resmax); + } + memcpy(state->result+state->reslen, str, len+1); + state->reslen += len; + //printf("add_str: result = '%s'\n", state->result); +} + +static void format_li(char *line, size_t size, uint8_t flags) +{ + const char *str; + + switch((flags & NTP_LI_MASK) >> NTP_LI_SHIFT) + { + case LI_NO_WARNING: str= "no"; break; + case LI_61: str= "61"; break; + case LI_59: str= "59"; break; + case LI_UNKNOWN: str= "unknown"; break; + } + snprintf(line, size, DBQ(li) ": " DBQ(%s), str); +} + +static void format_mode(char *line, size_t size, uint8_t flags) +{ + const char *str; + + switch(flags & NTP_MODE_MASK) + { + case MODE_RESERVED: str= "reserved"; break; + case MODE_SYM_ACT: str= "sym. active"; break; + case MODE_SYM_PASS: str= "sym. 
passive"; break; + case MODE_CLIENT: str= "client"; break; + case MODE_SERVER: str= "server"; break; + case MODE_BROADCAST: str= "broadcast"; break; + case MODE_CONTROL: str= "control"; break; + case MODE_PRIVATE: str= "private"; break; + } + snprintf(line, size, DBQ(mode) ": " DBQ(%s), str); +} + +static void format_stratum(char *line, size_t size, uint8_t stratum) +{ + const char *str; + + str= NULL; + switch(stratum) + { + case STRATUM_INVALID: str= "invalid"; break; + case STRATUM_UNSYNCHRONIZED: str= "unsynchronized"; break; + } + if (str) + { + snprintf(line, size, DBQ(stratum) ": " DBQ(%s), + str); + } + else if (stratum < STRATUM_UNSYNCHRONIZED) + { + snprintf(line, size, DBQ(stratum) ": %d", + stratum); + } + else + { + snprintf(line, size, DBQ(stratum) ": " DBQ(reserved (%d)), + stratum); + } +} + +static void format_8bit(char *line, size_t size, const char *label, + int8_t value) +{ + if (value >= 0 && value < 32) + { + snprintf(line, size, DBQ(%s) ": %u", label, 1U << value); + } + else + { + snprintf(line, size, DBQ(%s) ": %g", label, pow(2, value)); + } +} + +static void format_short_ts(char *line, size_t size, const char *label, + uint32_t value) +{ + snprintf(line, size, DBQ(%s) ": %g", label, value/65536.0); +} + +static void format_ref_id(char *line, size_t size, uint32_t value, + uint8_t stratum) +{ + int i; + size_t offset; + unsigned char *p; + char line2[40]; + + if (stratum == 0 || stratum == 1) + { + line2[0]= '\0'; + for (i= 0, p= (unsigned char *)&value; + i= 127) + { + snprintf(line2+offset, sizeof(line2)-offset, + "\\\\x%02x", *p); + } + else + { + snprintf(line2+offset, sizeof(line2)-offset, + "%c", *p); + } + + } + snprintf(line, size, DBQ(ref-id) ": " DBQ(%s), + line2); + } + else + { + snprintf(line, size, DBQ(ref-id) ": " DBQ(%08x), + ntohl(value)); + } +} + +static void format_ts(char *line, size_t size, const char *label, + struct ntp_ts *ts) +{ + double d; + + d= ntohl(ts->ntp_seconds) + ntohl(ts->ntp_fraction)/NTP_4G; + snprintf(line, size, DBQ(%s) ": %.9f", label, d); +} + +static void report(struct ntpstate *state) +{ + int r; + FILE *fh; + const char *proto; + struct addrinfo *ai; + char namebuf[NI_MAXHOST]; + char line[80]; + struct addrinfo hints; + + event_del(&state->timer); + + if (state->out_filename) + { + fh= fopen(state->out_filename, "a"); + if (!fh) + crondlog(DIE9 "ntp: unable to append to '%s'", + state->out_filename); + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) + ", %s" + ", " DBQ(lts) ":%d" + ", " DBQ(time) ":%llu, ", + state->atlas, atlas_get_version_json_str(), + get_timesync(), + (unsigned long long)state->starttime); + if (state->bundle) + fprintf(fh, DBQ(bundle) ":%s, ", state->bundle); + } + + fprintf(fh, DBQ(dst_name) ":" DBQ(%s), + state->hostname); + + /* Check if hostname is numeric or had to be resolved */ + memset(&hints, '\0', sizeof(hints)); + hints.ai_flags= AI_NUMERICHOST; + r= getaddrinfo(state->hostname, NULL, &hints, &ai); + if (r == 0) + { + /* Getaddrinfo succeded so hostname is an address literal */ + freeaddrinfo(ai); + } + else + { + /* Assume that name resolution was required */ + fprintf(fh, ", " DBQ(ttr) ":%f", state->ttr); + } + + if (!state->dnsip || state->report_dst) + { + getnameinfo((struct sockaddr *)&state->sin6, state->socklen, + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + + fprintf(fh, ", " DBQ(dst_addr) ":" DBQ(%s), namebuf); + + if (state->loc_socklen != 0) + { + namebuf[0]= '\0'; + getnameinfo((struct sockaddr 
*)&state->loc_sin6, + state->loc_socklen, + namebuf, sizeof(namebuf), NULL, 0, + NI_NUMERICHOST); + + fprintf(fh, ", " DBQ(src_addr) ":" DBQ(%s), namebuf); + } + } + + proto= "UDP"; + fprintf(fh, ", " DBQ(proto) ":" DBQ(%s) ", " DBQ(af) ": %d", + proto, + state->dnsip ? (state->do_v6 ? 6 : 4) : + (state->sin6.sin6_family == AF_INET6 ? 6 : 4)); + + if (!state->first && !state->dnsip) + { + format_li(line, sizeof(line), state->ntp_flags); + fprintf(fh, ", %s", line); + fprintf(fh, ", " DBQ(version) ": %d", + ((state->ntp_flags & NTP_VERSION_MASK) >> + NTP_VERSION_SHIFT)); + + format_mode(line, sizeof(line), state->ntp_flags); + fprintf(fh, ", %s", line); + + format_stratum(line, sizeof(line), state->ntp_stratum); + fprintf(fh, ", %s", line); + + format_8bit(line, sizeof(line), "poll", state->ntp_poll); + fprintf(fh, ", %s", line); + + format_8bit(line, sizeof(line), "precision", + state->ntp_precision); + fprintf(fh, ", %s", line); + + format_short_ts(line, sizeof(line), "root-delay", + ntohl(state->ntp_root_delay)); + fprintf(fh, ", %s", line); + + format_short_ts(line, sizeof(line), "root-dispersion", + ntohl(state->ntp_root_dispersion)); + fprintf(fh, ", %s", line); + + format_ref_id(line, sizeof(line), state->ntp_reference_id, + state->ntp_stratum); + fprintf(fh, ", %s", line); + + format_ts(line, sizeof(line), "ref-ts", + &state->ntp_reference_ts); + fprintf(fh, ", %s", line); + } + + fprintf(fh, ", " DBQ(result) ": [ %s ] }\n", state->result); + + free(state->result); + state->result= NULL; + + if (state->out_filename) + fclose(fh); + + /* Kill the event and close socket */ + if (state->socket != -1) + { + event_del(&state->event_socket); + close(state->socket); + state->socket= -1; + } + + state->busy= 0; + + if (state->base->done) + state->base->done(state, 0); +} + +static void send_pkt(struct ntpstate *state) +{ + int r, len, serrno; + struct ntpbase *base; + struct ntphdr *ntphdr; + struct ntpextension *ntpextension; + double d; + struct timeval interval; + char line[80]; + + state->gotresp= 0; + + base= state->base; + + if (state->sent >= state->count) + { + add_str(state, " }"); + + /* We are done */ + report(state); + return; + } + state->seq++; + + ntphdr= (struct ntphdr *)base->packet; + len= sizeof(*ntphdr); + + memset(ntphdr, '\0', len); + + if (state->size > 0) { + ntpextension = (struct ntpextension *) (base->packet + len); + memset(ntpextension, '\0', state->size); + // NTP autokey (RFC5906) no-operation request + ntpextension->ext_type= htons(NTP_EXT_MESSAGE | NTP_EXT_REQUEST | NTP_EXT_NOOP); + ntpextension->ext_length= htons(state->size); + len+= state->size; + } + + ntphdr->ntp_flags= (NTP_VERSION << NTP_VERSION_SHIFT) | NTP_MODE_CLIENT; + + gettimeofday(&state->xmit_time, NULL); + + ntphdr->ntp_transmit_ts.ntp_seconds= + htonl(state->xmit_time.tv_sec + NTP_1970); + d= state->xmit_time.tv_usec / 1e6; + d *= NTP_4G; + ntphdr->ntp_transmit_ts.ntp_fraction= htonl((uint32_t)d); + + if (state->sin6.sin6_family == AF_INET6) + { + /* Set port */ + state->sin6.sin6_port= htons(NTP_PORT); + + if (state->response_in) + { + /* Assume the send succeeded */ + r= len; + } + else + { + r= sendto(state->socket, base->packet, len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + } + +#if 0 + { static int doit=1; if (doit && r != -1) + { errno= ENOSYS; r= -1; } doit= !doit; } +#endif + serrno= errno; + + if (r == -1) + { + if (serrno != EACCES && + serrno != ECONNREFUSED && + serrno != EMSGSIZE) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) 
" } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + else + { +#if 0 + printf( +"send_pkt: sending IPv4 packet, do_icmp %d, parismod %d, index %d, state %p\n", + state->do_icmp, state->parismod, state->index, state); +#endif + + /* Set port */ + ((struct sockaddr_in *)&state->sin6)->sin_port= + htons(NTP_PORT); + + if (state->response_in) + r= 0; /* No need to send */ + else + { + r= sendto(state->socket, base->packet, len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + } + +#if 0 +{ static int doit=0; if (doit && r != -1) +{ errno= ENOSYS; r= -1; } doit= !doit; } +#endif + + serrno= errno; + if (r == -1) + { + if (serrno != EMSGSIZE) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + + if (state->open_result) + add_str(state, " }, "); + add_str(state, "{ "); + state->open_result= 0; + + /* Increment packets sent */ + state->sent++; + + /* Set timer */ + interval.tv_sec= state->timeout/1000000; + interval.tv_usec= state->timeout % 1000000; + evtimer_add(&state->timer, &interval); + + if (state->response_in) + { + if (state->sin6.sin6_family == AF_INET6) + ready_callback(0, 0, state); + else + ready_callback(0, 0, state); + } +} + +static void ready_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct ntpbase *base; + struct ntpstate *state; + int head; + ssize_t nrecv; + socklen_t slen; + double d; + struct ntphdr *ntphdr; + struct timeval now; + struct ntp_ts final_ts; + struct sockaddr_in remote; + char line[80]; + + state= s; + + if (state->response_in) + { + size_t len; + + len= sizeof(now); + read_response(state->socket, RESP_TIMEOFDAY, + &len, &now); + } + else + { + gettimeofday(&now, NULL); + if (state->resp_file_out) + { + write_response(state->resp_file_out, RESP_TIMEOFDAY, + sizeof(now), &now); + } + } + + base= state->base; + + slen= sizeof(remote); + if (state->response_in) + { + size_t len; + + len= sizeof(base->packet); + read_response(state->socket, RESP_PACKET, + &len, base->packet); + nrecv= len; + len= sizeof(remote); + read_response(state->socket, RESP_DSTADDR, + &len, &remote); + slen= len; + } + else + { + nrecv= recvfrom(state->socket, base->packet, + sizeof(base->packet), + MSG_DONTWAIT, (struct sockaddr *)&remote, &slen); + } + if (nrecv == -1) + { + /* Strange, read error */ + printf("ready_callback: read error '%s'\n", strerror(errno)); + return; + } + // printf("ready_callback: got packet\n"); + + if (state->resp_file_out) + { + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_DSTADDR, + sizeof(remote), &remote); + } + + + if (nrecv < sizeof(*ntphdr)) + { + /* Short packet */ + printf("ready_callback: too short %d\n", (int)nrecv); + return; + } + + if (!state->busy) + { +printf("%s, %d: sin6_family = %d\n", __FILE__, __LINE__, state->sin6.sin6_family); + return; + } + + if (state->open_result) + add_str(state, " }, { "); + + head= 1; + + ntphdr= (struct ntphdr *)base->packet; + + if (state->first) + { + /* Copy mostly static fields */ + state->ntp_flags= ntphdr->ntp_flags; + state->ntp_stratum= ntphdr->ntp_stratum; + state->ntp_poll= ntphdr->ntp_poll; + state->ntp_precision= ntphdr->ntp_precision; + state->ntp_root_delay= ntphdr->ntp_root_delay; + state->ntp_root_dispersion= 
ntphdr->ntp_root_dispersion; + state->ntp_reference_id= ntphdr->ntp_reference_id; + state->ntp_reference_ts= ntphdr->ntp_reference_ts; + + state->first= 0; + } + else + { + if ((ntphdr->ntp_flags & NTP_LI_MASK) != + (state->ntp_flags & NTP_LI_MASK)) + { + format_li(line, sizeof(line), ntphdr->ntp_flags); + add_str(state, line); + head= 0; + } + + if ((ntphdr->ntp_flags & NTP_VERSION_MASK) != + (state->ntp_flags & NTP_VERSION_MASK)) + { + snprintf(line, sizeof(line), ", " DBQ(version) ": %d", + ((ntphdr->ntp_flags & NTP_VERSION_MASK) >> + NTP_VERSION_SHIFT)); + add_str(state, line); + head= 0; + } + + if ((ntphdr->ntp_flags & NTP_MODE_MASK) != + (state->ntp_flags & NTP_MODE_MASK)) + { + if (!head) + add_str(state, ", "); + format_mode(line, sizeof(line), ntphdr->ntp_flags); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_stratum != state->ntp_stratum) + { + if (!head) + add_str(state, ", "); + format_stratum(line, sizeof(line), ntphdr->ntp_stratum); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_poll != state->ntp_poll) + { + if (!head) + add_str(state, ", "); + format_8bit(line, sizeof(line), "poll", + ntphdr->ntp_poll); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_precision != state->ntp_precision) + { + if (!head) + add_str(state, ", "); + format_8bit(line, sizeof(line), "precision", + ntphdr->ntp_precision); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_root_delay != state->ntp_root_delay) + { + if (!head) + add_str(state, ", "); + format_short_ts(line, sizeof(line), "root-delay", + ntohl(ntphdr->ntp_root_delay)); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_root_dispersion != state->ntp_root_dispersion) + { + if (!head) + add_str(state, ", "); + format_short_ts(line, sizeof(line), "root-dispersion", + ntohl(ntphdr->ntp_root_dispersion)); + add_str(state, line); + head= 0; + } + + if (ntphdr->ntp_reference_id != state->ntp_reference_id) + { + if (!head) + add_str(state, ", "); + format_ref_id(line, sizeof(line), + ntphdr->ntp_reference_id, ntphdr->ntp_stratum); + add_str(state, line); + head= 0; + } + + if (memcmp(&ntphdr->ntp_reference_ts, &state->ntp_reference_ts, + sizeof(ntphdr->ntp_reference_ts)) != 0) + { + if (!head) + add_str(state, ", "); + format_ts(line, sizeof(line), "ref-ts", + &ntphdr->ntp_reference_ts); + add_str(state, line); + head= 0; + } + } + + d= ntohl(ntphdr->ntp_origin_ts.ntp_seconds) + + ntohl(ntphdr->ntp_origin_ts.ntp_fraction)/NTP_4G; + snprintf(line, sizeof(line), "%s" DBQ(origin-ts) ": %.9f", + head ? 
"" : ", ", d); + head= 0; + add_str(state, line); + + d= ntohl(ntphdr->ntp_receive_ts.ntp_seconds) + + ntohl(ntphdr->ntp_receive_ts.ntp_fraction)/NTP_4G; + snprintf(line, sizeof(line), ", " DBQ(receive-ts) ": %.9f", d); + add_str(state, line); + + d= ntohl(ntphdr->ntp_transmit_ts.ntp_seconds) + + ntohl(ntphdr->ntp_transmit_ts.ntp_fraction)/NTP_4G; + snprintf(line, sizeof(line), ", " DBQ(transmit-ts) ": %.9f", d); + add_str(state, line); + + final_ts.ntp_seconds= now.tv_sec + NTP_1970; + d= now.tv_usec / 1e6; + d *= 4294967296.0; + final_ts.ntp_fraction= d; + + d= final_ts.ntp_seconds + final_ts.ntp_fraction/NTP_4G; + snprintf(line, sizeof(line), ", " DBQ(final-ts) ": %.9f", d); + add_str(state, line); + + /* Compute rtt */ + d= final_ts.ntp_seconds - ntohl(ntphdr->ntp_origin_ts.ntp_seconds) - + (ntohl(ntphdr->ntp_transmit_ts.ntp_seconds) - + ntohl(ntphdr->ntp_receive_ts.ntp_seconds)) + + final_ts.ntp_fraction/NTP_4G - + ntohl(ntphdr->ntp_origin_ts.ntp_fraction)/NTP_4G - + (ntohl(ntphdr->ntp_transmit_ts.ntp_fraction)/NTP_4G - + ntohl(ntphdr->ntp_receive_ts.ntp_fraction)/NTP_4G); + snprintf(line, sizeof(line), ", " DBQ(rtt) ": %f", d); + add_str(state, line); + + d= (ntohl(ntphdr->ntp_origin_ts.ntp_seconds) + + final_ts.ntp_seconds)/2.0 - + (ntohl(ntphdr->ntp_receive_ts.ntp_seconds) + + ntohl(ntphdr->ntp_transmit_ts.ntp_seconds))/2.0 + + (ntohl(ntphdr->ntp_origin_ts.ntp_fraction)/NTP_4G + + final_ts.ntp_fraction/NTP_4G)/2.0 - + (ntohl(ntphdr->ntp_receive_ts.ntp_fraction)/NTP_4G + + ntohl(ntphdr->ntp_transmit_ts.ntp_fraction)/NTP_4G)/2.0; + snprintf(line, sizeof(line), ", " DBQ(offset) ": %f", d); + add_str(state, line); + + state->open_result= 1; + + send_pkt(state); +} + +static struct ntpbase *ntp_base_new(struct event_base + *event_base) +{ + struct ntpbase *base; + + base= xzalloc(sizeof(*base)); + + base->event_base= event_base; + + base->tabsiz= 10; + base->table= xzalloc(base->tabsiz * sizeof(*base->table)); + + base->my_pid= getpid(); + + return base; +} + +static void noreply_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct ntpstate *state; + + state= s; + +#if 0 + printf("noreply_callback: gotresp = %d\n", + state->gotresp); +#endif + + if (!state->gotresp) + { + if (state->open_result) + add_str(state, " }, { "); + add_str(state, DBQ(x) ":" DBQ(*)); + state->open_result= 1; + } + + send_pkt(state); +} + +static void *ntp_init(int __attribute((unused)) argc, char *argv[], + void (*done)(void *state, int error)) +{ + uint32_t opt; + int i, do_v6; + unsigned count, timeout, size; + /* must be int-sized */ + size_t newsiz; + char *str_Atlas; + char *str_bundle; + const char *hostname; + char *out_filename; + const char *destportstr; + char *interface; + char *response_in, *response_out; + char *validated_response_in= NULL; + char *validated_response_out= NULL; + char *validated_out_filename= NULL; + struct ntpstate *state; + FILE *fh; + + if (!ntp_base) + { + ntp_base= ntp_base_new(EventBase); + if (!ntp_base) + crondlog(DIE9 "ntp_base_new failed"); + } + + /* Parse arguments */ + count= 3; + size= 0; + interface= NULL; + timeout= 1000; + str_Atlas= NULL; + str_bundle= NULL; + out_filename= NULL; + response_in= NULL; + response_out= NULL; + opt_complementary = "=1:4--6:i--u:c+:s+:w+:"; + + opt = getopt32(argv, NTP_OPT_STRING, &count, + &interface, &size, &timeout, &str_Atlas, &str_bundle, &out_filename, + &response_in, &response_out); + hostname = argv[optind]; + + if (opt == 0xffffffff) + { + crondlog(LVL8 "bad options"); + 
return NULL; + } + + do_v6= !!(opt & OPT_6); + + if (response_in) + { + validated_response_in= rebased_validated_filename(ATLAS_SPOOLDIR, + response_in, ATLAS_FUZZING_REL); + if (!validated_response_in) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_in); + goto err; + } + } + if (response_out) + { + validated_response_out= rebased_validated_filename(ATLAS_SPOOLDIR, + response_out, ATLAS_FUZZING_REL); + if (!validated_response_out) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_out); + goto err; + } + } + + if (out_filename) + { + validated_out_filename= rebased_validated_filename(ATLAS_SPOOLDIR, + out_filename, SAFE_PREFIX_REL); + if (!validated_out_filename) + { + crondlog(LVL8 "insecure file '%s'", out_filename); + goto err; + } + fh= fopen(validated_out_filename, "a"); + if (!fh) + { + crondlog(LVL8 "ntp: unable to append to '%s'", + validated_out_filename); + goto err; + } + fclose(fh); + } + + if (str_Atlas) + { + if (!validate_atlas_id(str_Atlas)) + { + crondlog(LVL8 "bad atlas ID '%s'", str_Atlas); + goto err; + } + } + if (str_bundle) + { + if (!validate_atlas_id(str_bundle)) + { + crondlog(LVL8 "bad bundle ID '%s'", str_bundle); + goto err; + } + } + + // sanity check: ntp_base->packet isn't smaller than expected + if (size > sizeof(ntp_base->packet) - sizeof(struct ntphdr)) { + crondlog(LVL8 "ntp: packet buffer only allows %u bytes maximum", sizeof(ntp_base->packet) - sizeof(struct ntphdr)); + goto err; + } + // trying to avoid fragmentation: 1280 mtu - 48 ntp - 8 udp - 40 ipv6 + // chrony has a max of 1092 byte extensions + if (size > 1184) { + crondlog(LVL8 "ntp: maximum extension size is 1184 bytes"); + goto err; + } + if (size > 0 && size < 28) { + crondlog(LVL8 "ntp: mimimum extension size is 28 bytes per RFC7822"); + goto err; + } + if (size % 4 != 0) { + crondlog(LVL8 "ntp: extension field size is a multiple of 4 per RFC7822"); + goto err; + } + + destportstr= "123"; + + state= xzalloc(sizeof(*state)); + state->count= count; + state->interface= interface ? strdup(interface) : NULL; + state->size= size; + state->destportstr= strdup(destportstr); + state->timeout= timeout*1000; + state->atlas= str_Atlas ? strdup(str_Atlas) : NULL; + state->bundle= str_bundle ? 
strdup(str_bundle) : NULL; + state->hostname= strdup(hostname); + state->do_v6= do_v6; + state->out_filename= validated_out_filename; + validated_out_filename= NULL; + state->response_in= validated_response_in; + validated_response_in= NULL; + state->response_out= validated_response_out; + validated_response_out= NULL; + state->base= ntp_base; + state->busy= 0; + state->result= NULL; + state->reslen= 0; + state->resmax= 0; + + for (i= 0; itabsiz; i++) + { + if (ntp_base->table[i] == NULL) + break; + } + if (i >= ntp_base->tabsiz) + { + newsiz= 2*ntp_base->tabsiz; + ntp_base->table= xrealloc(ntp_base->table, + newsiz*sizeof(*ntp_base->table)); + for (i= ntp_base->tabsiz; itable[i]= NULL; + i= ntp_base->tabsiz; + ntp_base->tabsiz= newsiz; + } + state->index= i; + ntp_base->table[i]= state; + ntp_base->done= done; + + memset(&state->loc_sin6, '\0', sizeof(state->loc_sin6)); + state->loc_socklen= 0; + + evtimer_assign(&state->timer, state->base->event_base, + noreply_callback, state); + + return state; + +err: + if (validated_response_in) free(validated_response_in); + if (validated_response_out) free(validated_response_out); + if (validated_out_filename) free(validated_out_filename); + return NULL; +} + +static void ntp_start2(void *state) +{ + struct ntpstate *ntpstate; + + ntpstate= state; + + if (!ntpstate->busy) + { + printf("ntp_start: not busy, can't continue\n"); + return; + } + + ntpstate->min= ULONG_MAX; + ntpstate->max= 0; + ntpstate->sum= 0; + ntpstate->sentpkts= 0; + ntpstate->rcvdpkts= 0; + ntpstate->duppkts= 0; + + ntpstate->sent= 0; + ntpstate->seq= 0; + ntpstate->first= 1; + ntpstate->done= 0; + ntpstate->not_done= 0; + + if (ntpstate->result) free(ntpstate->result); + ntpstate->resmax= 80; + ntpstate->result= xmalloc(ntpstate->resmax); + ntpstate->reslen= 0; + ntpstate->open_result= 0; + ntpstate->starttime= atlas_time(); + + if (create_socket(ntpstate) == -1) + return; + if (ntpstate->do_v6) + { + ntpstate->loc_sin6.sin6_port= htons(SRC_BASE_PORT + + ntpstate->index); + } + else + { + ((struct sockaddr_in *)(&ntpstate->loc_sin6))-> + sin_port= htons(SRC_BASE_PORT + + ntpstate->index); + } + + send_pkt(ntpstate); +} + +static int create_socket(struct ntpstate *state) +{ + int af, type, protocol; + int r, serrno; + char line[80]; + + af= (state->do_v6 ? 
AF_INET6 : AF_INET); + type= SOCK_DGRAM; + protocol= 0; + + if (!state->response_in) + state->socket= xsocket(af, type, protocol); +#if 0 + { errno= ENOSYS; state->socket= -1; } +#endif + if (state->socket == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(socket failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + + if (state->interface) + { + if (bind_interface(state->socket, + af, state->interface) == -1) + { + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(bind_interface failed) " }"); + add_str(state, line); + report(state); + return -1; + } + } + + if (state->response_in) + r= 0; /* No need to connect */ + else + { + r= connect(state->socket, + (struct sockaddr *)&state->sin6, + state->socklen); + } +#if 0 + { errno= ENOSYS; r= -1; } +#endif + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(connect failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + state->loc_socklen= sizeof(state->loc_sin6); + if (state->response_in) + { + size_t len; + + len= sizeof(state->loc_sin6); + read_response(state->socket, RESP_SOCKNAME, + &len, (struct sockaddr *)&state->loc_sin6); + state->loc_socklen= len; + } + else + { + if (getsockname(state->socket, + (struct sockaddr*)&state->loc_sin6, + &state->loc_socklen) == -1) + { + crondlog(DIE9 "getsockname failed"); + } + if (state->resp_file_out) + { + write_response(state->resp_file_out, + RESP_SOCKNAME, state->loc_socklen, + &state->loc_sin6); + } + } +#if 0 + printf("Got localname: %s\n", + inet_ntop(AF_INET6, + &state->loc_sin6.sin6_addr, + buf, sizeof(buf))); +#endif + + + event_assign(&state->event_socket, state->base->event_base, + state->socket, + EV_READ | EV_PERSIST, + (af == AF_INET6 ? ready_callback : ready_callback), + state); + if (!state->response_in) + event_add(&state->event_socket, NULL); + + return 0; +} + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx) +{ + int r; + size_t tmp_len; + struct ntpstate *env; + double nsecs; + struct timespec now, elapsed; + struct addrinfo tmp_res; + struct sockaddr_storage tmp_sockaddr; + char line[160]; + + env= ctx; + + if (!env->dnsip) + { + crondlog(LVL7 + "dns_cb: in dns_cb but not doing dns at this time"); + if (res) + evutil_freeaddrinfo(res); + return; + } + + gettime_mono(&now); + elapsed.tv_sec= now.tv_sec - env->start_time.tv_sec; + if (now.tv_nsec < env->start_time.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - env->start_time.tv_nsec; + nsecs= (elapsed.tv_sec * 1e9 + elapsed.tv_nsec); + env->ttr= nsecs/1e6; + + if (result != 0) + { + /* Hmm, great. 
Where do we put this init code */ + if (env->result) free(env->result); + env->resmax= 80; + env->result= xmalloc(env->resmax); + env->reslen= 0; + + env->starttime= time(NULL); + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(name resolution failed: %s) " }", + evutil_gai_strerror(result)); + add_str(env, line); + report(env); + return; + } + + env->dnsip= 0; + env->report_dst= 0; + + env->dns_res= res; + env->dns_curr= res; + + if (env->response_in) + { + + env->socket= open(env->response_in, O_RDONLY); + if (env->socket == -1) + { + crondlog(DIE9 "unable to open '%s'", + env->response_in); + } + + tmp_len= sizeof(tmp_res); + read_response(env->socket, RESP_ADDRINFO, &tmp_len, &tmp_res); + assert(tmp_len == sizeof(tmp_res)); + tmp_len= sizeof(tmp_sockaddr); + read_response(env->socket, RESP_ADDRINFO_SA, + &tmp_len, &tmp_sockaddr); + assert(tmp_len == tmp_res.ai_addrlen); + tmp_res.ai_addr= (struct sockaddr *)&tmp_sockaddr; + env->dns_curr= &tmp_res; + } + + while (env->dns_curr) + { + if (env->response_out) + { + write_response(env->resp_file_out, RESP_ADDRINFO, + sizeof(*env->dns_curr), env->dns_curr); + write_response(env->resp_file_out, RESP_ADDRINFO_SA, + env->dns_curr->ai_addrlen, + env->dns_curr->ai_addr); + } + + env->socklen= env->dns_curr->ai_addrlen; + if (env->socklen > sizeof(env->sin6)) + break; /* Weird */ + memcpy(&env->sin6, env->dns_curr->ai_addr, + env->socklen); + + r= atlas_check_addr((struct sockaddr *)&env->sin6, + env->socklen); + if (r == -1) + { + if (env->result) free(env->result); + env->resmax= 80; + env->result= xmalloc(env->resmax); + env->reslen= 0; + + env->starttime= time(NULL); + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(address not allowed) " }"); + add_str(env, line); + env->dnsip= 1; + env->report_dst= 1; + report(env); + return; + } + + ntp_start2(env); + + if (!env->response_in) + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + return; + } + + /* Something went wrong */ + if (!env->response_in) + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + snprintf(line, sizeof(line), +"%s{ " DBQ(error) ":" DBQ(name resolution failed: out of addresses) " } ] }", + env->sent ? " }, " : ""); + add_str(env, line); + report(env); +} + +static void ntp_start(void *state) +{ + struct ntpstate *ntpstate; + struct evutil_addrinfo hints; + + ntpstate= state; + + if (ntpstate->busy) + { + printf("ntp_start: busy, can't start\n"); + return; + } + ntpstate->busy= 1; + + ntpstate->socket= -1; + + if (ntpstate->response_out) + { + ntpstate->resp_file_out= fopen(ntpstate->response_out, "w"); + if (!ntpstate->resp_file_out) + { + crondlog(DIE9 "unable to write to '%s'", + ntpstate->response_out); + } + } + + + if (ntpstate->response_in) + { + ntpstate->dnsip= 1; + dns_cb(0, 0, ntpstate); + } + else + { + memset(&hints, '\0', sizeof(hints)); + hints.ai_socktype= SOCK_DGRAM; + hints.ai_family= ntpstate->do_v6 ? 
AF_INET6 : AF_INET; + ntpstate->dnsip= 1; + gettime_mono(&ntpstate->start_time); + (void) evdns_getaddrinfo(DnsBase, ntpstate->hostname, + ntpstate->destportstr, &hints, dns_cb, ntpstate); + } +} + +static int ntp_delete(void *state) +{ + int ind; + struct ntpstate *ntpstate; + struct ntpbase *base; + + ntpstate= state; + + printf("ntp_delete: state %p, index %d, busy %d\n", + state, ntpstate->index, ntpstate->busy); + + if (ntpstate->busy) + return 0; + + base= ntpstate->base; + ind= ntpstate->index; + + if (base->table[ind] != ntpstate) + crondlog(DIE9 "strange, state not in table"); + base->table[ind]= NULL; + + event_del(&ntpstate->timer); + + free(ntpstate->atlas); + ntpstate->atlas= NULL; + free(ntpstate->bundle); + ntpstate->bundle= NULL; + free(ntpstate->hostname); + ntpstate->hostname= NULL; + free(ntpstate->destportstr); + ntpstate->destportstr= NULL; + free(ntpstate->out_filename); + ntpstate->out_filename= NULL; + free(ntpstate->interface); + ntpstate->interface= NULL; + + free(ntpstate); + + return 1; +} + +struct testops ntp_ops = { ntp_init, ntp_start, ntp_delete }; + diff --git a/probe-busybox/eperd/ping.c b/probe-busybox/eperd/ping.c new file mode 100644 index 00000000..008391d6 --- /dev/null +++ b/probe-busybox/eperd/ping.c @@ -0,0 +1,1840 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Copyright (c) 2009 Rocco Carbone + * This includes code Copyright (c) 2009 Rocco Carbone + * taken from the libevent-based ping. + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * ping.c + */ + +#include "libbb.h" +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +/* Don't report psize yet. */ +#define DO_PSIZE 0 + +#define DBQ(str) "\"" #str "\"" + +#define PING_OPT_STRING ("!46eprc:s:A:B:O:i:I:R:W:") + +enum +{ + opt_4 = (1 << 0), + opt_6 = (1 << 1), + opt_e = (1 << 2), + opt_p = (1 << 3), + opt_r = (1 << 4), +}; + +/* Intervals and timeouts (all are in milliseconds unless otherwise specified) + */ +#define DEFAULT_PING_INTERVAL 1000 /* 1 sec - 0 means flood mode */ + +/* Max IP packet size is 65536 while fixed IP header size is 20; + * the traditional ping program transmits 56 bytes of data, so the + * default data size is calculated as to be like the original + */ +#define IPHDR 20 +#define MAX_DATA_SIZE (4096 - IPHDR) + +#define ICMP6_HDRSIZE (offsetof(struct icmp6_hdr, icmp6_data16[2])) + +/* Error codes */ +#define PING_ERR_NONE 0 +#define PING_ERR_TIMEOUT 1 /* Communication with the host timed out */ +#define PING_ERR_DUP 2 /* Duplicate packet */ +#define PING_ERR_DONE 3 /* Max number of packets to send has been + * reached. + */ +#define PING_ERR_SENDTO 4 /* Sendto system call failed */ +#define PING_ERR_DNS 5 /* DNS error */ +#define PING_ERR_DNS_NO_ADDR 6 /* DNS no suitable addresses */ +#define PING_ERR_BAD_ADDR 7 /* Addresses is not allowed */ +#define PING_ERR_SHUTDOWN 10 /* The request was canceled because the PING subsystem was shut down */ +#define PING_ERR_CANCEL 12 /* The request was canceled via a call to evping_cancel_request */ +#define PING_ERR_UNKNOWN 16 /* An unknown error occurred */ + +#define RESP_PACKET 1 +#define RESP_PEERNAME 2 +#define RESP_SOCKNAME 3 +#define RESP_TTL 4 +#define RESP_DSTADDR 5 +#define RESP_ADDRINFO 6 +#define RESP_ADDRINFO_SA 7 + +/* Definition for various types of counters */ +typedef uint64_t counter_t; + +/* For matching up requests and replies. 
Assume that 64 bits is enough */ +struct cookie +{ + uint8_t data[8]; +}; + +/* How to keep track of a PING session */ +struct pingbase +{ + struct event_base *event_base; + + pid_t pid; /* Identifier to send with each ICMP + * Request */ + + /* A list of hosts to ping. */ + struct pingstate **table; + int tabsiz; + + void (*done)(void *state, int error); /* Called when a ping is done */ + + u_char packet[MAX_DATA_SIZE]; +}; + +struct pingstate +{ + /* Parameters */ + char *atlas; + char *bundle_id; + char *hostname; + char *interface; + int pingcount; + char *out_filename; + char include_probe_id; + char delay_name_res; + unsigned interval; + + /* State */ + struct sockaddr_in6 sin6; + socklen_t socklen; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + int busy; + int socket; + char got_reply; + char first; + char no_dst; + char no_src; + char error; + unsigned char ttl; + unsigned size; + unsigned psize; + + struct event event; /* Used to detect read events on raw + * socket */ + int event_is_init; /* event variable is initialized */ + + char *result; + size_t reslen; + size_t resmax; + + struct pingbase *base; + struct cookie cookie; + + sa_family_t af; /* Desired address family */ + struct evutil_addrinfo *dns_res; + struct evutil_addrinfo *dns_curr; + + size_t maxsize; + + int maxpkts; /* Number of packets to send */ + + int index; /* Index into the array of hosts */ + u_int8_t seq; /* ICMP sequence (modulo 256) for next + * run + */ + u_int8_t rcvd_ttl; /* TTL in (last) reply packet */ + char dnsip; + char send_error; + struct timespec start_time; /* At the moment only for + * DNS resolution + */ + double ttr; /* Time to resolve a name, in ms */ + + struct event ping_timer; /* Timer to ping host at given + * intervals + */ + + /* Packets Counters */ + size_t cursize; + counter_t sentpkts; /* Total # of ICMP Echo Requests sent */ + + /* For fuzzing */ + char *response_in; + char *response_out; + FILE *resp_file_out; +}; + +/* User Data added to the ICMP header + * + * The 'ts' is the time the request is sent on the wire + * and it is used to compute the network round-trip value. 
+ * + * The 'index' parameter is an index value in the array of hosts to ping + * and it is used to relate each response with the corresponding request + */ +struct evdata { + struct timespec ts; + uint32_t index; + struct cookie cookie; +}; + + +static void ready_callback4(int __attribute((unused)) unused, + const short __attribute((unused)) event, void * arg); +static void ready_callback6(int __attribute((unused)) unused, + const short __attribute((unused)) event, void * arg); + +/* Initialize a struct timeval by converting milliseconds */ +static void +msecstotv(time_t msecs, struct timeval *tv) +{ + tv->tv_sec = msecs / 1000; + tv->tv_usec = msecs % 1000 * 1000; +} + +static void add_str(struct pingstate *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen + len+1 > state->resmax) + { + state->resmax= state->reslen + len+1 + 80; + state->result= xrealloc(state->result, state->resmax); + } + memcpy(state->result+state->reslen, str, len+1); + state->reslen += len; + //printf("add_str: result = '%s'\n", state->result); +} + +static void report(struct pingstate *state) +{ + int r; + FILE *fh; + struct addrinfo *ai; + char namebuf[NI_MAXHOST]; + struct addrinfo hints; + + if (state->out_filename) + { + fh= fopen(state->out_filename, "a"); + if (!fh) + crondlog(DIE9 "ping: unable to append to '%s'", + state->out_filename); + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) + ", %s" + ", " DBQ(lts) ":%d" + ", " DBQ(time) ":%ld, ", + state->atlas, atlas_get_version_json_str(), + get_timesync(), + (long)atlas_time()); + if (state->bundle_id) + fprintf(fh, DBQ(bundle) ":%s, ", state->bundle_id); + } + + fprintf(fh, DBQ(dst_name) ":" DBQ(%s), + state->hostname); + + /* Check if hostname is numeric or had to be resolved */ + memset(&hints, '\0', sizeof(hints)); + hints.ai_flags= AI_NUMERICHOST; + r= getaddrinfo(state->hostname, NULL, &hints, &ai); + if (r == 0) + { + /* Getaddrinfo succeded so hostname is an address literal */ + freeaddrinfo(ai); + } + else + { + /* Assume that name resolution was required */ + fprintf(fh, ", " DBQ(ttr) ":%f", state->ttr); + } + + fprintf(fh, ", " DBQ(af) ":%d", + state->af == AF_INET ? 
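/*
 * report() only emits the "ttr" (time-to-resolve) field when the target was a
 * real host name: it decides this by asking getaddrinfo() to parse the string
 * with AI_NUMERICHOST, which fails for anything that is not an address
 * literal.  A self-contained version of that check; the function name is
 * illustrative, not part of this patch.
 *
 *     #include <netdb.h>
 *     #include <string.h>
 *
 *     // Returns 1 if 'name' is a numeric IPv4/IPv6 literal, 0 otherwise.
 *     static int is_addr_literal(const char *name)
 *     {
 *         struct addrinfo hints, *res;
 *
 *         memset(&hints, 0, sizeof(hints));
 *         hints.ai_flags = AI_NUMERICHOST;   // parse only, never resolve
 *         if (getaddrinfo(name, NULL, &hints, &res) != 0)
 *             return 0;
 *         freeaddrinfo(res);
 *         return 1;
 *     }
 */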
4 : 6); + + if (!state->no_dst) + { + namebuf[0]= '\0'; + getnameinfo((struct sockaddr *)&state->sin6, state->socklen, + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + + fprintf(fh, ", " DBQ(dst_addr) ":" DBQ(%s), namebuf); + } + + if (!state->no_dst && !state->no_src) + { + namebuf[0]= '\0'; + getnameinfo((struct sockaddr *)&state->loc_sin6, + state->loc_socklen, namebuf, sizeof(namebuf), + NULL, 0, NI_NUMERICHOST); + + fprintf(fh, ", \"src_addr\":\"%s\"", namebuf); + } + + fprintf(fh, ", " DBQ(proto) ":" DBQ(ICMP)); + + if (state->got_reply) + fprintf(fh, ", " DBQ(ttl) ":%d", state->ttl); + + fprintf(fh, ", " DBQ(size) ":%d", state->size); +#if DO_PSIZE + if (state->psize != -1) + fprintf(fh, ", " DBQ(psize) ":%d", state->psize); +#endif /* DO_PSIZE */ + + fprintf(fh, ", \"result\": [ %s ] }\n", state->result); + + free(state->result); + state->result= NULL; + + if (state->out_filename) + fclose(fh); + + /* Kill the event and close socket */ + if (!state->response_in) + { + if (state->event_is_init) + event_del(&state->event); + } + if (state->socket != -1) + { + close(state->socket); + state->socket= -1; + } + + state->busy= 0; + +} + +static void ping_cb(int result, int bytes, int psize, + struct sockaddr *sa, socklen_t socklen, + struct sockaddr *loc_sa, socklen_t loc_socklen, + int seq, int ttl, + struct timespec * elapsed, void * arg) +{ + struct pingstate *pingstate; + double nsecs; + char namebuf1[NI_MAXHOST], namebuf2[NI_MAXHOST]; + char line[256]; + + (void)socklen; /* Suppress GCC unused parameter warning */ + + pingstate= arg; + +#if 0 + crondlog(LVL7 "in ping_cb: result %d, bytes %d, seq %d, ttl %d", + result, bytes, seq, ttl); +#endif + + if (!pingstate->busy) + { + crondlog(LVL8 "ping_cb: not busy for state %p, '%s'", + pingstate, pingstate->hostname); + return; + } + + if (pingstate->first) + { + pingstate->size= bytes; + pingstate->psize= psize; + pingstate->ttl= ttl; + } + + if (result == PING_ERR_NONE || result == PING_ERR_DUP) + { + /* Got a ping reply */ + nsecs= (elapsed->tv_sec * 1e9 + elapsed->tv_nsec); + + snprintf(line, sizeof(line), + "%s{ ", pingstate->first ? 
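/*
 * Both dst_addr and src_addr above are produced with getnameinfo() and
 * NI_NUMERICHOST, which turns a struct sockaddr back into its textual address
 * without performing a reverse DNS lookup.  A small sketch of that idiom;
 * the helper name is illustrative.
 *
 *     #include <netdb.h>
 *     #include <stdio.h>
 *     #include <sys/socket.h>
 *
 *     // Format an IPv4/IPv6 sockaddr as a numeric string; returns 0 on success.
 *     static int addr_to_str(const struct sockaddr *sa, socklen_t salen,
 *         char *buf, socklen_t buflen)
 *     {
 *         return getnameinfo(sa, salen, buf, buflen, NULL, 0, NI_NUMERICHOST);
 *     }
 *
 *     // Usage:
 *     //     char name[NI_MAXHOST];
 *     //     if (addr_to_str(sa, salen, name, sizeof(name)) == 0)
 *     //         printf("%s\n", name);
 */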
"" : ", "); + add_str(pingstate, line); + pingstate->first= 0; + if (result == PING_ERR_DUP) + { + add_str(pingstate, DBQ(dup) ":1, "); + } + + snprintf(line, sizeof(line), + DBQ(rtt) ":%f", + nsecs/1e6); + add_str(pingstate, line); + + if (!pingstate->got_reply && result != PING_ERR_DUP) + { + memcpy(&pingstate->loc_sin6, loc_sa, loc_socklen); + pingstate->loc_socklen= loc_socklen; + + pingstate->got_reply= 1; + } + + if (pingstate->size != bytes) + { + snprintf(line, sizeof(line), + ", " DBQ(size) ":%d", bytes); + add_str(pingstate, line); + pingstate->size= bytes; + } + if (pingstate->psize != psize && psize != -1) + { +#if DO_PSIZE + snprintf(line, sizeof(line), + ", " DBQ(psize) ":%d", psize); + add_str(pingstate, line); +#endif /* DO_PSIZE */ + pingstate->psize= psize; + } + if (pingstate->ttl != ttl) + { + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d", ttl); + add_str(pingstate, line); + pingstate->ttl= ttl; + } + namebuf1[0]= '\0'; + getnameinfo((struct sockaddr *)&pingstate->loc_sin6, + pingstate->loc_socklen, namebuf1, + sizeof(namebuf1), NULL, 0, NI_NUMERICHOST); + namebuf2[0]= '\0'; + getnameinfo(loc_sa, loc_socklen, namebuf2, + sizeof(namebuf2), NULL, 0, NI_NUMERICHOST); + + if (strcmp(namebuf1, namebuf2) != 0) + { + printf("loc_sin6: %s\n", namebuf1); + + printf("loc_sa: %s\n", namebuf2); + + snprintf(line, sizeof(line), + ", " DBQ(src_addr) ":" DBQ(%s), namebuf2); + add_str(pingstate, line); + } + + add_str(pingstate, " }"); + } + if (result == PING_ERR_TIMEOUT) + { + /* No ping reply */ + + snprintf(line, sizeof(line), + "%s{ " DBQ(x) ":" DBQ(*), + pingstate->first ? "" : ", "); + add_str(pingstate, line); + } + if (result == PING_ERR_SENDTO) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s), + pingstate->first ? "" : ", ", strerror(seq)); + add_str(pingstate, line); + } + if (result == PING_ERR_TIMEOUT || result == PING_ERR_SENDTO) + { + add_str(pingstate, " }"); + pingstate->first= 0; + } + if (result == PING_ERR_DNS) + { + pingstate->size= bytes; + pingstate->psize= psize; + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(dns resolution failed: %s) " }", + pingstate->first ? "" : ", ", (char *)sa); + add_str(pingstate, line); + report(pingstate); + } + if (result == PING_ERR_BAD_ADDR) + { + pingstate->size= bytes; + pingstate->psize= psize; + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(address not allowed) " }", + pingstate->first ? "" : ", "); + add_str(pingstate, line); + + pingstate->no_dst= 0; + pingstate->no_src= 1; + + report(pingstate); + } + if (result == PING_ERR_DONE) + { + pingstate->error= (pingstate->got_reply == 0); + report(pingstate); + } +} + +/* + * Checksum routine for Internet Protocol family headers (C Version). + * From ping examples in W. Richard Stevens "Unix Network Programming" book. + */ +static int mkcksum(u_short *p, int n) +{ + u_short answer; + long sum = 0; + u_short odd_byte = 0; + + while (n > 1) + { + sum += *p++; + n -= 2; + } + + /* mop up an odd byte, if necessary */ + if (n == 1) + { + * (u_char *) &odd_byte = * (u_char *) p; + sum += odd_byte; + } + + sum = (sum >> 16) + (sum & 0xffff); /* add high 16 to low 16 */ + sum += (sum >> 16); /* add carry */ + answer = ~sum; /* ones-complement, truncate */ + + return answer; +} + + +/* + * Format an ICMP Echo Request packet to be sent over the wire. 
+ * + * o the IP packet will be added on by the kernel + * o the ID field is the Unix process ID + * o the sequence number is an ascending integer + * + * The first 8 bytes of the data portion are used + * to hold a Unix "timeval" struct in VAX byte-order, + * to compute the network round-trip value. + * + * The second 8 bytes of the data portion are used + * to keep an unique integer used as index in the array + * ho hosts being monitored + */ +static void fmticmp4(u_char *buffer, size_t *sizep, u_int8_t seq, + uint32_t idx, pid_t pid, struct cookie *cookiep, + int include_probe_id) +{ + int probe_id; + size_t minlen, len; + struct icmp *icmp = (struct icmp *) buffer; + struct evdata *data = (struct evdata *) (buffer + ICMP_MINLEN); + struct timespec now; + char probe_id_line[80]; + + minlen= sizeof(*data); + if (*sizep < minlen) + *sizep= minlen; + if (*sizep > MAX_DATA_SIZE - ICMP_MINLEN) + *sizep= MAX_DATA_SIZE - ICMP_MINLEN; + + memset(buffer, '\0', *sizep + ICMP_MINLEN); + + /* The ICMP header (no checksum here until user data has been filled in) */ + icmp->icmp_type = ICMP_ECHO; /* type of message */ + icmp->icmp_code = 0; /* type sub code */ + + /* Keep the high nibble clear for traceroute */ + icmp->icmp_id = 0x0fff & pid; /* unique process identifier */ + icmp->icmp_seq = htons(seq); /* message identifier */ + + /* User data */ + gettime_mono(&now); + data->ts = now; /* current uptime time */ + data->index = idx; /* index into an array */ + data->cookie= *cookiep; + + if (include_probe_id) + { + probe_id= get_probe_id(); + if (probe_id == -1) + { + snprintf(probe_id_line, sizeof(probe_id_line), + "RIPE Atlas probe "); + } + else + { + snprintf(probe_id_line, sizeof(probe_id_line), + "RIPE Atlas probe %d", probe_id); + } + + len= strlen(probe_id_line); + if (*sizep < sizeof(*data) + len) + len= *sizep - sizeof(*data); + if (len) + { + memcpy(buffer + ICMP_MINLEN + sizeof(*data), + probe_id_line, len); + } + } + + /* Last, compute ICMP checksum */ + icmp->icmp_cksum = 0; + icmp->icmp_cksum = mkcksum((u_short *) icmp, ICMP_MINLEN + *sizep); /* ones complement checksum of struct */ +} + + +/* + * Format an ICMPv6 Echo Request packet to be sent over the wire. + * + * o the IP packet will be added on by the kernel + * o the ID field is the Unix process ID + * o the sequence number is an ascending integer + * + * The first 8 bytes of the data portion are used + * to hold a Unix "timeval" struct in VAX byte-order, + * to compute the network round-trip value. 
+ * + * The second 8 bytes of the data portion are used + * to keep an unique integer used as index in the array + * ho hosts being monitored + */ +static void fmticmp6(u_char *buffer, size_t *sizep, + u_int8_t seq, uint32_t idx, pid_t pid, struct cookie *cookiep, + int include_probe_id) +{ + int probe_id; + size_t minlen, len; + struct icmp6_hdr *icmp = (struct icmp6_hdr *) buffer; + struct evdata *data = (struct evdata *) (buffer + ICMP6_HDRSIZE); + struct timespec now; + char probe_id_line[80]; + + minlen= sizeof(*data); + if (*sizep < minlen) + *sizep= minlen; + if (*sizep > MAX_DATA_SIZE - ICMP6_HDRSIZE) + *sizep= MAX_DATA_SIZE - ICMP6_HDRSIZE; + + memset(buffer, '\0', *sizep+ICMP6_HDRSIZE); + + /* The ICMP header (no checksum here until user data has been filled in) */ + icmp->icmp6_type = ICMP6_ECHO_REQUEST; /* type of message */ + icmp->icmp6_code = 0; /* type sub code */ + icmp->icmp6_id = 0xffff & pid; /* unique process identifier */ + icmp->icmp6_seq = htons(seq); /* message identifier */ + + /* User data */ + gettime_mono(&now); + data->ts = now; /* current uptime time */ + data->index = idx; /* index into an array */ + data->cookie= *cookiep; + + if (include_probe_id) + { + probe_id= get_probe_id(); + if (probe_id == -1) + { + snprintf(probe_id_line, sizeof(probe_id_line), + "RIPE Atlas probe "); + } + else + { + snprintf(probe_id_line, sizeof(probe_id_line), + "RIPE Atlas probe %d", probe_id); + } + + len= strlen(probe_id_line); + if (*sizep < sizeof(*data) + len) + len= *sizep - sizeof(*data); + if (len) + { + memcpy(buffer + ICMP6_HDRSIZE + sizeof(*data), + probe_id_line, len); + } + } + + + icmp->icmp6_cksum = 0; +} + + +/* Attempt to transmit an ICMP Echo Request to a given host */ +static void ping_xmit(struct pingstate *host) +{ + struct pingbase *base = host->base; + int nsent; + struct timeval tv_interval; + + if (host->sentpkts >= host->maxpkts) + { + /* Done. 
*/ + ping_cb(PING_ERR_DONE, host->cursize, host->psize, + (struct sockaddr *)&host->sin6, host->socklen, + (struct sockaddr *)&host->loc_sin6, host->loc_socklen, + 0, host->rcvd_ttl, NULL, + host); + if (host->dns_res) + { + evutil_freeaddrinfo(host->dns_res); + host->dns_res= NULL; + } + if (host->base->done) + host->base->done(host, host->error); + + return; + } + + /* Transmit the request over the network */ + if (host->sin6.sin6_family == AF_INET6) + { + /* Format the ICMP Echo Reply packet to send */ + fmticmp6(base->packet, &host->cursize, host->seq, host->index, + base->pid, &host->cookie, host->include_probe_id); + + host->loc_socklen= sizeof(host->loc_sin6); + if (host->response_in) + { + size_t len; + + len= sizeof(host->loc_sin6); + read_response(host->socket, RESP_SOCKNAME, + &len, &host->loc_sin6); + host->loc_socklen= len; + } + else + { + getsockname(host->socket, + (struct sockaddr *)&host->loc_sin6, + &host->loc_socklen); + if (host->resp_file_out) + { + write_response(host->resp_file_out, + RESP_SOCKNAME, host->loc_socklen, + &host->loc_sin6); + } + } + + if (host->response_in) + { + /* Assume the send succeeded */ + nsent= host->cursize+ICMP6_HDRSIZE; + } + else + { + nsent = sendto(host->socket, base->packet, + host->cursize+ICMP6_HDRSIZE, + MSG_DONTWAIT, (struct sockaddr *)&host->sin6, + host->socklen); + } + + } + else + { + /* Format the ICMP Echo Reply packet to send */ + fmticmp4(base->packet, &host->cursize, host->seq, + host->index, base->pid, &host->cookie, + host->include_probe_id); + + host->loc_socklen= sizeof(host->loc_sin6); + getsockname(host->socket, (struct sockaddr *)&host->loc_sin6, + &host->loc_socklen); + + if (host->response_in) + { + /* Assume the send succeeded */ + nsent= host->cursize+ICMP_MINLEN; + } + else + { + nsent = sendto(host->socket, base->packet, + host->cursize+ICMP_MINLEN, + MSG_DONTWAIT, (struct sockaddr *)&host->sin6, + host->socklen); + } + } + + if (nsent > 0) + { + /* Update timestamps and counters */ + host->sentpkts++; + + } + else + { + host->sentpkts++; + host->send_error= 1; + + /* Report the failure and stop */ + ping_cb(PING_ERR_SENDTO, host->cursize, -1, + (struct sockaddr *)&host->sin6, host->socklen, + (struct sockaddr *)&host->loc_sin6, host->loc_socklen, + errno, 0, NULL, + host); + } + + + /* Add the timer to handle no reply condition in the given timeout */ + msecstotv(host->interval, &tv_interval); + if (!host->response_in) + evtimer_add(&host->ping_timer, &tv_interval); + + if (host->response_in) + { + if (host->sin6.sin6_family == AF_INET6) + ready_callback6(0, 0, host); + else + ready_callback4(0, 0, host); + } +} + + +/* The callback to handle timeouts due to destination host unreachable condition */ +static void noreply_callback(int __attribute((unused)) unused, const short __attribute((unused)) event, void *h) +{ + struct pingstate *host = h; + + if (!host->got_reply && !host->send_error) + { + ping_cb(PING_ERR_TIMEOUT, host->cursize, -1, + (struct sockaddr *)&host->sin6, host->socklen, + NULL, 0, host->seq, -1, NULL, host); + + /* Update the sequence number for the next run */ + host->seq = (host->seq + 1) % 256; + } + + ping_xmit(host); +} + +/* + * Called by libevent when the kernel says that the raw socket is ready for reading. + * + * It reads a packet from the wire and attempt to decode and relate ICMP Echo Request/Reply. 
+ * + * To be legal the packet received must be: + * o of enough size (> IPHDR + ICMP_MINLEN) + * o of ICMP Protocol + * o of type ICMP_ECHOREPLY + * o the one we are looking for (matching the same identifier of all the packets the program is able to send) + */ +static void ready_callback4 (int __attribute((unused)) unused, + const short __attribute((unused)) event, void * arg) +{ + struct pingstate *state; + struct pingbase *base; + int nrecv, isDup; + struct sockaddr_in remote; /* responding internet address */ + socklen_t slen = sizeof(struct sockaddr); + struct sockaddr_in *sin4p; + struct sockaddr_in loc_sin4; + struct ip * ip; + struct icmphdr * icmp; + struct evdata * data; + int hlen = 0; + struct timespec now; + state= arg; + base = state->base; + + /* Pointer to relevant portions of the packet (IP, ICMP and user + * data) */ + ip = (struct ip *) base->packet; + + /* Time the packet has been received */ + gettime_mono(&now); + + /* Receive data from the network */ + if (state->response_in) + { + size_t len; + + len= sizeof(base->packet); + read_response(state->socket, RESP_PACKET, + &len, base->packet); + nrecv= len; + + len= sizeof(remote); + read_response(state->socket, RESP_PEERNAME, + &len, &remote); + } + else nrecv = recvfrom(state->socket, base->packet, sizeof(base->packet), MSG_DONTWAIT, (struct sockaddr *) &remote, &slen); + if (nrecv < 0) + { + goto done; + } + + if (state->resp_file_out) + { + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_PEERNAME, + sizeof(remote), &remote); + } + +#if 0 + { int i; + printf("received:"); + for (i= 0; ipacket[i]); + printf("\n"); + } +#endif + + /* Calculate the IP header length */ + hlen = ip->ip_hl * 4; + + /* Check the IP header */ + if (nrecv < hlen + ICMP_MINLEN + sizeof (struct evdata) || + ip->ip_hl < 5) + { + /* One more too short packet */ + goto done; + } + + /* The ICMP portion */ + icmp = (struct icmphdr *) (base->packet + hlen); + + /* Check the ICMP header to drop unexpected packets due to unrecognized id */ + if (icmp->un.echo.id != (base->pid & 0x0fff)) + { +#if 0 + printf("ready_callback4: bad pid: got %d, expect %d\n", + icmp->un.echo.id, base->pid & 0x0fff); +#endif + goto done; + } + + /* Check the ICMP payload for legal values of the 'index' portion */ + data = (struct evdata *) (base->packet + hlen + ICMP_MINLEN); + if (data->index >= base->tabsiz || base->table[data->index] == NULL) + { +#if 0 + printf("ready_callback4: bad index: got %d\n", + data->index); +#endif + goto done; + } + + /* Get the pointer to the host descriptor in our internal table */ + if (state != base->table[data->index]) + goto done; /* Not for us */ + + /* Make sure we got the right cookie */ + if (memcmp(&state->cookie, &data->cookie, sizeof(state->cookie)) != 0) + { + crondlog(LVL8 "ICMP with wrong cookie"); + goto done; + } + + /* Check for Destination Host Unreachable */ + if (icmp->type == ICMP_ECHO) + { + /* Completely ignore ECHO requests */ + } + else if (icmp->type == ICMP_ECHOREPLY) + { + /* Use the User Data to relate Echo Request/Reply and evaluate the Round Trip Time */ + struct timespec elapsed; /* response time */ + + /* Compute time difference to calculate the round trip */ + elapsed.tv_sec= now.tv_sec - data->ts.tv_sec; + if (now.tv_nsec < data->ts.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - data->ts.tv_nsec; + + /* Set destination address of packet as local address */ + sin4p= &loc_sin4; + 
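/*
 * The reply handled here arrives on a raw IPv4 socket, so the buffer still
 * starts with the IP header: ready_callback4() derives the header length from
 * ip_hl, then checks the echo identifier against the (masked) pid and the
 * embedded evdata index/cookie before trusting the packet.  A compact sketch
 * of those sanity checks, assuming the Linux <netinet/ip.h> and
 * <netinet/ip_icmp.h> layouts used here; the function name and 'want_id'
 * parameter are illustrative.
 *
 *     #include <netinet/ip.h>
 *     #include <netinet/ip_icmp.h>
 *
 *     // Returns a pointer to the ICMP header if 'pkt' looks like an echo
 *     // reply carrying our identifier, NULL otherwise.
 *     static struct icmphdr *check_v4_reply(unsigned char *pkt, int len,
 *         int want_id)
 *     {
 *         struct ip *ip = (struct ip *)pkt;
 *         struct icmphdr *icmp;
 *         int hlen;
 *
 *         if (len < (int)sizeof(*ip) || ip->ip_hl < 5)
 *             return NULL;                 // truncated or bogus IP header
 *         hlen = ip->ip_hl * 4;            // options make the header variable
 *         if (len < hlen + ICMP_MINLEN)
 *             return NULL;
 *
 *         icmp = (struct icmphdr *)(pkt + hlen);
 *         if (icmp->type != ICMP_ECHOREPLY || icmp->un.echo.id != want_id)
 *             return NULL;                 // not an echo reply, or not ours
 *         return icmp;
 *     }
 */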
memset(sin4p, '\0', sizeof(*sin4p)); + sin4p->sin_family= AF_INET; + sin4p->sin_addr= ip->ip_dst; + state->rcvd_ttl= ip->ip_ttl; + + /* Report everything with the wrong sequence number as a dup. + * This is not quite right, it could be a late packet. Do we + * care? + */ + isDup= (ntohs(icmp->un.echo.sequence) != state->seq); + ping_cb(isDup ? PING_ERR_DUP : PING_ERR_NONE, + nrecv - IPHDR - ICMP_MINLEN, nrecv, + (struct sockaddr *)&state->sin6, state->socklen, + (struct sockaddr *)&loc_sin4, sizeof(loc_sin4), + ntohs(icmp->un.echo.sequence), ip->ip_ttl, &elapsed, + state); + + if (!isDup) + { + state->got_reply= 1; + + /* Update the sequence number for the next run */ + state->seq = (state->seq + 1) % 256; + } + } + else + { + /* Handle this condition exactly as the request has expired */ + noreply_callback (-1, -1, state); + } + +done: + if (state->response_in) + noreply_callback (-1, -1, state); +} + +/* + * Called by libevent when the kernel says that the raw socket is ready for reading. + * + * It reads a packet from the wire and attempt to decode and relate ICMP Echo Request/Reply. + * + * To be legal the packet received must be: + * o of enough size (> IPHDR + ICMP_MINLEN) + * o of ICMP Protocol + * o of type ICMP_ECHOREPLY + * o the one we are looking for (matching the same identifier of all the packets the program is able to send) + */ +static void ready_callback6 (int __attribute((unused)) unused, + const short __attribute((unused)) event, void * arg) +{ + struct pingbase *base; + struct pingstate *state; + + int nrecv, isDup; + size_t icmp_len; + struct sockaddr_in6 remote; /* responding internet address */ + + struct icmp6_hdr *icmp; + struct evdata * data; + + struct timespec now; + struct cmsghdr *cmsgptr; + struct sockaddr_in6 *sin6p; + struct msghdr msg; + struct sockaddr_in6 loc_sin6; + struct iovec iov[1]; + char cmsgbuf[256]; + + state= arg; + base = state->base; + + /* Pointer to relevant portions of the packet (IP, ICMP and user + * data) */ + icmp = (struct icmp6_hdr *) base->packet; + icmp_len= offsetof(struct icmp6_hdr, icmp6_data16[2]); + data = (struct evdata *) (base->packet + icmp_len); + + /* Time the packet has been received */ + gettime_mono(&now); + + iov[0].iov_base= base->packet; + iov[0].iov_len= sizeof(base->packet); + msg.msg_name= &remote; + msg.msg_namelen= sizeof(remote); + msg.msg_iov= iov; + msg.msg_iovlen= 1; + msg.msg_control= cmsgbuf; + msg.msg_controllen= sizeof(cmsgbuf); + msg.msg_flags= 0; /* Not really needed */ + + /* Receive data from the network */ + if (state->response_in) + { + size_t len; + + len= sizeof(base->packet); + read_response(state->socket, + RESP_PACKET, &len, base->packet); + nrecv= len; + len= sizeof(remote); + read_response(state->socket, + RESP_PEERNAME, &len, &remote); + + /* Do not try to fuzz the cmsgbuf. We assume stuff returned by + * the kernel can be trusted. 
+ */ + memset(cmsgbuf, '\0', sizeof(cmsgbuf)); + } + else + nrecv= recvmsg(state->socket, &msg, MSG_DONTWAIT); + + if (nrecv < 0) + { + goto done; + } + + if (state->resp_file_out) + { + write_response(state->resp_file_out, + RESP_PACKET, nrecv, base->packet); + write_response(state->resp_file_out, + RESP_PEERNAME, sizeof(remote), &remote); + } + + if (nrecv < icmp_len+sizeof(struct evdata)) + { + // printf("ready_callback6: short packet\n"); + goto done; + } + + /* Check the ICMP header to drop unexpected packets due to + * unrecognized id + */ + if (icmp->icmp6_id != (base->pid & 0xffff)) + { + goto done; + } + + /* Check the ICMP payload for legal values of the 'index' portion */ + if (data->index >= base->tabsiz || base->table[data->index] == NULL) + { + goto done; + } + + /* Get the pointer to the host descriptor in our internal table */ + if (state != base->table[data->index]) + goto done; /* Not for us */ + + /* Make sure we got the right cookie */ + if (memcmp(&state->cookie, &data->cookie, sizeof(state->cookie)) != 0) + { + crondlog(LVL8 "ICMP with wrong cookie"); + goto done; + } + + /* Check for Destination Host Unreachable */ + if (icmp->icmp6_type == ICMP6_ECHO_REQUEST) + { + /* Completely ignore echo requests */ + } + else if (icmp->icmp6_type == ICMP6_ECHO_REPLY) + { + /* Use the User Data to relate Echo Request/Reply and evaluate the Round Trip Time */ + struct timespec elapsed; /* response time */ + + /* Compute time difference to calculate the round trip */ + elapsed.tv_sec= now.tv_sec - data->ts.tv_sec; + if (now.tv_nsec < data->ts.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - data->ts.tv_nsec; + + /* Set destination address of packet as local address */ + memset(&loc_sin6, '\0', sizeof(loc_sin6)); + if (state->response_in) + { + size_t len; + + len= sizeof(loc_sin6); + read_response(state->socket, RESP_DSTADDR, &len, &loc_sin6); + len= sizeof(state->rcvd_ttl); + read_response(state->socket, RESP_TTL, &len, + &state->rcvd_ttl); + } + else + { + for (cmsgptr= CMSG_FIRSTHDR(&msg); cmsgptr; + cmsgptr= CMSG_NXTHDR(&msg, cmsgptr)) + { + if (cmsgptr->cmsg_len == 0) + break; /* Can this happen? */ + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_PKTINFO) + { + sin6p= &loc_sin6; + sin6p->sin6_family= AF_INET6; + sin6p->sin6_addr= ((struct in6_pktinfo *) + CMSG_DATA(cmsgptr))->ipi6_addr; + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_HOPLIMIT) + { + state->rcvd_ttl= *(int *)CMSG_DATA(cmsgptr); + } + } + } + if (state->resp_file_out) + { + write_response(state->resp_file_out, + RESP_DSTADDR, sizeof(*sin6p), sin6p); + write_response(state->resp_file_out, + RESP_TTL, sizeof(state->rcvd_ttl), + &state->rcvd_ttl); + } + + /* Report everything with the wrong sequence number as a dup. + * This is not quite right, it could be a late packet. Do we + * care? + */ + isDup= (ntohs(icmp->icmp6_seq) != state->seq); + ping_cb(isDup ? 
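/*
 * For IPv6 the local destination address and the hop limit are not part of
 * the payload; the loop above recovers them from the ancillary data that
 * recvmsg() delivers once IPV6_RECVPKTINFO and IPV6_RECVHOPLIMIT have been
 * enabled on the socket.  A minimal version of that control-message walk;
 * on glibc, struct in6_pktinfo may require _GNU_SOURCE, and the helper name
 * is illustrative.
 *
 *     #define _GNU_SOURCE
 *     #include <netinet/in.h>
 *     #include <sys/socket.h>
 *
 *     // Pull the destination address and hop limit out of a recvmsg() result.
 *     static void get_v6_meta(struct msghdr *msg, struct in6_addr *dst,
 *         int *hlim)
 *     {
 *         struct cmsghdr *cm;
 *
 *         for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
 *             if (cm->cmsg_level != IPPROTO_IPV6)
 *                 continue;
 *             if (cm->cmsg_type == IPV6_PKTINFO)
 *                 *dst = ((struct in6_pktinfo *)CMSG_DATA(cm))->ipi6_addr;
 *             else if (cm->cmsg_type == IPV6_HOPLIMIT)
 *                 *hlim = *(int *)CMSG_DATA(cm);
 *         }
 *     }
 */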
PING_ERR_DUP : PING_ERR_NONE, + nrecv - ICMP6_HDRSIZE, nrecv + sizeof(struct ip6_hdr), + (struct sockaddr *)&state->sin6, state->socklen, + (struct sockaddr *)&loc_sin6, sizeof(loc_sin6), + ntohs(icmp->icmp6_seq), state->rcvd_ttl, &elapsed, + state); + + /* Update the sequence number for the next run */ + state->seq = (state->seq + 1) % 256; + + if (!isDup) + state->got_reply= 1; + } + else + /* Handle this condition exactly as the request has expired */ + noreply_callback (-1, -1, state); + +done: + if (state->response_in) + noreply_callback (-1, -1, state); +} + + +static void *ping_init(int __attribute((unused)) argc, char *argv[], + void (*done)(void *state, int error)) +{ + static struct pingbase *ping_base; + + int i, r, fd, newsiz, include_probe_id, delay_name_res; + uint32_t opt; + unsigned pingcount; /* must be int-sized */ + unsigned size, interval; + sa_family_t af; + const char *hostname; + char *str_Atlas; + char *str_bundle; + char *out_filename; + char *interface; + char *response_in, *response_out; + char *validated_response_in= NULL; + char *validated_response_out= NULL; + char *validated_out_filename= NULL; + struct pingstate *state; + len_and_sockaddr *lsa; + FILE *fh; + struct cookie cookie; + + if (!ping_base) + { + ping_base = malloc(sizeof(*ping_base)); + if (ping_base == NULL) + return (NULL); + memset(ping_base, 0, sizeof(*ping_base)); + + ping_base->event_base = EventBase; + + ping_base->tabsiz= 10; + ping_base->table= xzalloc(ping_base->tabsiz * + sizeof(*ping_base->table)); + + /* Set default values */ + ping_base->pid = getpid(); + + ping_base->done= 0; + } + + /* Get cookie */ + fd= open("/dev/urandom", O_RDONLY); + if (fd == -1) + { + crondlog(LVL8 "unable to open /dev/urandom"); + return NULL; + } + r= read(fd, &cookie, sizeof(cookie)); + close(fd); + if (r != sizeof(cookie)) + { + crondlog(LVL8 "unable to read from /dev/urandom"); + return NULL; + } + + /* Parse arguments */ + pingcount= 3; + size= 0; + str_Atlas= NULL; + str_bundle= NULL; + out_filename= NULL; + interval= DEFAULT_PING_INTERVAL; + interface= NULL; + response_in= NULL; + response_out= NULL; + /* exactly one argument needed; -c NUM */ + opt_complementary = "=1:c+:s+:i+"; + opt = getopt32(argv, PING_OPT_STRING, &pingcount, &size, + &str_Atlas, &str_bundle, &out_filename, &interval, &interface, + &response_in, &response_out); + hostname = argv[optind]; + + if (opt == 0xffffffff) + { + crondlog(LVL8 "bad options"); + return NULL; + } + + if (interval < 1 || interval > 60000) + { + crondlog(LVL8 "bad interval"); + return NULL; + } + + if (response_in) + { + validated_response_in= rebased_validated_filename(ATLAS_SPOOLDIR, + response_in, ATLAS_FUZZING_REL); + if (!validated_response_in) + { + crondlog(LVL8 "insecure fuzzing file '%s'", response_in); + goto err; + } + } + if (response_out) + { + validated_response_out= rebased_validated_filename(ATLAS_SPOOLDIR, + response_out, ATLAS_FUZZING_REL); + if (!validated_response_out) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_out); + goto err; + } + } + + if (out_filename) + { + validated_out_filename= rebased_validated_filename(ATLAS_SPOOLDIR, + out_filename, SAFE_PREFIX_REL); + if (!validated_out_filename) + { + crondlog(LVL8 "insecure file '%s'", out_filename); + goto err; + } + fh= fopen(validated_out_filename, "a"); + if (!fh) + { + crondlog(LVL8 "ping: unable to append to '%s'", + validated_out_filename); + goto err; + } + fclose(fh); + } + + if (str_Atlas) + { + if (!validate_atlas_id(str_Atlas)) + { + crondlog(LVL8 "bad atlas ID 
'%s'", str_Atlas); + goto err; + } + } + if (str_bundle) + { + if (!validate_atlas_id(str_bundle)) + { + crondlog(LVL8 "bad bundle ID '%s'", str_bundle); + goto err; + } + } + + if (opt & opt_4) + af= AF_INET; + else + af= AF_INET6; + include_probe_id= !!(opt & opt_p); + + /* Keep -r in case there is still code using that option */ + delay_name_res= !!(opt & opt_r); + delay_name_res= 1; /* Always enabled, leave the old code in + * place for now. + */ + /* Introduce a new option to use the libc stub resolver */ + if (opt & opt_e) + delay_name_res= 0; + + if (!delay_name_res) + { + /* Attempt to resolv 'name' */ + lsa= host_and_af2sockaddr(hostname, 0, af); + if (!lsa) + goto err; + + if (lsa->len > sizeof(state->sin6)) + { + free(lsa); + goto err; + } + + if (atlas_check_addr(&lsa->u.sa, lsa->len) == -1) + { + free(lsa); + goto err; + } + } + + state= xzalloc(sizeof(*state)); + + memset(&state->loc_sin6, '\0', sizeof(state->loc_sin6)); + state->loc_socklen= 0; + if (!delay_name_res) + { + state->socklen= lsa->len; + memcpy(&state->sin6, &lsa->u.sa, state->socklen); + free(lsa); lsa= NULL; + } + + state->base = ping_base; + state->af= af; + state->include_probe_id= include_probe_id; + state->delay_name_res= delay_name_res; + state->interval= interval; + state->interface= interface ? strdup(interface) : NULL; + state->socket= -1; + state->response_in= validated_response_in; + validated_response_in= NULL; + state->response_out= validated_response_out; + validated_response_out= NULL; + + if (state->response_in || state->response_out) + { + ping_base->pid= 42; + memset(&cookie, 42, sizeof(cookie)); + } + + state->seq = 1; + + /* Define here the callbacks to ping the host and to handle no reply + * timeouts + */ + evtimer_assign(&state->ping_timer, state->base->event_base, + noreply_callback, state); + + for (i= 0; itabsiz; i++) + { + if (ping_base->table[i] == NULL) + break; + } + if (i >= ping_base->tabsiz) + { + newsiz= 2*ping_base->tabsiz; + ping_base->table= xrealloc(ping_base->table, + newsiz*sizeof(*ping_base->table)); + for (i= ping_base->tabsiz; itable[i]= NULL; + i= ping_base->tabsiz; + ping_base->tabsiz= newsiz; + } + state->index= i; + ping_base->table[i]= state; + + state->pingcount= pingcount; + state->atlas= str_Atlas ? strdup(str_Atlas) : NULL; + state->bundle_id= str_bundle ? 
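/*
 * ping_init keeps every measurement in a pointer table: the code just above
 * scans for a free slot and, when none is left, doubles tabsiz with xrealloc
 * and clears only the newly added entries (the loop conditions are hard to
 * read in this rendering of the patch).  The same pattern, spelled out with
 * plain realloc(); the helper name is illustrative.
 *
 *     #include <stdlib.h>
 *
 *     // Find a free slot in *tab (current size *siz), doubling the table
 *     // when it is full.  Returns the slot index, or -1 on failure.
 *     static int find_slot(void ***tab, int *siz)
 *     {
 *         void **t = *tab;
 *         int i, newsiz;
 *
 *         for (i = 0; i < *siz; i++)
 *             if (t[i] == NULL)
 *                 return i;
 *
 *         newsiz = 2 * *siz;
 *         t = realloc(t, newsiz * sizeof(*t));
 *         if (t == NULL)
 *             return -1;
 *         for (i = *siz; i < newsiz; i++)
 *             t[i] = NULL;                 // only the new tail needs clearing
 *         *tab = t;
 *         i = *siz;
 *         *siz = newsiz;
 *         return i;
 *     }
 */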
strdup(str_bundle) : NULL; + state->hostname= strdup(hostname); + state->out_filename= validated_out_filename; + validated_out_filename= NULL; + + state->result= NULL; + state->reslen= 0; + state->resmax= 0; + state->cookie= cookie; + + state->maxsize = size; + state->base->done= done; + + return state; + +err: + if (validated_response_in) free(validated_response_in); + if (validated_response_out) free(validated_response_out); + if (validated_out_filename) free(validated_out_filename); + + return NULL; +} + +static void ping_start2(void *state) +{ + int p_proto, on, fd; + struct pingstate *pingstate; + char line[80]; + + pingstate= state; + + pingstate->sentpkts= 0; + pingstate->cursize= pingstate->maxsize; + + pingstate->send_error= 0; + pingstate->got_reply= 0; + pingstate->no_dst= 0; + pingstate->no_src= 0; + pingstate->error= 0; + + if (pingstate->af == AF_INET) + { + /* Check if the ICMP protocol is available on this system */ + p_proto= IPPROTO_ICMP; + + if (!pingstate->response_in) + { + if ((fd = socket(AF_INET, SOCK_RAW, p_proto)) == -1) { + /* Create an endpoint for communication + * using raw socket for ICMP calls */ + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" + DBQ(socket failed: %s) " }", + strerror(errno)); + add_str(pingstate, line); + report(pingstate); + if (pingstate->base->done) + pingstate->base->done(pingstate, 1); + return; + } + pingstate->socket= fd; + } + + /* Define the callback to handle ICMP Echo Reply and add the + * raw file descriptor to those monitored for read events */ + pingstate->event_is_init= 1; + event_assign(&pingstate->event, pingstate->base->event_base, + pingstate->socket, EV_READ | EV_PERSIST, + ready_callback4, state); + } + else + { + /* Check if the ICMP6 protocol is available on this system */ + p_proto= IPPROTO_ICMPV6; + + if (!pingstate->response_in) + { + if ((fd = socket(AF_INET6, SOCK_RAW, p_proto)) == -1) { + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" + DBQ(socket failed: %s) " }", + strerror(errno)); + add_str(pingstate, line); + report(pingstate); + if (pingstate->base->done) + pingstate->base->done(pingstate, 1); + return; + } + pingstate->socket= fd; + + } + + on = 1; + setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, + sizeof(on)); + + on = 1; + setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, + sizeof(on)); + + if (!pingstate->response_in) + { + /* Define the callback to handle ICMP Echo Reply and + * add the raw file descriptor to those monitored + * for read events */ + pingstate->event_is_init= 1; + event_assign(&pingstate->event, + pingstate->base->event_base, + pingstate->socket, EV_READ | EV_PERSIST, + ready_callback6, state); + } + } + + evutil_make_socket_nonblocking(pingstate->socket); + + if (pingstate->interface) + { + if (bind_interface(pingstate->socket, pingstate->af, + pingstate->interface) == -1) + { + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" + DBQ(bind to interface failed) + " }"); + add_str(pingstate, line); + report(pingstate); + if (pingstate->base->done) + pingstate->base->done(pingstate, 1); + return; + } + } + + if (!pingstate->response_in && + connect(pingstate->socket, + (struct sockaddr *)&pingstate->sin6, + pingstate->socklen) == -1) + { + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(connect failed: %s) + " }", strerror(errno)); + add_str(pingstate, line); + report(pingstate); + if (pingstate->base->done) + pingstate->base->done(pingstate, 1); + return; + } + + pingstate->loc_socklen= sizeof(pingstate->loc_sin6); + if (pingstate->response_in) + { + size_t len; 
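/*
 * ping_start2 (above) opens one raw socket per address family, IPPROTO_ICMP
 * or IPPROTO_ICMPV6, enables the IPv6 ancillary options, makes the socket
 * non-blocking and connect()s it so the kernel only delivers traffic from the
 * chosen peer.  A reduced sketch of the IPv6 case; raw sockets need root or
 * CAP_NET_RAW, error handling is trimmed, and the function name is
 * illustrative.
 *
 *     #include <fcntl.h>
 *     #include <netinet/in.h>
 *     #include <sys/socket.h>
 *     #include <unistd.h>
 *
 *     // Open a connected, non-blocking raw ICMPv6 socket with destination
 *     // address and hop-limit reporting enabled.  Returns the fd or -1.
 *     static int open_icmp6(const struct sockaddr_in6 *dst)
 *     {
 *         int fd, on = 1;
 *
 *         fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *         if (fd == -1)
 *             return -1;
 *         setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *         setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
 *         fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
 *         if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) == -1) {
 *             close(fd);
 *             return -1;
 *         }
 *         return fd;
 *     }
 */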
+ + len= sizeof(pingstate->loc_sin6); + read_response(pingstate->socket, RESP_SOCKNAME, &len, + &pingstate->loc_sin6); + pingstate->loc_socklen= len; + } + else + { + getsockname(pingstate->socket, + (struct sockaddr *)&pingstate->loc_sin6, + &pingstate->loc_socklen); + if (pingstate->resp_file_out) + { + write_response(pingstate->resp_file_out, + RESP_SOCKNAME, pingstate->loc_socklen, + &pingstate->loc_sin6); + } + } + + if (!pingstate->response_in) + event_add(&pingstate->event, NULL); + + ping_xmit(pingstate); +} + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx) +{ + int r; + size_t tmp_len; + struct pingstate *env; + struct timespec now, elapsed; + double nsecs; + struct addrinfo tmp_res; + struct sockaddr_storage tmp_sockaddr; + + env= ctx; + + if (!env->dnsip) + { + crondlog(LVL7 + "dns_cb: in dns_cb but not doing dns at this time"); + if (res) + evutil_freeaddrinfo(res); + return; + } + + gettime_mono(&now); + elapsed.tv_sec= now.tv_sec - env->start_time.tv_sec; + if (now.tv_nsec < env->start_time.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - env->start_time.tv_nsec; + nsecs= (elapsed.tv_sec * 1e9 + elapsed.tv_nsec); + env->ttr= nsecs/1e6; + + env->dnsip= 0; + + if (result != 0) + { + ping_cb(PING_ERR_DNS, env->maxsize, -1, + (struct sockaddr *)evutil_gai_strerror(result), 0, + (struct sockaddr *)NULL, 0, + 0, 0, NULL, + env); + ping_cb(PING_ERR_DONE, env->maxsize, -1, + (struct sockaddr *)NULL, 0, + (struct sockaddr *)NULL, 0, + 0, 0, NULL, + env); + if (env->base->done) + env->base->done(env, 1); + return; + } + + env->dns_res= res; + env->dns_curr= res; + + if (env->response_in) + { + env->socket= open(env->response_in, O_RDONLY); + if (env->socket == -1) + { + crondlog(DIE9 "unable to open '%s'", + env->response_in); + } + + tmp_len= sizeof(tmp_res); + read_response(env->socket, RESP_ADDRINFO, &tmp_len, &tmp_res); + assert(tmp_len == sizeof(tmp_res)); + tmp_len= sizeof(tmp_sockaddr); + read_response(env->socket, RESP_ADDRINFO_SA, + &tmp_len, &tmp_sockaddr); + assert(tmp_len == tmp_res.ai_addrlen); + tmp_res.ai_addr= (struct sockaddr *)&tmp_sockaddr; + env->dns_curr= &tmp_res; + } + + while (env->dns_curr) + { + if (env->response_out) + { + write_response(env->resp_file_out, RESP_ADDRINFO, + sizeof(*env->dns_curr), env->dns_curr); + write_response(env->resp_file_out, RESP_ADDRINFO_SA, + env->dns_curr->ai_addrlen, + env->dns_curr->ai_addr); + } + + env->socklen= env->dns_curr->ai_addrlen; + if (env->socklen > sizeof(env->sin6)) + break; /* Weird */ + memcpy(&env->sin6, env->dns_curr->ai_addr, + env->socklen); + + r= atlas_check_addr((struct sockaddr *)&env->sin6, + env->socklen); + if (r == -1) + { + ping_cb(PING_ERR_BAD_ADDR, env->maxsize, -1, + NULL, 0, + (struct sockaddr *)NULL, 0, + 0, 0, NULL, + env); + ping_cb(PING_ERR_DONE, env->maxsize, -1, + (struct sockaddr *)NULL, 0, + (struct sockaddr *)NULL, 0, + 0, 0, NULL, + env); + if (env->base->done) + env->base->done(env, 1); + return; + } + + ping_start2(env); + + return; + } + + /* Something went wrong */ + if (!env->response_in) + { + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + } + ping_cb(PING_ERR_DNS_NO_ADDR, env->cursize, -1, + (struct sockaddr *)NULL, 0, + (struct sockaddr *)NULL, 0, + 0, 0, NULL, + env); + if (env->base->done) + env->base->done(env, 1); +} + +static void ping_start(void *state) +{ + struct pingstate *pingstate; + struct evutil_addrinfo hints; + + pingstate= state; + + if (pingstate->busy) + 
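/*
 * The response_in/response_out pair used throughout these testers implements
 * record-and-replay for fuzzing: during a real run every externally observed
 * answer (RESP_SOCKNAME, RESP_PACKET, RESP_PEERNAME, RESP_ADDRINFO, ...) is
 * written out via write_response(), and in replay mode read_response() hands
 * the same data back instead of touching the network.  The on-disk format is
 * not shown in this patch; the sketch below is only a plausible shape of such
 * a recorder, with a hypothetical name and layout.
 *
 *     #include <stdio.h>
 *
 *     // Hypothetical recorder: a type tag, a length, then the raw payload.
 *     static void record(FILE *f, int type, size_t len, const void *data)
 *     {
 *         fwrite(&type, sizeof(type), 1, f);
 *         fwrite(&len, sizeof(len), 1, f);
 *         fwrite(data, 1, len, f);
 *     }
 */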
return; + + if (pingstate->result) free(pingstate->result); + pingstate->resmax= 80; + pingstate->result= xmalloc(pingstate->resmax); + pingstate->reslen= 0; + pingstate->resp_file_out= NULL; + + pingstate->first= 1; + pingstate->got_reply= 0; + pingstate->no_dst= 1; + pingstate->busy= 1; + pingstate->event_is_init= 0; + + pingstate->maxpkts= pingstate->pingcount; + + if (pingstate->response_out) + { + pingstate->resp_file_out= fopen(pingstate->response_out, "w"); + if (!pingstate->resp_file_out) + { + crondlog(DIE9 "unable to write to '%s'", + pingstate->response_out); + } + } + + if (!pingstate->delay_name_res) + { + ping_start2(state); + return; + } + + pingstate->dnsip= 1; + gettime_mono(&pingstate->start_time); + if (pingstate->response_in) + { + dns_cb(0, 0, pingstate); + } + else + { + memset(&hints, '\0', sizeof(hints)); + hints.ai_socktype= SOCK_DGRAM; + hints.ai_family= pingstate->af; + (void) evdns_getaddrinfo(DnsBase, pingstate->hostname, NULL, + &hints, dns_cb, pingstate); + } +} + +static int ping_delete(void *state) +{ + struct pingstate *pingstate; + struct pingbase *base; + + pingstate= state; + + if (pingstate->busy) + { + crondlog(LVL8 + "ping_delete: not deleting, busy for state %p, '%s'", + pingstate, pingstate->hostname); + return 0; + } + + base= pingstate->base; + + evtimer_del(&pingstate->ping_timer); + + base->table[pingstate->index]= NULL; + + free(pingstate->atlas); + pingstate->atlas= NULL; + free(pingstate->bundle_id); + pingstate->bundle_id= NULL; + free(pingstate->interface); + pingstate->interface= NULL; + free(pingstate->hostname); + pingstate->hostname= NULL; + free(pingstate->out_filename); + pingstate->out_filename= NULL; + + free(pingstate); + + return 1; +} + +struct testops ping_ops = { ping_init, ping_start, ping_delete }; + diff --git a/probe-busybox/eperd/readresolv.c b/probe-busybox/eperd/readresolv.c new file mode 100644 index 00000000..c90deab1 --- /dev/null +++ b/probe-busybox/eperd/readresolv.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ */ + +#define LINEL (INET6_ADDRSTRLEN * 2) +#include "libbb.h" +#include "resolv.h" +#include "eperd.h" +#include "readresolv.h" +#include + +static void nameserver_ip_add (char **nsentry, char *ip_as_string) +{ + *nsentry= strdup(ip_as_string); +} + +static int resolv_conf_parse_line (char **nsentry, char *line) +{ + +#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state) + char *strtok_state; + static const char *const delims = " \t"; + char *const first_token = strtok_r(line, delims, &strtok_state); + + if (!first_token) return 0; + + if (!strcmp(first_token, "nameserver")) { + char *const nameserver = NEXT_TOKEN; + if (nameserver) { + if(nameserver[(strlen(nameserver) - 1)] == '\n') + nameserver[(strlen(nameserver) - 1)] = '\0'; + nameserver_ip_add(nsentry, nameserver); + //printf("AA added nameserver %s\n", nsentry); + return 1; + } + } + return 0; +} + +void get_local_resolvers(char *nslist[MAXNS], int *resolv_max, char *ifname) +{ + +#ifndef RESOLV_CONF +#define RESOLV_CONF "/etc/resolv.conf" +#endif + char buf[LINEL]; + char filename[80]; + char *buf_start; + int i = 0; + struct stat sb; + FILE *R; + + if (ifname) + { + snprintf(filename, sizeof(filename), "%s.%s", + RESOLV_CONF, ifname); + + /* Check if it exists */ + if (stat(filename, &sb) == -1) + { + crondlog(LVL8 "get_local_resolvers: stat of %s failed: %s", + filename, strerror(errno)); + /* Fall back to resolv.conf */ + strlcpy(filename, RESOLV_CONF, sizeof(filename)); + } + } + else + { + /* Just use resolv.conf */ + strlcpy(filename, RESOLV_CONF, sizeof(filename)); + } + + // crondlog(LVL8 "get_local_resolvers: using %s", filename); + + R = fopen (filename, "r"); + if (R != NULL) { + while ( (fgets (buf, LINEL, R)) && (i < MAXNS)) { + buf_start = buf; + if(resolv_conf_parse_line(&nslist[i], buf) ) { + // crondlog(LVL5 "parsed file %s , line %s i=%d", filename, buf_start, i); + i++; + } + else + { + // crondlog(LVL5 "ERROR failed to parse from %s i=%d, line %s", filename, i, buf_start); + } + } + fclose (R); + } + + *resolv_max = i; +} diff --git a/probe-busybox/eperd/readresolv.h b/probe-busybox/eperd/readresolv.h new file mode 100644 index 00000000..2223a6d3 --- /dev/null +++ b/probe-busybox/eperd/readresolv.h @@ -0,0 +1,5 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ +void get_local_resolvers(char *nslist[MAXNS], int *resolv_max, char *infname); diff --git a/probe-busybox/eperd/sslgetcert.c b/probe-busybox/eperd/sslgetcert.c new file mode 100644 index 00000000..b69c6afd --- /dev/null +++ b/probe-busybox/eperd/sslgetcert.c @@ -0,0 +1,1946 @@ +/* +sslgetcert.c -- libevent-based version of sslgetcert +Copyright (c) 2013-2014 RIPE NCC +Created: April 2013 by Philip Homburg for RIPE NCC +*/ + +#include "libbb.h" +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "tcputil.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_IN ATLAS_DATA_OUT +#define SAFE_PREFIX_OUT_REL ATLAS_DATA_NEW_REL + +#define CONN_TO 5 + +#define ENV2STATE(env) \ + ((struct state *)((char *)env - offsetof(struct state, tu_env))) + +#define DBQ(str) "\"" #str "\"" + +#define MAX_LINE_LEN 2048 /* We don't deal with lines longer than this */ +#define POST_BUF_SIZE 2048 /* Big enough to be efficient? 
*/ + +#define RESP_PACKET 1 +#define RESP_SOCKNAME 2 +#define RESP_DSTADDR 3 + +static struct option longopts[]= +{ + { NULL, } +}; + +enum readstate { READ_HELLO, READ_CERTS, READ_DONE }; +enum writestate { WRITE_HELLO, WRITE_DONE }; + +struct hgbase +{ + struct event_base *event_base; + + struct state **table; + int tabsiz; + + /* For standalone sslgetcert. Called when a sslgetcert instance is + * done. Just one pointer for all instances. It is up to the caller + * to keep it consistent. + */ + void (*done)(void *state, int error); +}; + +struct buf +{ + size_t offset; + size_t size; + size_t maxsize; + char *buf; + struct bufferevent *bev; +}; + +struct msgbuf +{ + struct buf *inbuf; + struct buf *outbuf; + + struct buf buffer; +}; + +struct state +{ + /* Parameters */ + char *output_file; + char *atlas; + char *bundle; + char *infname; + char *response_in; /* Fuzzing */ + char *response_out; + char only_v4; + char only_v6; + char major_version; + char minor_version; + + /* State */ + char busy; + struct tu_env tu_env; + char dnserr; + char connecting; + char *hostname; + char *portname; + char *sni; + struct bufferevent *bev; + enum readstate readstate; + enum writestate writestate; + time_t gstart; + struct timespec start; + struct timespec t_connect; + double resptime; + FILE *post_fh; + char *post_buf; + char recv_major; + char recv_minor; + unsigned server_cipher; + + struct buf inbuf; + struct msgbuf msginbuf; + + char *line; + size_t linemax; /* Allocated size of line */ + size_t linelen; /* Current amount of data in line */ + size_t lineoffset; /* Offset in line where to start processing */ + + /* Base and index in table */ + struct hgbase *base; + int index; + + struct sockaddr_in6 sin6; + socklen_t socklen; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + + char *result; + size_t reslen; + size_t resmax; + + FILE *resp_file; /* Fuzzing */ +}; + +#define BUF_CHUNK 4096 + +#define MSG_ALERT 21 +#define MSG_HANDSHAKE 22 +#define HS_CLIENT_HELLO 1 +#define HS_SERVER_HELLO 2 +#define HS_CERTIFICATE 11 + +struct hsbuf +{ + struct buf buffer; +}; + +#define URANDOM_DEV "/dev/urandom" + +static struct hgbase *hg_base; + +static int eat_server_hello(struct state *state); +static int eat_certificate(struct state *state); +static void report(struct state *state); +static void add_str(struct state *state, const char *str); +static void timeout_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); + +static void buf_init(struct buf *buf, struct bufferevent *bev) +{ + buf->maxsize= 0; + buf->size= 0; + buf->offset= 0; + buf->buf= NULL; + buf->bev= bev; +} + +static void buf_add(struct buf *buf, const void *data, size_t len) +{ + size_t maxsize; + void *newbuf; + + if (buf->size+len <= buf->maxsize) + { + /* Easy case, just add data */ + memcpy(buf->buf+buf->size, data, len); + buf->size += len; + return; + } + + /* Just get a new buffer */ + maxsize= buf->size-buf->offset + len + BUF_CHUNK; + newbuf= malloc(maxsize); + if (!newbuf) + { + fprintf(stderr, "unable to allocate %ld bytes\n", (long)maxsize); + exit(1); + } + + if (buf->offset < buf->size) + { + /* Copy existing data */ + memcpy(newbuf, buf->buf+buf->offset, buf->size-buf->offset); + buf->size -= buf->offset; + buf->offset= 0; + } + else + { + buf->size= buf->offset= 0; + } + buf->maxsize= maxsize; + free(buf->buf); + buf->buf= newbuf; + + memcpy(buf->buf+buf->size, data, len); + buf->size += len; +} + +static void buf_add_b64(struct buf *buf, void *data, size_t len) +{ + char 
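/*
 * The function started below, buf_add_b64(), emits PEM-style base64: every
 * three input bytes become four output characters, a newline is inserted
 * after each 48 input bytes (64 output characters), and a leftover of one or
 * two bytes is padded with "==" or "=" respectively.  A tiny self-contained
 * encoder for just those tail cases, mirroring the switch at the end of the
 * function; the helper name is illustrative.
 *
 *     // Encode the final 1 or 2 leftover bytes of a base64 stream into out[4].
 *     static void b64_tail(const unsigned char *p, int left, char out[4])
 *     {
 *         static const char b64[] =
 *             "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"
 *             "ghijklmnopqrstuvwxyz0123456789+/";
 *         unsigned long v = (unsigned long)p[0] << 16;
 *
 *         if (left == 2)
 *             v += (unsigned long)p[1] << 8;
 *         out[0] = b64[(v >> 18) & 63];
 *         out[1] = b64[(v >> 12) & 63];
 *         out[2] = (left == 2) ? b64[(v >> 6) & 63] : '=';
 *         out[3] = '=';
 *     }
 */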
b64[]= + "ABCDEFGHIJKLMNOP" + "QRSTUVWXYZabcdef" + "ghijklmnopqrstuv" + "wxyz0123456789+/"; + int i; + uint8_t *p; + uint32_t v; + char str[4]; + + p= data; + + for (i= 0; i+3 <= len; i += 3, p += 3) + { + v= (p[0] << 16) + (p[1] << 8) + p[2]; + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= b64[(v >> 6) & 63]; + str[3]= b64[(v >> 0) & 63]; + buf_add(buf, str, 4); + if (i % 48 == 45) + buf_add(buf, "\n", 1); + } + switch(len-i) + { + case 0: break; /* Nothing to do */ + case 1: + v= (p[0] << 16); + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= '='; + str[3]= '='; + buf_add(buf, str, 4); + break; + case 2: + v= (p[0] << 16) + (p[1] << 8); + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= b64[(v >> 6) & 63]; + str[3]= '='; + buf_add(buf, str, 4); + break; + default: + fprintf(stderr, "bad state in buf_add_b64"); + return; + } +} + +static int buf_read(struct state *state, struct buf *buf) +{ + int r; + size_t len, maxsize; + void *newbuf; + + if (buf->size >= buf->maxsize) + { + if (buf->size-buf->offset + BUF_CHUNK <= buf->maxsize) + { + /* The buffer is big enough, just need to compact */ + len= buf->size-buf->offset; + if (len != 0) + { + memmove(buf->buf, &buf->buf[buf->offset], + len); + } + buf->size -= buf->offset; + buf->offset= 0; + } + else + { + maxsize= buf->size-buf->offset + BUF_CHUNK; + newbuf= malloc(maxsize); + if (!newbuf) + { + fprintf(stderr, "unable to allocate %lu bytes", + (unsigned long)maxsize); + errno= ENOMEM; + return -1; + } + buf->maxsize= maxsize; + + if (buf->size > buf->offset) + { + memcpy(newbuf, buf->buf+buf->offset, + buf->size-buf->offset); + buf->size -= buf->offset; + buf->offset= 0; + } + else + { + buf->size= buf->offset= 0; + } + free(buf->buf); + buf->buf= newbuf; + } + } + + if (state->response_in) + { + size_t tmplen; + + tmplen= buf->maxsize-buf->size; + read_response_file(state->resp_file, RESP_PACKET, + &tmplen, buf->buf+buf->size); + r= tmplen; + } + else + { + r= bufferevent_read(buf->bev, + buf->buf+buf->size, buf->maxsize-buf->size); + } + if (r > 0) + { + if (state->response_out) + { + write_response(state->resp_file, RESP_PACKET, + r, buf->buf+buf->size); + } + buf->size += r; + return 0; + } + if (r == 0) + { + errno= EAGAIN; + return -1; + } + fprintf(stderr, "read error: %s", + r == 0 ? "eof" : strerror(errno)); + return -1; +} + +static int buf_write(struct buf *buf) +{ + int r; + size_t len; + struct evbuffer *output; + + output= bufferevent_get_output(buf->bev); + while (buf->offset < buf->size) + { + len= buf->size - buf->offset; + r= evbuffer_add(output, buf->buf+buf->offset, len); + if (r >= 0) + { + buf->offset += len; + continue; + } + fprintf(stderr, "write to %p failed: %s\n", + buf->bev, r == 0 ? 
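/*
 * The msgbuf layer that follows frames the TLS record protocol: each record
 * is a 5-byte header of content type, protocol major/minor version and a
 * 16-bit big-endian payload length, followed by at most 0x4000 bytes of
 * payload (msgbuf_final re-chunks outgoing data to that limit, and
 * hsbuf_final prepends the separate 1-byte handshake type plus 24-bit
 * length).  A minimal stand-alone parser for the record header, assuming at
 * least five bytes are already buffered; struct and function names are
 * illustrative.
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     struct tls_rec_hdr {
 *         uint8_t type;             // 22 = handshake, 21 = alert, ...
 *         uint8_t major, minor;     // 3,1 = TLS 1.0 record version
 *         uint16_t length;          // payload bytes that follow, max 0x4000
 *     };
 *
 *     // Fills *h and returns 0 if buf starts with a plausible record header.
 *     static int parse_rec_hdr(const uint8_t *buf, size_t len,
 *         struct tls_rec_hdr *h)
 *     {
 *         if (len < 5)
 *             return -1;
 *         h->type = buf[0];
 *         h->major = buf[1];
 *         h->minor = buf[2];
 *         h->length = (uint16_t)((buf[3] << 8) | buf[4]);
 *         return h->length <= 0x4000 ? 0 : -1;
 *     }
 */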
"eof" : strerror(errno)); + return -1; + } + return 0; +} + +static void buf_cleanup(struct buf *buf) +{ + free(buf->buf); + buf->offset= buf->size= buf->maxsize= 0; +} + +static void msgbuf_init(struct msgbuf *msgbuf, + struct buf *inbuf, struct buf *outbuf) +{ + buf_init(&msgbuf->buffer, NULL); + msgbuf->inbuf= inbuf; + msgbuf->outbuf= outbuf; +} + +static void msgbuf_add(struct msgbuf *msgbuf, void *buf, size_t size) +{ + buf_add(&msgbuf->buffer, buf, size); +} + +static int msgbuf_read(struct state *state, struct msgbuf *msgbuf, int *typep, + char *majorp, char *minorp) +{ + int r; + size_t len; + uint8_t *p; + + for(;;) + { + if (msgbuf->inbuf->size - msgbuf->inbuf->offset < 5) + { + r= buf_read(state, msgbuf->inbuf); + if (r < 0) + { + fprintf(stderr, + "msgbuf_read: buf_read failed\n"); + return -1; + } + continue; + } + p= (uint8_t *)msgbuf->inbuf->buf+msgbuf->inbuf->offset; + *typep= p[0]; + *majorp= p[1]; + *minorp= p[2]; + len= (p[3] << 8) + p[4]; + /* Note that buf_read may reallocate msgbuf->inbuf->buf, + * which invalidates p. For this reason, after buf_read + * either return to the caller, or use 'continue' to + * restart at the top of the loop. + */ + if (msgbuf->inbuf->size - msgbuf->inbuf->offset < 5 + len) + { + r= buf_read(state, msgbuf->inbuf); + if (r < 0) + { + if (errno != EAGAIN) + { + fprintf(stderr, + "msgbuf_read: buf_read failed: %s\n", + strerror(errno)); + } + return -1; + } + continue; + } + + /* Move the data to the msg buffer */ + msgbuf_add(msgbuf, msgbuf->inbuf->buf+msgbuf->inbuf->offset+5, + len); + msgbuf->inbuf->offset += 5+len; + break; + } + return 0; +} + +static void msgbuf_final(struct msgbuf *msgbuf, int type) +{ + uint8_t c; + size_t len; + + while (msgbuf->buffer.offset < msgbuf->buffer.size) + { + len= msgbuf->buffer.size-msgbuf->buffer.offset; + if (len > 0x4000) + len= 0x4000; + + c= type; + buf_add(msgbuf->outbuf, &c, 1); + + c= 3; + buf_add(msgbuf->outbuf, &c, 1); + + c= 0; + buf_add(msgbuf->outbuf, &c, 1); + + c= len >> 8; + buf_add(msgbuf->outbuf, &c, 1); + + c= len; + buf_add(msgbuf->outbuf, &c, 1); + + buf_add(msgbuf->outbuf, + msgbuf->buffer.buf + msgbuf->buffer.offset, len); + + msgbuf->buffer.offset += len; + } +} + +static void msgbuf_cleanup(struct msgbuf *msgbuf) +{ + buf_cleanup(&msgbuf->buffer); +} + +static void hsbuf_init(struct hsbuf *hsbuf) +{ + buf_init(&hsbuf->buffer, NULL); +} + +static void hsbuf_add(struct hsbuf *hsbuf, const void *buf, size_t len) +{ + buf_add(&hsbuf->buffer, buf, len); +} + +static void hsbuf_add_u16(struct hsbuf *hsbuf, unsigned u16) +{ + uint8_t c; + + c= (u16 >> 8); + buf_add(&hsbuf->buffer, &c, 1); + + c= u16; + buf_add(&hsbuf->buffer, &c, 1); +} + +static size_t hsbuf_len(struct hsbuf *hsbuf) +{ + return hsbuf->buffer.size - hsbuf->buffer.offset; +} + +static void hsbuf_copy(struct hsbuf *dst, struct hsbuf *src) +{ + size_t len; + + len= src->buffer.size - src->buffer.offset; + hsbuf_add(dst, src->buffer.buf + src->buffer.offset, len); +} + +static void hsbuf_cleanup(struct hsbuf *hsbuf) +{ + buf_cleanup(&hsbuf->buffer); +} + +static void hsbuf_final(struct hsbuf *hsbuf, int type, struct msgbuf *msgbuf) +{ + uint8_t c; + size_t len; + + len= hsbuf->buffer.size - hsbuf->buffer.offset; + + c= type; + msgbuf_add(msgbuf, &c, 1); + + c= (len >> 16); + msgbuf_add(msgbuf, &c, 1); + + c= (len >> 8); + msgbuf_add(msgbuf, &c, 1); + + c= len; + msgbuf_add(msgbuf, &c, 1); + + msgbuf_add(msgbuf, hsbuf->buffer.buf + hsbuf->buffer.offset, len); + hsbuf->buffer.offset += len; +} + +static void 
add_random(struct hsbuf *hsbuf) +{ + int fd; + time_t t; + uint8_t buf[32]; + + t= time(NULL); + buf[0]= t >> 24; + buf[1]= t >> 16; + buf[2]= t >> 8; + buf[3]= t; + + fd= open(URANDOM_DEV, O_RDONLY); + + /* Best effort, just ignore errors */ + if (fd != -1) + { + read(fd, buf+4, sizeof(buf)-4); + close(fd); + } + hsbuf_add(hsbuf, buf, sizeof(buf)); +} + +static void add_sessionid(struct hsbuf *hsbuf) +{ + uint8_t c; + + c= 0; + hsbuf_add(hsbuf, &c, 1); +} + +static void add_ciphers(struct hsbuf *hsbuf) +{ + size_t len; + uint8_t ciphers[]= { + /* From Firefox 89.0.2 */ + 0x13,0x01, /* TLS_AES_128_GCM_SHA256 */ + 0x13,0x03, /* TLS_CHACHA20_POLY1305_SHA256 */ + 0x13,0x02, /* TLS_AES_256_GCM_SHA384 */ + 0xc0,0x2b, /* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 */ + 0xc0,0x2f, /* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 */ + 0xcc,0xa9, /* TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 */ + 0xcc,0xa8, /* TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 */ + 0xc0,0x2c, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + 0xc0,0x30, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ + 0xc0,0x0a, /* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA */ + 0xc0,0x09, /* TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA */ + 0xc0,0x13, /* TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA */ + 0xc0,0x14, /* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA */ + 0x00,0x9c, /* TLS_RSA_WITH_AES_128_GCM_SHA256 */ + 0x00,0x9d, /* TLS_RSA_WITH_AES_256_GCM_SHA384 */ + 0x00,0x2f, /* TLS_RSA_WITH_AES_128_CBC_SHA */ + 0x00,0x35, /* TLS_RSA_WITH_AES_256_CBC_SHA */ + 0x00,0x0a, /* TLS_RSA_WITH_3DES_EDE_CBC_SHA */ + }; + + len= sizeof(ciphers); + + hsbuf_add_u16(hsbuf, len); + hsbuf_add(hsbuf, ciphers, len); +} + +static void add_compression(struct hsbuf *hsbuf) +{ + uint8_t c; + size_t len; + uint8_t compression[]= { 0x1, 0x0 }; + + len= sizeof(compression); + + c= len; + hsbuf_add(hsbuf, &c, 1); + + hsbuf_add(hsbuf, compression, len); +} + +static void ext_sigs(struct hsbuf *hsbuf) +{ + uint16_t sigextlen, siglen; + uint8_t sigs[] = + { + /* From Firefox 89.0.2 */ + 0x04, 0x03, /* ecdsa_secp256r1_sha256 */ + 0x05, 0x03, /* ecdsa_secp256r1_sha384 */ + 0x06, 0x03, /* ecdsa_secp256r1_sha512 */ + 0x08, 0x04, /* rsa_pss_rsae_sha256 */ + 0x08, 0x05, /* rsa_pss_rsae_sha384 */ + 0x08, 0x06, /* rsa_pss_rsae_sha512 */ + 0x04, 0x01, /* rsa_pkcs1_sha256 */ + 0x05, 0x01, /* rsa_pkcs1_sha384 */ + 0x06, 0x01, /* rsa_pkcs1_sha512 */ + 0x02, 0x03, /* ecdsa_sha1 */ + 0x02, 0x01, /* rsa_pkcs1_sha1 */ + }; + + siglen= sizeof(sigs); + sigextlen= siglen + 2; + + hsbuf_add_u16(hsbuf, 13 /*signature_algorithms*/); + hsbuf_add_u16(hsbuf, sigextlen); + hsbuf_add_u16(hsbuf, siglen); + hsbuf_add(hsbuf, sigs, siglen); +} + +static void elliptic_curves(struct hsbuf *hsbuf) +{ + uint16_t curvesextlen, curveslen; + uint8_t curves[] = + { + /* From Firefox 89.0.2 */ + 0x00, 0x1d, /* x25519 */ + 0x00, 0x17, /* secp256r1 */ + 0x00, 0x18, /* secp384r1 */ + 0x00, 0x19, /* secp521r1 */ + 0x01, 0x00, /* ffdhe2048 */ + 0x01, 0x01, /* ffdhe3072 */ + }; + + curveslen= sizeof(curves); + curvesextlen= curveslen + 2; + + hsbuf_add_u16(hsbuf, 10 /*elliptic_curves*/); + hsbuf_add_u16(hsbuf, curvesextlen); + hsbuf_add_u16(hsbuf, curveslen); + hsbuf_add(hsbuf, curves, curveslen); +} + +static void sni(struct hsbuf *hsbuf, const char *server_name) +{ + uint8_t c; + size_t size_hostname, size_server_name_list, size_extension_data; + + size_hostname= strlen(server_name); + size_server_name_list= 1 /*name_type*/ + 2 /*size_hostname*/ + + size_hostname; + size_extension_data= 2 /*size_server_name_list*/ + + size_server_name_list; + + 
hsbuf_add_u16(hsbuf, 0 /* server_name */); + hsbuf_add_u16(hsbuf, size_extension_data); + hsbuf_add_u16(hsbuf, size_server_name_list); + c= 0; /* host_name */ + hsbuf_add(hsbuf, &c, 1); + hsbuf_add_u16(hsbuf, size_hostname); + hsbuf_add(hsbuf, server_name, size_hostname); +} + +static void add_extensions(struct state *state, struct hsbuf *hsbuf) +{ + size_t size_extensions; + struct hsbuf ext_sigs_buf; + struct hsbuf sni_buf; + struct hsbuf elliptic_curves_buf; + + /* SNI */ + hsbuf_init(&sni_buf); + if (state->sni) + sni(&sni_buf, state->sni); + + /* Signatures */ + hsbuf_init(&ext_sigs_buf); + ext_sigs(&ext_sigs_buf); + + /* Elliptic curvers */ + hsbuf_init(&elliptic_curves_buf); + elliptic_curves(&elliptic_curves_buf); + + size_extensions= hsbuf_len(&sni_buf) + hsbuf_len(&ext_sigs_buf) + + hsbuf_len(&elliptic_curves_buf); + + hsbuf_add_u16(hsbuf, size_extensions); + hsbuf_copy(hsbuf, &sni_buf); + hsbuf_cleanup(&sni_buf); + hsbuf_copy(hsbuf, &ext_sigs_buf); + hsbuf_cleanup(&ext_sigs_buf); + hsbuf_copy(hsbuf, &elliptic_curves_buf); + hsbuf_cleanup(&elliptic_curves_buf); +} + +static struct hgbase *sslgetcert_base_new(struct event_base *event_base) +{ + struct hgbase *base; + + base= xzalloc(sizeof(*base)); + + base->event_base= event_base; + + base->tabsiz= 10; + base->table= xzalloc(base->tabsiz * sizeof(*base->table)); + + return base; +} + +static void timeout_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct state *state; + double resptime; + char hostbuf[NI_MAXHOST]; + char line[80]; + + state= ENV2STATE(s); + + if (state->connecting) + { + add_str(state, DBQ(err) ":" DBQ(connect: timeout)); + if (0 /*state->do_all*/) + report(state); + else + tu_restart_connect(&state->tu_env); + return; + } + + getnameinfo((struct sockaddr *)&state->loc_sin6, + state->loc_socklen, hostbuf, sizeof(hostbuf), NULL, 0, + NI_NUMERICHOST); + snprintf(line, sizeof(line), DBQ(src_addr) ":" DBQ(%s) ", " , hostbuf); + add_str(state, line); + + resptime= (state->t_connect.tv_sec- state->start.tv_sec)*1e3 + + (state->t_connect.tv_nsec-state->start.tv_nsec)/1e6; + snprintf(line, sizeof(line), DBQ(ttc) ": %f, ", resptime); + add_str(state, line); + + switch(state->readstate) + { + case READ_HELLO: + add_str(state, DBQ(err) ":" DBQ(timeout reading hello)); + report(state); + break; + case READ_CERTS: + add_str(state, DBQ(err) ":" DBQ(timeout reading certificates)); + report(state); + break; + default: + printf("in timeout_callback, unhandled case: %d\n", + state->readstate); + } +} + +static void *sslgetcert_init(int __attribute((unused)) argc, char *argv[], + void (*done)(void *state, int error)) +{ + int c, i, only_v4, only_v6, major, minor; + size_t newsiz; + char *hostname, *str_port, *infname, *version_str; + char *output_file, *A_arg, *B_arg, *h_arg; + char *response_in, *response_out; + char *validated_response_in= NULL; + char *validated_response_out= NULL; + char *validated_output_file= NULL; + struct state *state; + FILE *fh; + + /* Arguments */ + output_file= NULL; + version_str= NULL; + A_arg= NULL; + B_arg= NULL; + h_arg= NULL; + infname= NULL; + str_port= NULL; + response_in= NULL; + response_out= NULL; + only_v4= 0; + only_v6= 0; + + if (!hg_base) + { + hg_base= sslgetcert_base_new(EventBase); + if (!hg_base) + crondlog(DIE9 "sslgetcert_base_new failed"); + } + + + /* Allow us to be called directly by another program in busybox */ + optind= 0; + while (c= getopt_long(argc, argv, "A:B:h:O:R:V:W:i:p:46", + longopts, NULL), c != -1) + { + 
switch(c) + { + case 'A': + A_arg= optarg; + break; + case 'B': + B_arg= optarg; + break; + case 'h': + h_arg= optarg; + break; + case 'O': + output_file= optarg; + break; + case 'R': + response_in= optarg; + break; + case 'V': + version_str= optarg; + break; + case 'W': + response_out= optarg; + break; + case 'i': + infname= optarg; + break; + case 'p': + str_port= optarg; + break; + case '4': + only_v4= 1; + only_v6= 0; + break; + case '6': + only_v6= 1; + only_v4= 0; + break; + default: + crondlog(LVL8 "bad option '%c'", c); + return NULL; + } + } + + if (optind != argc-1) + { + crondlog(LVL8 "exactly one hostname expected"); + return NULL; + } + hostname= argv[optind]; + + if (response_in) + { + validated_response_in= rebased_validated_filename(ATLAS_SPOOLDIR, + response_in, ATLAS_FUZZING_REL); + if (!validated_response_in) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_in); + goto err; + } + } + if (response_out) + { + validated_response_out= rebased_validated_filename(ATLAS_SPOOLDIR, + response_out, ATLAS_FUZZING_REL); + if (!validated_response_out) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_out); + goto err; + } + } + + if (output_file) + { + validated_output_file= rebased_validated_filename(ATLAS_SPOOLDIR, + output_file, SAFE_PREFIX_OUT_REL); + if (!validated_output_file) + { + crondlog(LVL8 "insecure file '%s'", output_file); + goto err; + } + fh= fopen(validated_output_file, "a"); + if (!fh) + { + crondlog(LVL8 "sslgetcert: unable to append to '%s'", + validated_output_file); + goto err; + } + fclose(fh); + } + + if (A_arg) + { + if (!validate_atlas_id(A_arg)) + { + crondlog(LVL8 "bad atlas ID '%s'", A_arg); + goto err; + } + } + if (B_arg) + { + if (!validate_atlas_id(B_arg)) + { + crondlog(LVL8 "bad bundle ID '%s'", B_arg); + goto err; + } + } + + if (version_str == NULL || strcasecmp(version_str, "TLS1.2") == 0) + { + major= 3; /* TLS 1.2 */ + minor= 3; + } + else if (strcasecmp(version_str, "TLS1.1") == 0) + { + major= 3; + minor= 2; + } + else if (strcasecmp(version_str, "TLS1.0") == 0) + { + major= 3; + minor= 1; + } + else if (strcasecmp(version_str, "SSL3.0") == 0) + { + major= 3; + minor= 0; + } + else + { + crondlog(LVL8 "bad protocol version '%s'", version_str); + goto err; + } + + state= xzalloc(sizeof(*state)); + state->base= hg_base; + state->atlas= A_arg ? strdup(A_arg) : NULL; + state->bundle= B_arg ? strdup(B_arg) : NULL; + state->output_file= validated_output_file; + validated_output_file= NULL; + state->response_in= validated_response_in; + validated_response_in= NULL; + state->response_out= validated_response_out; + validated_response_out= NULL; + state->infname= infname ? strdup(infname) : NULL; + state->hostname= strdup(hostname); + state->sni= h_arg ? strdup(h_arg) : NULL; + state->major_version= major; + state->minor_version= minor; + if (str_port) + state->portname= strdup(str_port); + else + state->portname= strdup("443"); + + state->only_v4= 2; + + state->only_v4= !!only_v4; /* Gcc bug? 
*/ + state->only_v6= !!only_v6; + + state->line= NULL; + state->linemax= 0; + state->linelen= 0; + state->lineoffset= 0; + + for (i= 0; i<hg_base->tabsiz; i++) + { + if (hg_base->table[i] == NULL) + break; + } + if (i >= hg_base->tabsiz) + { + newsiz= 2*hg_base->tabsiz; + hg_base->table= xrealloc(hg_base->table, + newsiz*sizeof(*hg_base->table)); + for (i= hg_base->tabsiz; i<newsiz; i++) + hg_base->table[i]= NULL; + i= hg_base->tabsiz; + hg_base->tabsiz= newsiz; + } + state->index= i; + hg_base->table[i]= state; + hg_base->done= done; + + return state; + +err: + if (validated_response_in) free(validated_response_in); + if (validated_response_out) free(validated_response_out); + if (validated_output_file) free(validated_output_file); + return NULL; +} + +static void report(struct state *state) +{ + FILE *fh; + char hostbuf[NI_MAXHOST]; + // char line[160]; + + fh= NULL; + if (state->output_file) + { + fh= fopen(state->output_file, "a"); + if (!fh) + crondlog(DIE9 "sslgetcert: unable to append to '%s'", + state->output_file); + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) ", " + "%s, " + DBQ(lts) ":%d, " + DBQ(time) ":%llu, ", + state->atlas, atlas_get_version_json_str(), + get_timesync(), (unsigned long long)state->gstart); + if (state->bundle) + fprintf(fh, DBQ(bundle) ":%s, ", state->bundle); + } + + fprintf(fh, DBQ(dst_name) ":" DBQ(%s) ", " + DBQ(dst_port) ":" DBQ(%s) ", ", + state->hostname, state->portname); + + if (!state->tu_env.host_is_literal) + fprintf(fh, DBQ(ttr) ":%f, ", state->tu_env.ttr); + + if (!state->dnserr) + { + getnameinfo((struct sockaddr *)&state->sin6, state->socklen, + hostbuf, sizeof(hostbuf), NULL, 0, + NI_NUMERICHOST); + fprintf(fh, DBQ(dst_addr) ":" DBQ(%s) ", ", hostbuf); + fprintf(fh, DBQ(af) ": %d, ", + state->sin6.sin6_family == AF_INET6 ?
6 : 4); + + } + + fprintf(fh, "%s }\n", state->result); + free(state->result); + state->result= NULL; + state->resmax= 0; + state->reslen= 0; + + if (state->output_file) + fclose(fh); + + free(state->post_buf); + state->post_buf= NULL; + + if (state->linemax) + { + state->linemax= 0; + free(state->line); + state->line= NULL; + } + + state->bev= NULL; + + tu_cleanup(&state->tu_env); + + if (state->resp_file) + { + fclose(state->resp_file); + state->resp_file= NULL; + } + + state->busy= 0; + if (state->base->done) + state->base->done(state, 0); +} + + +static void add_str(struct state *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen + len+1 > state->resmax) + { + state->resmax= state->reslen + len+1 + 80; + state->result= xrealloc(state->result, state->resmax); + } + memcpy(state->result+state->reslen, str, len+1); + state->reslen += len; + //printf("add_str: result = '%s'\n", state->result); +} + +static void readcb(struct bufferevent *bev UNUSED_PARAM, void *ptr) +{ + int r; + struct state *state; + + state= ENV2STATE(ptr); + + for (;;) + { + switch(state->readstate) + { + case READ_HELLO: + r= eat_server_hello(state); + if (r == -1) + return; + if (r == 1) + { + state->readstate= READ_DONE; + continue; + } + state->readstate= READ_CERTS; + continue; + + case READ_CERTS: + r= eat_certificate(state); + if (r == -1) + return; + state->readstate= READ_DONE; + continue; + + case READ_DONE: + msgbuf_cleanup(&state->msginbuf); + buf_cleanup(&state->inbuf); + if (!state->response_in) + tu_cleanup(&state->tu_env); + state->busy= 0; + if (state->base->done) + state->base->done(state, 0); + return; + + default: + printf("readcb: readstate = %d\n", state->readstate); + return; + } + } +} + +static FILE *report_head(struct state *state) +{ + int major, minor; + const char *method; + FILE *fh; + double resptime; + struct timespec endtime; + char hostbuf[NI_MAXHOST]; + + gettime_mono(&endtime); + + fh= NULL; + if (state->output_file) + { + fh= fopen(state->output_file, "a"); + if (!fh) + { + crondlog(DIE9 "sslgetcert: unable to append to '%s'", + state->output_file); + return NULL; + } + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) + ", %s" + ", " DBQ(lts) ":%d", + state->atlas, atlas_get_version_json_str(), + get_timesync()); + if (state->bundle) + fprintf(fh, DBQ(bundle) ":%s, ", state->bundle); + } + + fprintf(fh, "%s" DBQ(time) ":%llu", + state->atlas ? 
", " : "", (unsigned long long)atlas_time()); + fprintf(fh, ", " DBQ(dst_name) ":" DBQ(%s) ", " + DBQ(dst_port) ":" DBQ(%s), + state->hostname, state->portname); + + if (!state->tu_env.host_is_literal) + { + /* Assume that name resolution was required */ + fprintf(fh, ", " DBQ(ttr) ":%f", state->tu_env.ttr); + } + + if (state->recv_major == 3 && state->recv_minor == 3) + { + method= "TLS"; + major= 1; + minor= 2; + } + else if (state->recv_major == 3 && state->recv_minor == 2) + { + method= "TLS"; + major= 1; + minor= 1; + } + else if (state->recv_major == 3 && state->recv_minor == 1) + { + method= "TLS"; + major= 1; + minor= 0; + } + else if (state->recv_major == 3 && state->recv_minor == 0) + { + method= "SSL"; + major= 3; + minor= 0; + } + else + { + method= "(unknown)"; + major= state->recv_major; + minor= state->recv_minor; + } + + fprintf(fh, ", " DBQ(method) ":" DBQ(%s) ", " + DBQ(ver) ":" DBQ(%d.%d), method, major, minor); + getnameinfo((struct sockaddr *)&state->sin6, state->socklen, + hostbuf, sizeof(hostbuf), NULL, 0, + NI_NUMERICHOST); + fprintf(fh, ", " DBQ(dst_addr) ":" DBQ(%s), hostbuf); + fprintf(fh, ", " DBQ(af) ": %d", + state->sin6.sin6_family == AF_INET6 ? 6 : 4); + + getnameinfo((struct sockaddr *)&state->loc_sin6, + state->loc_socklen, hostbuf, sizeof(hostbuf), NULL, 0, + NI_NUMERICHOST); + fprintf(fh, ", " DBQ(src_addr) ":" DBQ(%s), hostbuf); + + resptime= (state->t_connect.tv_sec- state->start.tv_sec)*1e3 + + (state->t_connect.tv_nsec-state->start.tv_nsec)/1e6; + fprintf(fh, ", " DBQ(ttc) ": %f", resptime); + + resptime= (endtime.tv_sec- state->start.tv_sec)*1e3 + + (endtime.tv_nsec-state->start.tv_nsec)/1e6; + fprintf(fh, ", " DBQ(rt) ": %f", resptime); + + fprintf(fh, ", " DBQ(server_cipher) ": " DBQ(0x%04x), state->server_cipher); + + return fh; +} + +static int eat_alert(struct state *state) +{ + int r, type, level, descr; + uint8_t *p; + struct msgbuf *msgbuf; + FILE *fh; + + msgbuf= &state->msginbuf; + + for (;;) + { + if (msgbuf->buffer.size - msgbuf->buffer.offset < 2) + { + r= msgbuf_read(state, msgbuf, &type, + &state->recv_major, &state->recv_minor); + if (r < 0) + { + if (errno != EAGAIN) + { + fprintf(stderr, + "eat_alert: msgbuf_read failed: %s\n", + strerror(errno)); + } + return -1; + } + if (type != MSG_ALERT) + { + fprintf(stderr, + "eat_alert: got bad type %d from msgbuf_read\n", + type); + return -1; + } + continue; + } + p= (uint8_t *)msgbuf->buffer.buf+msgbuf->buffer.offset; + level= p[0]; + descr= p[1]; + + fh= report_head(state); + if (!fh) + return -1; + + fprintf(fh, ", " DBQ(alert) ": { " DBQ(level) ": %d, " + DBQ(description) ": %d }", + level, descr); + + msgbuf->buffer.offset += 2; + break; + } + + fprintf(fh, " }\n"); + + if (state->output_file) + fclose(fh); + + return 1; +} + +static int eat_server_hello(struct state *state) +{ + int r, type; + size_t o, len, seslen, totlen; + uint8_t *p; + struct msgbuf *msgbuf; + + msgbuf= &state->msginbuf; + + for (;;) + { + if (msgbuf->buffer.size - msgbuf->buffer.offset < 4) + { + r= msgbuf_read(state, msgbuf, &type, + &state->recv_major, &state->recv_minor); + if (r < 0) + { + fprintf(stderr, + "eat_server_hello: msgbuf_read failed\n"); + return -1; + + } + if (type == MSG_ALERT) + { + r= eat_alert(state); + if (r == 0) + continue; + return r; /* No need to continue */ + } + if (type != MSG_HANDSHAKE) + { + fprintf(stderr, + "eat_server_hello: got bad type %d from msgbuf_read\n", + type); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + continue; + } + p= 
(uint8_t *)msgbuf->buffer.buf+msgbuf->buffer.offset; + if (p[0] != HS_SERVER_HELLO) + { + fprintf(stderr, "eat_server_hello: got type %d\n", + p[0]); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + len= (p[1] << 16) + (p[2] << 8) + p[3]; + if (msgbuf->buffer.size - msgbuf->buffer.offset < 4+len) + { + r= msgbuf_read(state, msgbuf, &type, + &state->recv_major, &state->recv_minor); + if (r < 0) + { + fprintf(stderr, + "eat_server_hello: msgbuf_read failed\n"); + return -1; + } + if (type != MSG_HANDSHAKE) + { + fprintf(stderr, + "eat_server_hello: got bad type %d from msgbuf_read\n", + type); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + continue; + } + + totlen= 4+len; + o= 4; + + /* ProtocolVersion */ + if (o+2 > totlen) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello too short (ProtocolVersion))); + report(state); + return -1; + } + o += 2; + + /* Random */ + if (o+32 > totlen) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello too short (Random))); + report(state); + return -1; + } + o += 32; + + /* opaque SessionID<0..32> */ + if (o+1 > totlen) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello too short (SessionID len))); + report(state); + return -1; + } + seslen= p[o]; + o++; + if (seslen > 32) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello bad SessionID len)); + report(state); + return -1; + } + if (o+seslen > totlen) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello too short (SessionID))); + report(state); + return -1; + } + o += seslen; + + + /* CipherSuite */ + if (o+2 > totlen) + { + add_str(state, DBQ(err) ":" + DBQ(Server Hello too short (CipherSuite))); + report(state); + return -1; + } + state->server_cipher= (p[o] << 8) | p[o+1]; + o += 2; + + msgbuf->buffer.offset += totlen; + break; + } + return 0; +} + +static int eat_certificate(struct state *state) +{ + int i, n, r, first, slen, need_nl, type; + size_t o, len; + uint8_t *p; + struct msgbuf *msgbuf; + FILE *fh; + struct buf tmpbuf; + + msgbuf= &state->msginbuf; + + for (;;) + { + if (msgbuf->buffer.size - msgbuf->buffer.offset < 4) + { + r= msgbuf_read(state, msgbuf, &type, + &state->recv_major, &state->recv_minor); + if (r < 0) + { + if (errno != EAGAIN) + { + fprintf(stderr, + "eat_certificate: msgbuf_read failed: %s\n", + strerror(errno)); + } + return -1; + } + if (type != MSG_HANDSHAKE) + { + fprintf(stderr, + "eat_certificate: got bad type %d from msgbuf_read\n", + type); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + continue; + } + p= (uint8_t *)msgbuf->buffer.buf+msgbuf->buffer.offset; + if (p[0] != HS_CERTIFICATE) + { + fprintf(stderr, "eat_certificate: got type %d\n", p[0]); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + len= (p[1] << 16) + (p[2] << 8) + p[3]; + + /* Note that msgbuf_read may cause the buffer + * (msgbuf->buffer.buf) to be reallocated. This will make + * p a wild pointer. To counter that, after msgbuf_read, + * either return an error to the caller or use 'continue' + * to restart at the top of the loop. 
+ */ + if (msgbuf->buffer.size - msgbuf->buffer.offset < 4+len) + { + r= msgbuf_read(state, msgbuf, &type, + &state->recv_major, &state->recv_minor); + if (r < 0) + { + fprintf(stderr, + "eat_certificate: msgbuf_read failed\n"); + return -1; + } + if (type != MSG_HANDSHAKE) + { + fprintf(stderr, + "eat_certificate: got bad type %d from msgbuf_read\n", + type); + add_str(state, DBQ(err) ":" DBQ(bad type)); + report(state); + return -1; + } + continue; + } + p += 4; + n= (p[0] << 16) + (p[1] << 8) + p[2]; + o= 3; + + fh= report_head(state); + if (fh == NULL) + return -1; + + first= 1; + fprintf(fh, ", " DBQ(cert) ":[ "); + + buf_init(&tmpbuf, NULL); + while (o < 3+n) + { + slen= (p[o] << 16) + (p[o+1] << 8) + p[o+2]; + if (o+3+slen > len) + { + fprintf(stderr, + "eat_certificate: got bad len %d\n", + slen); + msgbuf->buffer.offset= + msgbuf->buffer.size; + add_str(state, DBQ(err) ":" DBQ(bad len)); + report(state); + return -1; + } + buf_add_b64(&tmpbuf, p+o+3, slen); + fprintf(fh, "%s\"-----BEGIN CERTIFICATE-----\\n", + !first ? ", " : ""); + need_nl= 0; + for (i= tmpbuf.offset; ibuffer.offset += 4+len; + break; + } + + fprintf(fh, " }\n"); + + if (state->output_file) + fclose(fh); + + return 0; +} + +static void writecb(struct bufferevent *bev, void *ptr) +{ + char c; + struct state *state; + struct buf outbuf; + struct msgbuf msgoutbuf; + struct hsbuf hsbuf; + + state= ENV2STATE(ptr); + + for(;;) + { + switch(state->writestate) + { + case WRITE_HELLO: + gettime_mono(&state->t_connect); + + buf_init(&outbuf, bev); + msgbuf_init(&msgoutbuf, NULL, &outbuf); + hsbuf_init(&hsbuf); + + /* Major/minor */ + c= state->major_version; + hsbuf_add(&hsbuf, &c, 1); + + c= state->minor_version; + hsbuf_add(&hsbuf, &c, 1); + add_random(&hsbuf); + add_sessionid(&hsbuf); + add_ciphers(&hsbuf); + add_compression(&hsbuf); + + add_extensions(state, &hsbuf); + + hsbuf_final(&hsbuf, HS_CLIENT_HELLO, &msgoutbuf); + msgbuf_final(&msgoutbuf, MSG_HANDSHAKE); + + /* Ignore error */ + if (!state->response_in) + (void) buf_write(&outbuf); + + hsbuf_cleanup(&hsbuf); + msgbuf_cleanup(&msgoutbuf); + buf_cleanup(&outbuf); + + /* Done */ + state->writestate= WRITE_DONE; + continue; + + case WRITE_DONE: + return; + + default: + printf("writecb: unknown write state: %d\n", + state->writestate); + return; + } + } + +} + +static void err_reading(struct state *state) +{ + switch(state->readstate) + { + default: + printf("in err_reading, unhandled case\n"); + } +} + +static void dnscount(struct tu_env *env, int count) +{ + struct state *state; + + state= ENV2STATE(env); +} + +static void beforeconnect(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen) +{ + struct state *state; + + state= ENV2STATE(env); + + state->socklen= addrlen; + memcpy(&state->sin6, addr, state->socklen); + if (state->response_out) + { + write_response(state->resp_file, RESP_DSTADDR, + addrlen, addr); + } + + state->connecting= 1; + state->readstate= READ_HELLO; + state->writestate= WRITE_HELLO; + + state->linelen= 0; + state->lineoffset= 0; + + /* Clear result */ + //if (!state->do_all || !state->do_combine) + state->reslen= 0; + + gettime_mono(&state->start); +} + + +static void reporterr(struct tu_env *env, enum tu_err cause, + const char *str) +{ + struct state *state; + char line[80]; + + state= ENV2STATE(env); + + if (env != &state->tu_env) abort(); + + switch(cause) + { + case TU_DNS_ERR: + snprintf(line, sizeof(line), + DBQ(dnserr) ":" DBQ(%s), str); + add_str(state, line); + state->dnserr= 1; + report(state); + break; + + case 
TU_READ_ERR: + err_reading(state); + break; + + case TU_SOCKET_ERR: + snprintf(line, sizeof(line), + DBQ(sockerr) ":" DBQ(%s), str); + add_str(state, line); + report(state); + break; + + case TU_CONNECT_ERR: + snprintf(line, sizeof(line), + DBQ(err) ":" DBQ(connect: %s), str); + add_str(state, line); + + if (0 /*state->do_all*/) + report(state); + else + tu_restart_connect(&state->tu_env); + break; + + case TU_OUT_OF_ADDRS: + report(state); + break; + + case TU_BAD_ADDR: + state->socklen= env->dns_curr->ai_addrlen; + memcpy(&state->sin6, env->dns_curr->ai_addr, state->socklen); + add_str(state, DBQ(error) ": " DBQ(address not allowed)); + report(state); + break; + + default: + crondlog(DIE9 "reporterr: bad cause %d", cause); + } +} + +static void connected(struct tu_env *env, struct bufferevent *bev) +{ + struct state *state; + + state= ENV2STATE(env); + + if (env != &state->tu_env) abort(); + + state->connecting= 0; + state->bev= bev; + + buf_init(&state->inbuf, bev); + msgbuf_init(&state->msginbuf, &state->inbuf, NULL); + + state->loc_socklen= sizeof(state->loc_sin6); + if (state->response_in) + { + size_t len; + + len= state->loc_socklen; + read_response_file(state->resp_file, RESP_SOCKNAME, + &len, &state->loc_sin6); + state->loc_socklen= len; + } + else + { + getsockname(bufferevent_getfd(bev), + (struct sockaddr *)&state->loc_sin6, + &state->loc_socklen); + if (state->response_out) + { + write_response(state->resp_file, RESP_SOCKNAME, + state->loc_socklen, &state->loc_sin6); + } + } +} + +static void sslgetcert_start(void *vstate) +{ + size_t len; + struct state *state; + struct evutil_addrinfo hints; + struct timeval interval; + + state= vstate; + + if (state->busy) + { + printf("httget_start: busy\n"); + return; + } + state->busy= 1; + + state->dnserr= 0; + state->connecting= 0; + state->readstate= READ_HELLO; + state->writestate= WRITE_HELLO; + state->gstart= atlas_time(); + + if (state->response_out) + { + state->resp_file= fopen(state->response_out, "w"); + if (!state->resp_file) + { + crondlog(DIE9 "unable to write to '%s'", + state->response_out); + } + } + + + memset(&hints, '\0', sizeof(hints)); + hints.ai_socktype= SOCK_STREAM; + if (state->only_v4) + hints.ai_family= AF_INET; + else if (state->only_v6) + hints.ai_family= AF_INET6; + interval.tv_sec= CONN_TO; + interval.tv_usec= 0; + + if (state->response_in) + { + state->resp_file= fopen(state->response_in, "r"); + if (!state->resp_file) + { + crondlog(DIE9 "unable to read from '%s'", + state->response_in); + } + + /* Emulate ttr */ + tu_fake_ttr(&state->tu_env, state->hostname); + + len= sizeof(state->sin6); + read_response_file(state->resp_file, RESP_DSTADDR, + &len, &state->sin6); + state->socklen= len; + + /* Start time */ + gettime_mono(&state->start); + + connected(&state->tu_env, NULL); + + writecb(NULL, &state->tu_env); + while(state->resp_file != NULL) + readcb(NULL, &state->tu_env); + report(state); + } + else + { + tu_connect_to_name(&state->tu_env, state->hostname, 0, 0, + state->portname, + &interval, &hints, state->infname, NULL, NULL, + timeout_callback, + reporterr, dnscount, beforeconnect, + connected, readcb, writecb); + } +} + +static int sslgetcert_delete(void *vstate) +{ + int ind; + struct state *state; + struct hgbase *base; + + state= vstate; + + printf("sslgetcert_delete: state %p, index %d, busy %d\n", + state, state->index, state->busy); + + if (state->busy) + return 0; + + if (state->line) + crondlog(DIE9 "line is not empty"); + + base= state->base; + ind= state->index; + + if (base->table[ind] 
!= state) + crondlog(DIE9 "strange, state not in table"); + base->table[ind]= NULL; + + //event_del(&state->timer); + + free(state->atlas); + state->atlas= NULL; + free(state->bundle); + state->bundle= NULL; + free(state->output_file); + state->output_file= NULL; + free(state->hostname); + state->hostname= NULL; + free(state->portname); + state->portname= NULL; + free(state->infname); + state->infname= NULL; + + free(state); + + return 1; +} + +struct testops sslgetcert_ops = { sslgetcert_init, sslgetcert_start, + sslgetcert_delete }; + diff --git a/probe-busybox/eperd/tcputil.c b/probe-busybox/eperd/tcputil.c new file mode 100644 index 00000000..4caa710a --- /dev/null +++ b/probe-busybox/eperd/tcputil.c @@ -0,0 +1,575 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * tcputil.c + */ + +#include "libbb.h" +#include "eperd.h" +#include +#include +#include +#include + +#include "tcputil.h" + +const char *ssl_version= NULL; + +static int ssl_initialized= 0; + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx); +static int create_bev(struct tu_env *env); +static void eventcb(struct bufferevent *bev, short events, void *ptr); + +void tu_connect_to_name(struct tu_env *env, char *host, + bool do_tls, bool do_http2, char *port, + struct timeval *interval, + struct evutil_addrinfo *hints, + char *infname, + const char *server_name, + const char *cert_name, + void (*timeout_callback)(int unused, const short event, void *s), + void (*reporterr)(struct tu_env *env, enum tu_err cause, + const char *err), + void (*reportcount)(struct tu_env *env, int count), + void (*beforeconnect)(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen), + void (*connected)(struct tu_env *env, struct bufferevent *bev), + void (*readcb)(struct bufferevent *bev, void *ptr), + void (*writecb)(struct bufferevent *bev, void *ptr)) +{ + int r; + struct addrinfo *ai; + struct addrinfo loc_hints; + + env->interval= *interval; + env->infname= infname; + env->reporterr= reporterr; + env->reportcount= reportcount; + env->beforeconnect= beforeconnect; + env->connected= connected; + env->readcb= readcb; + env->writecb= writecb; + env->dns_res= NULL; + env->bev= NULL; + env->do_tls = do_tls; + env->do_http2 = do_http2; + env->server_name = server_name; + env->cert_name = cert_name; + + evtimer_assign(&env->timer, EventBase, + timeout_callback, env); + + /* Check if hostname is numeric or had to be resolved */ + env->host_is_literal= 0; + memset(&loc_hints, '\0', sizeof(loc_hints)); + loc_hints.ai_flags= AI_NUMERICHOST; + r= getaddrinfo(host, NULL, &loc_hints, &ai); + if (r == 0) + { + /* Getaddrinfo succeded so hostname is an address literal */ + freeaddrinfo(ai); + env->host_is_literal= 1; + } + + env->dnsip= 1; + env->connecting= 0; + gettime_mono(&env->start_time); + (void) evdns_getaddrinfo(DnsBase, host, port, hints, dns_cb, env); +} + +void tu_restart_connect(struct tu_env *env) +{ + int r; + struct bufferevent *bev; + + /* Connect failed, try next address */ + if (env->dns_curr) /* Just to be on the safe side */ + { + env->dns_curr= env->dns_curr->ai_next; + } + while (env->dns_curr) + { + evtimer_add(&env->timer, &env->interval); + + r= atlas_check_addr(env->dns_curr->ai_addr, + env->dns_curr->ai_addrlen); + if (r == -1) + { + env->reporterr(env, TU_BAD_ADDR, ""); + return; + } + + env->beforeconnect(env, + env->dns_curr->ai_addr, env->dns_curr->ai_addrlen); + + /* Delete old bev */ + if (env->bev) + { + 
bufferevent_free(env->bev); + env->bev= NULL; + } + + /* And create a new one */ + r= create_bev(env); + if (r == -1) + { + return; + } + bev= env->bev; + if (bufferevent_socket_connect(bev, + env->dns_curr->ai_addr, + env->dns_curr->ai_addrlen) == 0) + { + /* Connecting, wait for callback */ + return; + } + + /* Immediate error? */ + if (!env->dns_curr) + { + /* Callback cleaned up */ + return; + } + env->dns_curr= env->dns_curr->ai_next; + } + + /* Something went wrong */ + bufferevent_free(env->bev); + env->bev= NULL; + if (env->dns_res) + { + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + } + env->reporterr(env, TU_OUT_OF_ADDRS, ""); +} + +void tu_fake_ttr(void *ctx, char *host) +{ + int r; + struct tu_env *env; + struct addrinfo *ai; + struct timespec now, elapsed; + double nsecs; + struct addrinfo loc_hints; + + env= ctx; + + /* Check if hostname is numeric or had to be resolved */ + env->host_is_literal= 0; + memset(&loc_hints, '\0', sizeof(loc_hints)); + loc_hints.ai_flags= AI_NUMERICHOST; + r= getaddrinfo(host, NULL, &loc_hints, &ai); + if (r == 0) + { + /* Getaddrinfo succeded so hostname is an address literal */ + freeaddrinfo(ai); + env->host_is_literal= 1; + } + + + gettime_mono(&env->start_time); + gettime_mono(&now); + elapsed.tv_sec= now.tv_sec - env->start_time.tv_sec; + if (now.tv_nsec < env->start_time.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - env->start_time.tv_nsec; + nsecs= (elapsed.tv_sec * 1e9 + elapsed.tv_nsec); + env->ttr= nsecs/1e6; +} + +void tu_cleanup(struct tu_env *env) +{ + if (env->dns_res) + { + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + } + if (env->tls_ctx) + { + SSL_CTX_free(env->tls_ctx); + env->tls_ctx= NULL; + } + if (env->bev) + { + bufferevent_free(env->bev); + env->bev= NULL; + } + + event_del(&env->timer); +} + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx) +{ + int r, count; + long err; + struct tu_env *env; + struct bufferevent *bev; + struct evutil_addrinfo *cur; + double nsecs; + struct timespec now, elapsed; + char errbuf[128]; + + env= ctx; + + if (!env->dnsip) + { + crondlog(LVL7 + "dns_cb: in dns_cb but not doing dns at this time"); + if (res) + evutil_freeaddrinfo(res); + return; + } + + gettime_mono(&now); + elapsed.tv_sec= now.tv_sec - env->start_time.tv_sec; + if (now.tv_nsec < env->start_time.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - env->start_time.tv_nsec; + nsecs= (elapsed.tv_sec * 1e9 + elapsed.tv_nsec); + env->ttr= nsecs/1e6; + + env->dnsip= 0; + + if (result != 0) + { + env->reporterr(env, TU_DNS_ERR, evutil_gai_strerror(result)); + return; + } + + env->dns_res= res; + env->dns_curr= res; + + count= 0; + for (cur= res; cur; cur= cur->ai_next) + count++; + + env->reportcount(env, count); + + while (env->dns_curr) + { + evtimer_add(&env->timer, &env->interval); + + r= atlas_check_addr(env->dns_curr->ai_addr, + env->dns_curr->ai_addrlen); + if (r == -1) + { + env->reporterr(env, TU_BAD_ADDR, ""); + return; + } + + env->beforeconnect(env, + env->dns_curr->ai_addr, env->dns_curr->ai_addrlen); + + /* Delete old bev if any */ + if (env->bev) + { + bufferevent_free(env->bev); + env->bev= NULL; + } + + /* And create a new one */ + r= create_bev(env); + if (r == -1) + { + return; + } + + bev= env->bev; + if (bufferevent_socket_connect(bev, + env->dns_curr->ai_addr, + env->dns_curr->ai_addrlen) == 0) + { + /* Connecting, wait for 
callback */ + return; + } + + /* Immediate error, for example if there is no default route */ + + /* It is possible that the callback already freed dns_curr. */ + if (!env->dns_curr) + { + printf("dns_cb: callback ate dns_curr\n"); + if (env->dns_res) + crondlog(DIE9 "dns_cb: dns_res not null"); + return; + } + + err= bufferevent_get_openssl_error(bev); + if (err) + { + ERR_error_string_n(err, errbuf, sizeof(errbuf)); + env->reporterr(env, TU_CONNECT_ERR, errbuf); + } + else + { + env->reporterr(env, TU_CONNECT_ERR, strerror(errno)); + } + + /* Check again... */ + if (!env->dns_curr) + { + printf("dns_cb: reporterr ate dns_curr\n"); + if (env->dns_res) + crondlog(DIE9 "dns_cb: dns_res not null"); + return; + } + + env->dns_curr= env->dns_curr->ai_next; + } + + /* Something went wrong */ + printf("dns_cb: Connect failed\n"); + bufferevent_free(env->bev); + env->bev= NULL; + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + env->reporterr(env, TU_OUT_OF_ADDRS, ""); +} + +static int create_bev(struct tu_env *env) +{ + int af, fd, fl; + struct bufferevent *bev; + SSL *tls; + X509_VERIFY_PARAM *vpm = NULL; + + af= env->dns_curr->ai_addr->sa_family; + + /* Consistency check. These fields need to be clear */ + assert(!env->tls_ctx); +#if ENABLE_FEATURE_EVHTTPGET_HTTPS + if(env->do_tls || env->do_http2) + { + if (!ssl_initialized) { + ssl_initialized= 1; + RAND_poll(); + SSL_library_init(); /* call only once this is not reentrant. */ + ERR_load_crypto_strings(); + SSL_load_error_strings(); + OpenSSL_add_all_algorithms(); + + /* SSLeay_version seems work everywhere. + * What about OpenSSL_version(OPENSSL_VERSION)? + */ + ssl_version= SSLeay_version(SSLEAY_VERSION); + } + /* fancy ssl options yet. just what is default in lib */ + if ((env->tls_ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) + { + env->reporterr(env, TU_SSL_CTX_INIT_ERR, + "SSL_CTX_new call failed"); + return -1; + } + if (env->do_http2) + { + /* SSL_CTX_set_alpn_protos gets an ALPN list + * in wire format. Each string is prefixed by + * a length byte. 
+ */ + const unsigned char alpn_list[]= + { '\2', 'h', '2' }; + size_t alpn_len= sizeof(alpn_list); + int r; + + r= SSL_CTX_set_alpn_protos(env->tls_ctx, + alpn_list, alpn_len); + if (r != 0) + { + env->reporterr(env, TU_SSL_CTX_INIT_ERR, + "SSL_CTX_set_alpn_protos call failed"); + return -1; + } + if (env->cert_name) + { + if (!SSL_CTX_set_default_verify_paths + (env->tls_ctx)) + { + env->reporterr(env, + TU_SSL_CTX_INIT_ERR, + "SSL_CTX_set_default_verify_paths call failed"); + return -1; + } + vpm= X509_VERIFY_PARAM_new(); + if (!X509_VERIFY_PARAM_set1_host(vpm, + env->cert_name, 0)) + { + env->reporterr(env, + TU_SSL_CTX_INIT_ERR, + "X509_VERIFY_PARAM_set1_host call failed"); + X509_VERIFY_PARAM_free(vpm); + vpm= NULL; + return -1; + } + } + } + if (vpm) + { + if (!SSL_CTX_set1_param(env->tls_ctx, vpm)) + { + env->reporterr(env, TU_SSL_CTX_INIT_ERR, + "SSL_CTX_set1_param call failed"); + X509_VERIFY_PARAM_free(vpm); + vpm= NULL; + return -1; + } + X509_VERIFY_PARAM_free(vpm); + vpm= NULL; + SSL_CTX_set_verify(env->tls_ctx, + SSL_VERIFY_PEER, 0); + } + if ((tls = SSL_new(env->tls_ctx)) == NULL) { + env->reporterr(env, TU_SSL_OBJ_INIT_ERR, + "SSL_new call failed"); + return -1; + } + if (env->server_name) + { + if (!SSL_set_tlsext_host_name(tls, + env->server_name)) + { + env->reporterr(env, TU_SSL_OBJ_INIT_ERR, + "SSL_set_tlsext_host_name call failed"); + return -1; + } + } + bev = bufferevent_openssl_socket_new(EventBase, -1, tls, + BUFFEREVENT_SSL_CONNECTING, + BEV_OPT_CLOSE_ON_FREE); + if (bev == NULL) + { + env->reporterr(env, TU_SSL_INIT_ERR, + "bufferevent_openssl_socket_new call failed"); + return -1; + } + } + else if +#else + if +#endif + ((bev= bufferevent_socket_new(EventBase, -1, + BEV_OPT_CLOSE_ON_FREE)) == NULL) + { + crondlog(DIE9 "bufferevent_socket_new failed"); + } + if (env->infname) + { + fd= socket(af, SOCK_STREAM, 0); + if (fd == -1) + { + env->reporterr(env, TU_SOCKET_ERR, + "socket call failed"); + return -1; + } + + if (bind_interface(fd, af, env->infname) == -1) + { + env->reporterr(env, TU_SOCKET_ERR, + "bind_interface failed"); + close(fd); + return -1; + } + + /* Set socket to nonblocking */ + fl= fcntl(fd, F_GETFL); + if (fl < 0) { + env->reporterr(env, TU_SOCKET_ERR, "fcntl F_GETFL"); + close(fd); + return -1; + } + if (fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) { + env->reporterr(env, TU_SOCKET_ERR, "fcntl F_SETFL"); + close(fd); + return -1; + } + + bufferevent_setfd(bev, fd); + } + bufferevent_setcb(bev, env->readcb, env->writecb, eventcb, env); + bufferevent_enable(bev, EV_WRITE); + env->bev= bev; + env->connecting= 1; + + return 0; +} + +static void eventcb(struct bufferevent *bev, short events, void *ptr) +{ + long err; + struct tu_env *env; + char errbuf[80]; + + env= ptr; + + if (events & BEV_EVENT_ERROR) + { + if (env->connecting) + { + err= bufferevent_get_openssl_error(bev); + if (err) + { + ERR_error_string_n(err, errbuf, sizeof(errbuf)); + env->reporterr(env, TU_CONNECT_ERR, errbuf); + } + else + { + env->reporterr(env, TU_CONNECT_ERR, + strerror(errno)); + } + return; + } + events &= ~BEV_EVENT_ERROR; + } + if (events & BEV_EVENT_READING) + { + env->reporterr(env, TU_READ_ERR, ""); + events &= ~BEV_EVENT_READING; + return; + } + if (events & BEV_EVENT_CONNECTED) + { + const unsigned char *data; + unsigned int len; + + events &= ~BEV_EVENT_CONNECTED; + env->connecting= 0; + + if (env->do_http2) + { + /* Check if the server accepted the h2 ALPN */ + SSL_get0_alpn_selected( + bufferevent_openssl_get_ssl(bev), + &data, &len); + if (data == NULL) + 
{ + env->reporterr(env, TU_CONNECT_ERR, + "server does not offer 'h2' ALPN"); + return; + } + else if (len != 2 || memcmp("h2", data, len) != 0) + { + env->reporterr(env, TU_CONNECT_ERR, + "server offers wrong ALPN"); + return; + } + } + + bufferevent_enable(bev, EV_READ); + + env->connected(env, bev); + env->writecb(bev, ptr); + } + if (events) + printf("events = 0x%x\n", events); +} + diff --git a/probe-busybox/eperd/tcputil.h b/probe-busybox/eperd/tcputil.h new file mode 100644 index 00000000..2943fd6a --- /dev/null +++ b/probe-busybox/eperd/tcputil.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + * tcputil.h + */ + +#include +#include +#include +#include +#include + +enum tu_err { TU_DNS_ERR, TU_READ_ERR, TU_SOCKET_ERR, TU_CONNECT_ERR, + TU_OUT_OF_ADDRS, TU_BAD_ADDR, TU_SSL_CTX_INIT_ERR, TU_SSL_OBJ_INIT_ERR, + TU_SSL_INIT_ERR }; + +struct tu_env +{ + char dnsip; + char connecting; + char host_is_literal; + struct evutil_addrinfo *dns_res; + struct evutil_addrinfo *dns_curr; + struct bufferevent *bev; + struct timeval interval; + char *infname; + char do_tls; + char do_http2; + const char *server_name; + const char *cert_name; + SSL_CTX *tls_ctx; + struct event timer; + struct timespec start_time; /* name resolution */ + double ttr; + void (*reporterr)(struct tu_env *env, enum tu_err cause, + const char *str); + void (*reportcount)(struct tu_env *env, int count); + void (*beforeconnect)(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen); + void (*connected)(struct tu_env *env, struct bufferevent *bev); + void (*readcb)(struct bufferevent *bev, void *env); + void (*writecb)(struct bufferevent *bev, void *env); +}; + +extern const char *ssl_version; + +void tu_connect_to_name(struct tu_env *env, char *host, + bool do_tls, bool do_http2, char *port, + struct timeval *interval, + struct evutil_addrinfo *hints, + char *infname, + const char *server_name, + const char *cert_name, + void (*timeout_callback)(int unused, const short event, void *s), + void (*reporterr)(struct tu_env *env, enum tu_err cause, + const char *err), + void (*reportcount)(struct tu_env *env, int count), + void (*beforeconnect)(struct tu_env *env, + struct sockaddr *addr, socklen_t addrlen), + void (*connected)(struct tu_env *env, struct bufferevent *bev), + void (*readcb)(struct bufferevent *bev, void *ptr), + void (*writecb)(struct bufferevent *bev, void *ptr)); +void tu_restart_connect(struct tu_env *env); +void tu_fake_ttr(void *ctx, char *host); +void tu_cleanup(struct tu_env *env); diff --git a/probe-busybox/eperd/tls-hostcheck.c b/probe-busybox/eperd/tls-hostcheck.c new file mode 100644 index 00000000..fdcb8f5c --- /dev/null +++ b/probe-busybox/eperd/tls-hostcheck.c @@ -0,0 +1,217 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at http://curl.haxx.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. 
+ * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + ***************************************************************************/ + +/* This file is an amalgamation of hostcheck.c and most of rawstr.c + from cURL. The contents of the COPYING file mentioned above are: + +COPYRIGHT AND PERMISSION NOTICE + +Copyright (c) 1996 - 2013, Daniel Stenberg, . + +All rights reserved. + +Permission to use, copy, modify, and distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright +notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of a copyright holder shall not +be used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization of the copyright holder. +*/ + +#include "tls-hostcheck.h" +#include + +/* Portable, consistent toupper (remember EBCDIC). Do not use toupper() because + its behavior is altered by the current locale. */ +static char Curl_raw_toupper(char in) +{ + switch (in) { + case 'a': + return 'A'; + case 'b': + return 'B'; + case 'c': + return 'C'; + case 'd': + return 'D'; + case 'e': + return 'E'; + case 'f': + return 'F'; + case 'g': + return 'G'; + case 'h': + return 'H'; + case 'i': + return 'I'; + case 'j': + return 'J'; + case 'k': + return 'K'; + case 'l': + return 'L'; + case 'm': + return 'M'; + case 'n': + return 'N'; + case 'o': + return 'O'; + case 'p': + return 'P'; + case 'q': + return 'Q'; + case 'r': + return 'R'; + case 's': + return 'S'; + case 't': + return 'T'; + case 'u': + return 'U'; + case 'v': + return 'V'; + case 'w': + return 'W'; + case 'x': + return 'X'; + case 'y': + return 'Y'; + case 'z': + return 'Z'; + } + return in; +} + +/* + * Curl_raw_equal() is for doing "raw" case insensitive strings. This is meant + * to be locale independent and only compare strings we know are safe for + * this. See http://daniel.haxx.se/blog/2008/10/15/strcasecmp-in-turkish/ for + * some further explanation to why this function is necessary. + * + * The function is capable of comparing a-z case insensitively even for + * non-ascii. 
+ */ + +static int Curl_raw_equal(const char *first, const char *second) +{ + while(*first && *second) { + if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second)) + /* get out of the loop as soon as they don't match */ + break; + first++; + second++; + } + /* we do the comparison here (possibly again), just to make sure that if the + loop above is skipped because one of the strings reached zero, we must not + return this as a successful match */ + return (Curl_raw_toupper(*first) == Curl_raw_toupper(*second)); +} + +static int Curl_raw_nequal(const char *first, const char *second, size_t max) +{ + while(*first && *second && max) { + if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second)) { + break; + } + max--; + first++; + second++; + } + if(0 == max) + return 1; /* they are equal this far */ + + return Curl_raw_toupper(*first) == Curl_raw_toupper(*second); +} + +/* + * Match a hostname against a wildcard pattern. + * E.g. + * "foo.host.com" matches "*.host.com". + * + * We use the matching rule described in RFC6125, section 6.4.3. + * http://tools.ietf.org/html/rfc6125#section-6.4.3 + */ + +static int hostmatch(const char *hostname, const char *pattern) +{ + const char *pattern_label_end, *pattern_wildcard, *hostname_label_end; + int wildcard_enabled; + size_t prefixlen, suffixlen; + pattern_wildcard = strchr(pattern, '*'); + if(pattern_wildcard == NULL) + return Curl_raw_equal(pattern, hostname) ? + CURL_HOST_MATCH : CURL_HOST_NOMATCH; + + /* We require at least 2 dots in pattern to avoid too wide wildcard + match. */ + wildcard_enabled = 1; + pattern_label_end = strchr(pattern, '.'); + if(pattern_label_end == NULL || strchr(pattern_label_end+1, '.') == NULL || + pattern_wildcard > pattern_label_end || + Curl_raw_nequal(pattern, "xn--", 4)) { + wildcard_enabled = 0; + } + if(!wildcard_enabled) + return Curl_raw_equal(pattern, hostname) ? + CURL_HOST_MATCH : CURL_HOST_NOMATCH; + + hostname_label_end = strchr(hostname, '.'); + if(hostname_label_end == NULL || + !Curl_raw_equal(pattern_label_end, hostname_label_end)) + return CURL_HOST_NOMATCH; + + /* The wildcard must match at least one character, so the left-most + label of the hostname is at least as large as the left-most label + of the pattern. */ + if(hostname_label_end - hostname < pattern_label_end - pattern) + return CURL_HOST_NOMATCH; + + prefixlen = pattern_wildcard - pattern; + suffixlen = pattern_label_end - (pattern_wildcard+1); + return Curl_raw_nequal(pattern, hostname, prefixlen) && + Curl_raw_nequal(pattern_wildcard+1, hostname_label_end - suffixlen, + suffixlen) ? + CURL_HOST_MATCH : CURL_HOST_NOMATCH; +} + +int Curl_cert_hostcheck(const char *match_pattern, const char *hostname) +{ + if(!match_pattern || !*match_pattern || + !hostname || !*hostname) /* sanity check */ + return 0; + + if(Curl_raw_equal(hostname, match_pattern)) /* trivial case */ + return 1; + + if(hostmatch(hostname,match_pattern) == CURL_HOST_MATCH) + return 1; + return 0; +} diff --git a/probe-busybox/eperd/tls-hostcheck.h b/probe-busybox/eperd/tls-hostcheck.h new file mode 100644 index 00000000..f40bc434 --- /dev/null +++ b/probe-busybox/eperd/tls-hostcheck.h @@ -0,0 +1,30 @@ +#ifndef HEADER_CURL_HOSTCHECK_H +#define HEADER_CURL_HOSTCHECK_H +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. 
+ * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at http://curl.haxx.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + ***************************************************************************/ + +#define CURL_HOST_NOMATCH 0 +#define CURL_HOST_MATCH 1 +int Curl_cert_hostcheck(const char *match_pattern, const char *hostname); + +#endif /* HEADER_CURL_HOSTCHECK_H */ + diff --git a/probe-busybox/eperd/tls_hostname_validation.c b/probe-busybox/eperd/tls_hostname_validation.c new file mode 100644 index 00000000..163807f4 --- /dev/null +++ b/probe-busybox/eperd/tls_hostname_validation.c @@ -0,0 +1,172 @@ +/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */ + +/* +Copyright (C) 2012, iSEC Partners. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + */ + +/* + * Helper functions to perform basic hostname validation using OpenSSL. + * + * Please read "everything-you-wanted-to-know-about-openssl.pdf" before + * attempting to use this code. This whitepaper describes how the code works, + * how it should be used, and what its limitations are. + * + * Author: Alban Diquet + * License: See LICENSE + * + */ + +// Get rid of OSX 10.7 and greater deprecation warnings. +#if defined(__APPLE__) && defined(__clang__) +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#include +#include + +#include "tls_hostname_validation.h" +#include "hostcheck.h" + +#define HOSTNAME_MAX_SIZE 255 + +/** +* Tries to find a match for hostname in the certificate's Common Name field. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if the Common Name had a NUL character embedded in it. +* Returns Error if the Common Name could not be extracted. 
+*/ +static HostnameValidationResult matches_common_name(const char *hostname, const X509 *server_cert) { + int common_name_loc = -1; + X509_NAME_ENTRY *common_name_entry = NULL; + ASN1_STRING *common_name_asn1 = NULL; + char *common_name_str = NULL; + + // Find the position of the CN field in the Subject field of the certificate + common_name_loc = X509_NAME_get_index_by_NID(X509_get_subject_name((X509 *) server_cert), NID_commonName, -1); + if (common_name_loc < 0) { + return Error; + } + + // Extract the CN field + common_name_entry = X509_NAME_get_entry(X509_get_subject_name((X509 *) server_cert), common_name_loc); + if (common_name_entry == NULL) { + return Error; + } + + // Convert the CN field to a C string + common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry); + if (common_name_asn1 == NULL) { + return Error; + } + common_name_str = (char *) ASN1_STRING_data(common_name_asn1); + + // Make sure there isn't an embedded NUL character in the CN + if ((size_t)ASN1_STRING_length(common_name_asn1) != strlen(common_name_str)) { + return MalformedCertificate; + } + + // Compare expected hostname with the CN + if (Curl_cert_hostcheck(common_name_str, hostname) == CURL_HOST_MATCH) { + return MatchFound; + } + else { + return MatchNotFound; + } +} + + +/** +* Tries to find a match for hostname in the certificate's Subject Alternative Name extension. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns NoSANPresent if the SAN extension was not present in the certificate. +*/ +static HostnameValidationResult matches_subject_alternative_name(const char *hostname, const X509 *server_cert) { + HostnameValidationResult result = MatchNotFound; + int i; + int san_names_nb = -1; + STACK_OF(GENERAL_NAME) *san_names = NULL; + + // Try to extract the names within the SAN extension from the certificate + san_names = X509_get_ext_d2i((X509 *) server_cert, NID_subject_alt_name, NULL, NULL); + if (san_names == NULL) { + return NoSANPresent; + } + san_names_nb = sk_GENERAL_NAME_num(san_names); + + // Check each name within the extension + for (i=0; i<san_names_nb; i++) { + const GENERAL_NAME *current_name = sk_GENERAL_NAME_value(san_names, i); + + if (current_name->type == GEN_DNS) { + // Current name is a DNS name, let's check it + char *dns_name = (char *) ASN1_STRING_data(current_name->d.dNSName); + + // Make sure there isn't an embedded NUL character in the DNS name + if ((size_t)ASN1_STRING_length(current_name->d.dNSName) != strlen(dns_name)) { + result = MalformedCertificate; + break; + } + else { // Compare expected hostname with the DNS name + if (Curl_cert_hostcheck(dns_name, hostname) + == CURL_HOST_MATCH) { + result = MatchFound; + break; + } + } + } + } + sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free); + + return result; +} + + +/** +* Validates the server's identity by looking for the expected hostname in the +* server's certificate. As described in RFC 6125, it first tries to find a match +* in the Subject Alternative Name extension. If the extension is not present in +* the certificate, it checks the Common Name instead. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns Error if there was an error.
+*/ +HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert) { + HostnameValidationResult result; + + if((hostname == NULL) || (server_cert == NULL)) + return Error; + + // First try the Subject Alternative Names extension + result = matches_subject_alternative_name(hostname, server_cert); + if (result == NoSANPresent) { + // Extension was not found: try the Common Name + result = matches_common_name(hostname, server_cert); + } + + return result; +} diff --git a/probe-busybox/eperd/tls_hostname_validation.h b/probe-busybox/eperd/tls_hostname_validation.h new file mode 100644 index 00000000..54aa1c43 --- /dev/null +++ b/probe-busybox/eperd/tls_hostname_validation.h @@ -0,0 +1,56 @@ +/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */ + +/* +Copyright (C) 2012, iSEC Partners. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + */ + +/* + * Helper functions to perform basic hostname validation using OpenSSL. + * + * Please read "everything-you-wanted-to-know-about-openssl.pdf" before + * attempting to use this code. This whitepaper describes how the code works, + * how it should be used, and what its limitations are. + * + * Author: Alban Diquet + * License: See LICENSE + * + */ + +typedef enum { + MatchFound, + MatchNotFound, + NoSANPresent, + MalformedCertificate, + Error +} HostnameValidationResult; + +/** +* Validates the server's identity by looking for the expected hostname in the +* server's certificate. As described in RFC 6125, it first tries to find a match +* in the Subject Alternative Name extension. If the extension is not present in +* the certificate, it checks the Common Name instead. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns Error if there was an error. +*/ +HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert); diff --git a/probe-busybox/eperd/traceroute.c b/probe-busybox/eperd/traceroute.c new file mode 100644 index 00000000..f8359505 --- /dev/null +++ b/probe-busybox/eperd/traceroute.c @@ -0,0 +1,5018 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ * traceroute.c + */ + +#include "libbb.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eperd.h" +#include "atlas_path.h" + +#define SAFE_PREFIX_REL ATLAS_DATA_NEW_REL + +#ifndef STANDALONE_BUSYBOX +#define uh_sport source +#define uh_dport dest +#define uh_ulen len +#define uh_sum check +#endif + +#define TRACEROUTE_OPT_STRING ("!46IUFrTa:b:c:f:g:i:m:p:t:w:z:A:B:O:S:H:D:R:W:") + +#define OPT_4 (1 << 0) +#define OPT_6 (1 << 1) +#define OPT_I (1 << 2) +#define OPT_U (1 << 3) +#define OPT_F (1 << 4) +#define OPT_r (1 << 5) +#define OPT_T (1 << 6) + +#define IPHDR 20 +#define ICMP6_HDR (sizeof(struct icmp6_hdr)) +#define TCP_HDR (sizeof(*tcphdr)) + +#define BASE_PORT (0x8000 + 666) +#define SRC_BASE_PORT (20480) +#define MAX_DATA_SIZE (4096) + +#define DBQ(str) "\"" #str "\"" + +#define ICMPEXT_VERSION_SHIFT 4 + +#define ICMPEXT_MPLS 1 +#define ICMPEXT_MPLS_IN 1 + +#define MPLS_LABEL_SHIFT 12 +#define MPLS_EXT_SHIFT 9 +#define MPLS_EXT_MASK 0x7 +#define MPLS_S_BIT 0x100 +#define MPLS_TTL_MASK 0xff + +#define IP6_TOS(ip6_hdr) ((ntohl((ip6_hdr)->ip6_flow) >> 20) & 0xff) + +#define RESP_PACKET 1 +#define RESP_PEERNAME 2 +#define RESP_SOCKNAME 3 +#define RESP_PROTO 4 +#define RESP_RCVDTTL 5 +#define RESP_RCVDTCLASS 6 +#define RESP_SENDTO 7 +#define RESP_ADDRINFO 8 +#define RESP_ADDRINFO_SA 9 + +struct trtbase +{ + struct event_base *event_base; + + int my_pid; + + struct trtstate **table; + int tabsiz; + + /* For standalone traceroute. Called when a traceroute instance is + * done. Just one pointer for all instances. It is up to the caller + * to keep it consistent. + */ + void (*done)(void *state, int error); + + /* Leave some space for headers. The various traceroute variations + * have to check that it fits. + */ + u_char packet[MAX_DATA_SIZE+128]; +}; + +struct trtstate +{ + /* Parameters */ + char *atlas; + char *bundle_id; + char *hostname; + char *destportstr; + char *out_filename; + char *interface; + char do_icmp; + char do_tcp; + char do_udp; + char do_v6; + char dont_fragment; + char delay_name_res; + char trtcount; + unsigned short maxpacksize; + unsigned short hbhoptsize; + unsigned short destoptsize; + unsigned char firsthop; + unsigned char maxhops; + unsigned char gaplimit; + unsigned char parismod; + unsigned char parisbase; + unsigned duptimeout; + unsigned timeout; + int tos; + + char *response_in; /* Fuzzing */ + char *response_out; + + /* Base and index in table */ + struct trtbase *base; + int index; + + struct sockaddr_in6 sin6; + socklen_t socklen; + struct sockaddr_in6 loc_sin6; + socklen_t loc_socklen; + + int sent; + uint8_t hop; + uint16_t paris; + uint16_t seq; + unsigned short curpacksize; + + int socket_icmp; /* Socket for sending and receiving + * ICMPs */ + struct event event_icmp; /* Event for this socket */ + int socket_tcp; /* Socket for sending and receiving + * raw TCP */ + struct event event_tcp; /* Event for this socket */ + + uint8_t last_response_hop; /* Hop at which we last got something + * back. + */ + unsigned done:1; /* We got something from the target + * host or a destination unreachable. + */ + unsigned not_done:1; /* Not got something else */ + unsigned lastditch:1; /* In last-ditch hop */ + unsigned busy:1; /* Busy, do not start another one */ + unsigned gotresp:1; /* Got a response to the last packet + * we sent. For dup detection. 
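+					 * When a reply comes in, the probe
+					 * timer is re-armed with duptimeout
+					 * instead of sending the next probe
+					 * right away, so late duplicates of
+					 * the same seq can still be caught
+					 * and reported as "dup":true.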
+ */ + unsigned dnsip:1; /* Busy with dns name resolution */ + unsigned no_src:1; /* Did not bind yet */ + struct evutil_addrinfo *dns_res; + struct evutil_addrinfo *dns_curr; + + time_t starttime; + struct timespec xmit_time; + + struct timespec start_time; /* At the moment only for + * DNS resolution + */ + double ttr; /* Time to resolve a name, in ms */ + + struct event timer; + + unsigned long min; + unsigned long max; + unsigned long sum; + int sentpkts; + int rcvdpkts; + int duppkts; + + char *result; + size_t reslen; + size_t resmax; + char open_result; + + FILE *resp_file_out; /* Fuzzing */ +}; + +static struct trtbase *trt_base; + +struct v4_ph +{ + struct in_addr src; + struct in_addr dst; + uint8_t zero; + uint8_t proto; + uint16_t len; +}; + +struct v6_ph +{ + struct in6_addr src; + struct in6_addr dst; + uint32_t len; + uint8_t zero[3]; + uint8_t nxt; +}; + +struct v6info +{ + uint16_t fuzz; + uint32_t pid; + uint32_t id; + uint32_t seq; + struct timespec tv; +}; + +static int create_socket(struct trtstate *state, int do_tcp); +static void ready_callback4(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); +static void ready_tcp4(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); +static void ready_callback6(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); +static void noreply_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s); + +static int in_cksum(unsigned short *buf, int sz) +{ + int nleft = sz; + int sum = 0; + unsigned short *w = buf; + unsigned short ans = 0; + + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + + if (nleft == 1) { + *(unsigned char *) (&ans) = *(unsigned char *) w; + sum += ans; + } + + sum = (sum >> 16) + (sum & 0xFFFF); + sum += (sum >> 16); + ans = ~sum; + return ans; +} + +static int in_cksum_udp(struct v4_ph *v4_ph, struct udphdr *udp, + unsigned short *buf, int sz) +{ + int nleft = sz; + int sum = 0; + unsigned short *w = buf; + unsigned short ans = 0; + + nleft= sizeof(*v4_ph); + w= (unsigned short *)v4_ph; + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + + if (udp) + { + nleft= sizeof(*udp); + w= (unsigned short *)udp; + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + } + + nleft= sz; + w= buf; + + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + + if (nleft == 1) { + *(unsigned char *) (&ans) = *(unsigned char *) w; + sum += ans; + } + + sum = (sum >> 16) + (sum & 0xFFFF); + sum += (sum >> 16); + ans = ~sum; + return ans; +} + +static int in_cksum_icmp6(struct v6_ph *v6_ph, unsigned short *buf, int sz) +{ + int nleft = sz; + int sum = 0; + unsigned short *w = buf; + unsigned short ans = 0; + + nleft= sizeof(*v6_ph); + w= (unsigned short *)v6_ph; + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + + nleft= sz; + w= buf; + + while (nleft > 1) { + sum += *w++; + nleft -= 2; + } + + if (nleft == 1) { + *(unsigned char *) (&ans) = *(unsigned char *) w; + sum += ans; + } + + sum = (sum >> 16) + (sum & 0xFFFF); + sum += (sum >> 16); + ans = ~sum; + return ans; +} + +static void add_str(struct trtstate *state, const char *str) +{ + size_t len; + + len= strlen(str); + if (state->reslen + len+1 > state->resmax) + { + state->resmax= state->reslen + len+1 + 80; + state->result= xrealloc(state->result, state->resmax); + } + memcpy(state->result+state->reslen, str, len+1); + state->reslen += len; + //printf("add_str: result = '%s'\n", state->result); +} + +static void 
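+/* The in_cksum*() helpers above compute the standard RFC 1071 Internet
+ * checksum: the data is summed as 16-bit words, carries are folded back
+ * into the low 16 bits and the result is complemented.  For example, the
+ * two words 0x4500 and 0x0054 sum to 0x4554, giving a checksum of 0xbaab.
+ * in_cksum_udp() and in_cksum_icmp6() additionally sum the IPv4/IPv6
+ * pseudo header in front of the transport header and payload.
+ */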
report(struct trtstate *state) +{ + int r; + FILE *fh; + const char *proto; + struct addrinfo *ai; + char namebuf[NI_MAXHOST]; + struct addrinfo hints; + + event_del(&state->timer); + + if (state->out_filename) + { + fh= fopen(state->out_filename, "a"); + if (!fh) + crondlog(DIE9 "traceroute: unable to append to '%s'", + state->out_filename); + } + else + fh= stdout; + + fprintf(fh, "RESULT { "); + if (state->atlas) + { + fprintf(fh, DBQ(id) ":" DBQ(%s) + ", %s" + ", " DBQ(lts) ":%d" + ", " DBQ(time) ":%llu" + ", " DBQ(endtime) ":%llu, ", + state->atlas, atlas_get_version_json_str(), + get_timesync(), + (unsigned long long)state->starttime, + (unsigned long long)atlas_time()); + if (state->bundle_id) + fprintf(fh, DBQ(bundle) ":%s, ", state->bundle_id); + } + + fprintf(fh, DBQ(dst_name) ":" DBQ(%s), + state->hostname); + + /* Check if hostname is numeric or had to be resolved */ + memset(&hints, '\0', sizeof(hints)); + hints.ai_flags= AI_NUMERICHOST; + r= getaddrinfo(state->hostname, NULL, &hints, &ai); + if (r == 0) + { + /* Getaddrinfo succeded so hostname is an address literal */ + freeaddrinfo(ai); + } + else + { + /* Assume that name resolution was required */ + fprintf(fh, ", " DBQ(ttr) ":%f", state->ttr); + } + + if (!state->dnsip) + { + getnameinfo((struct sockaddr *)&state->sin6, state->socklen, + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + + fprintf(fh, ", " DBQ(dst_addr) ":" DBQ(%s), namebuf); + } + if (!state->dnsip && !state->no_src) + { + namebuf[0]= '\0'; + getnameinfo((struct sockaddr *)&state->loc_sin6, + state->loc_socklen, + namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); + + fprintf(fh, ", " DBQ(src_addr) ":" DBQ(%s), namebuf); + } + + if (state->do_icmp) + proto= "ICMP"; + else if (state->do_tcp) + proto= "TCP"; + else + proto= "UDP"; + fprintf(fh, ", " DBQ(proto) ":" DBQ(%s) ", " DBQ(af) ": %d", + proto, + state->dnsip ? (state->do_v6 ? 6 : 4) : + (state->sin6.sin6_family == AF_INET6 ? 6 : 4)); + + fprintf(fh, ", " DBQ(size) ":%d", state->maxpacksize); + if (state->parismod) + { + fprintf(fh, ", " DBQ(paris_id) ":%d", state->paris); + } + fprintf(fh, ", " DBQ(result) ": [ %s ] }\n", state->result); + + free(state->result); + state->result= NULL; + + if (state->out_filename) + fclose(fh); + + /* Kill the event and close socket */ + if (state->socket_icmp != -1) + { + event_del(&state->event_icmp); + close(state->socket_icmp); + state->socket_icmp= -1; + } + if (state->socket_tcp != -1) + { + event_del(&state->event_tcp); + close(state->socket_tcp); + state->socket_tcp= -1; + } + + state->busy= 0; + + if (state->base->done) + state->base->done(state, 0); +} + +static int set_tos(struct trtstate *state, int sock, int af, int inner) +{ + int r; + char line[80]; + + if (!state->tos) + return 0; /* Nothing to do */ + + if (state->response_in) + return 0; /* Nothing to do */ + + if (af == AF_INET6) + { + r= setsockopt(sock, IPPROTO_IPV6, IPV6_TCLASS, &state->tos, + sizeof(state->tos)); + } + else + { + r= setsockopt(sock, IPPROTO_IP, IP_TOS, &state->tos, + sizeof(state->tos)); + } + + if (r == -1) + { + crondlog(LVL7 "setting %s failed with '%s'", + af == AF_INET6 ? "traffic class" : "ToS", + strerror(errno)); + + snprintf(line, sizeof(line), + "%s" DBQ(error) ":" + DBQ(setting %s failed) + "%s", inner ? (state->sent ? " }, { " : "{ ") : ", ", + af == AF_INET6 ? "traffic class" : "ToS", + inner ? 
" } ] }" : " }"); + add_str(state, line); + report(state); + return -1; + } + + return 0; +} + +static void send_pkt(struct trtstate *state) +{ + int r, hop, len, on, sock, serrno; + uint16_t sum, val; + unsigned usum; + struct trtbase *base; + struct icmp *icmp_hdr; + struct icmp6_hdr *icmp6_hdr; + struct v6info *v6info; + struct tcphdr *tcphdr; + struct v4_ph v4_ph; + struct v6_ph v6_ph; + struct udphdr udp; + struct timeval interval; + struct sockaddr_in6 sin6copy; + char line[80]; + char id[]= "http://atlas.ripe.net Atlas says Hi!"; + struct r_errno + { + int r; + int error; + } r_errno; + + state->gotresp= 0; + + base= state->base; + + if (state->sent >= state->trtcount) + { + add_str(state, " } ] }"); + if (state->hop >= state->maxhops || + (state->done && !state->not_done)) + { + /* We are done */ + if (state->resp_file_out) + { + r= 0; + write_response(state->resp_file_out, + RESP_SENDTO, sizeof(r), &r); + } + report(state); + return; + } + + state->hop++; + state->sent= 0; + state->done= 0; + state->not_done= 0; + + if (state->hop - state->last_response_hop > + state->gaplimit) + { +#if 0 + printf("gaplimit reached: %d > %d + %d\n", + state->hop, state->last_response_hop, + state->gaplimit); +#endif + if (state->lastditch) + { + /* Also done with last-ditch probe. */ + if (state->resp_file_out) + { + r= 0; + write_response(state->resp_file_out, + RESP_SENDTO, sizeof(r), &r); + } + report(state); + return; + } + state->lastditch= 1; + state->hop= 255; + } + + snprintf(line, sizeof(line), + ", { " DBQ(hop) ":%d, " DBQ(result) ": [ ", state->hop); + add_str(state, line); + state->open_result= 0; + } + state->seq++; + + gettime_mono(&state->xmit_time); + + if (state->sin6.sin6_family == AF_INET6) + { + hop= state->hop; + + if (state->do_tcp) + { + if (state->response_in) + sock= open("/dev/null", O_RDWR); + else + { + sock= socket(AF_INET6, SOCK_RAW, IPPROTO_TCP); + } + if (sock == -1) + { + crondlog(DIE9 "socket failed"); + } + + on= 1; + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, + sizeof(on)); + +#if 1 + if (state->hbhoptsize != 0) + { + do_ipv6_option(sock, 0 /* hbh */, + state->hbhoptsize); + } + if (state->destoptsize != 0) + { + do_ipv6_option(sock, 1 /* dest */, + state->destoptsize); + } +#endif + + if (set_tos(state, sock, AF_INET6, 1 /*inner*/) == -1) + { + close(sock); + return; + } + + /* Bind to source addr/port */ + if (state->response_in) + r= 0; /* No need to bind */ + else + { + r= bind(sock, + (struct sockaddr *)&state->loc_sin6, + state->loc_socklen); + } + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(bind failed: %s) " } ] }", + state->sent ? 
" }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + close(sock); + return; + } + + tcphdr= (struct tcphdr *)base->packet; + memset(tcphdr, '\0', sizeof(*tcphdr)); + + len= sizeof(*tcphdr); + + tcphdr->seq= htonl((state->index) << 16 | state->seq); + tcphdr->doff= len / 4; + tcphdr->syn= 1; + + if (len+state->curpacksize > sizeof(base->packet)) + { + crondlog( + DIE9 "base->packet too small, need at least %d", + len+state->curpacksize); + } + if (state->curpacksize > 0) + { + memset(&base->packet[len], '\0', + state->curpacksize); + strcpy((char *)&base->packet[len], id); + len += state->curpacksize; + } + + { + int offset = 2; + setsockopt(sock, IPPROTO_IPV6, IPV6_CHECKSUM, + &offset, sizeof(offset)); + } + + memset(&v6_ph, '\0', sizeof(v6_ph)); + v6_ph.src= state->loc_sin6.sin6_addr; + v6_ph.dst= state->sin6.sin6_addr; + v6_ph.len= htonl(len); + v6_ph.nxt= IPPROTO_TCP; + tcphdr->source= state->loc_sin6.sin6_port; + tcphdr->dest= state->sin6.sin6_port; + tcphdr->uh_sum= 0; + + sum= in_cksum_icmp6(&v6_ph, + (unsigned short *)base->packet, len); + + tcphdr->check= sum; + + /* Set hop count */ + setsockopt(sock, SOL_IPV6, IPV6_UNICAST_HOPS, + &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? IPV6_PMTUDISC_DO : + IPV6_PMTUDISC_DONT); + setsockopt(sock, IPPROTO_IPV6, + IPV6_MTU_DISCOVER, &on, sizeof(on)); + + sin6copy= state->sin6; + sin6copy.sin6_port= 0; + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(sock, base->packet, len, 0, + (struct sockaddr *)&sin6copy, + state->socklen); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=1; if (doit && r != -1) + { serrno= ENOSYS; r= -1; } doit= !doit; } +#endif + close(sock); + + if (r == -1) + { + if (serrno != EACCES && + serrno != ECONNREFUSED && + serrno != EMSGSIZE) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + else if (state->do_icmp) + { + /* Set hop count */ + setsockopt(state->socket_icmp, SOL_IPV6, + IPV6_UNICAST_HOPS, &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? 
IPV6_PMTUDISC_DO : + IPV6_PMTUDISC_DONT); + setsockopt(state->socket_icmp, IPPROTO_IPV6, + IPV6_MTU_DISCOVER, &on, sizeof(on)); + + do_ipv6_option(state->socket_icmp, 0 /* hbh */, + state->hbhoptsize); + do_ipv6_option(state->socket_icmp, 1 /* dest */, + state->destoptsize); + + icmp6_hdr= (struct icmp6_hdr *)base->packet; + icmp6_hdr->icmp6_type= ICMP6_ECHO_REQUEST; + icmp6_hdr->icmp6_code= 0; + icmp6_hdr->icmp6_cksum= 0; + icmp6_hdr->icmp6_id= htons(base->my_pid); + icmp6_hdr->icmp6_seq= htons(state->seq); + + v6info= (struct v6info *)&icmp6_hdr[1]; + v6info->fuzz= 0; + v6info->pid= htonl(base->my_pid); + v6info->id= htonl(state->index); + v6info->seq= htonl(state->seq); + v6info->tv= state->xmit_time; + + len= sizeof(*v6info); + + if (state->curpacksize < len) + state->curpacksize= len; + if (ICMP6_HDR+state->curpacksize > + sizeof(base->packet)) + { + crondlog( + DIE9 "base->packet too small, need at least %d", + ICMP6_HDR+state->curpacksize); + } + + if (state->curpacksize > len) + { + memset(&base->packet[ICMP6_HDR+len], '\0', + state->curpacksize-len); + strcpy((char *)&base->packet[ICMP6_HDR+len], + id); + len= state->curpacksize; + } + + len += ICMP6_HDR; + + if (state->parismod) + { + memset(&v6_ph, '\0', sizeof(v6_ph)); + v6_ph.src= state->loc_sin6.sin6_addr; + v6_ph.dst= state->sin6.sin6_addr; + v6_ph.len= htonl(len); + v6_ph.nxt= IPPROTO_ICMPV6; + + sum= in_cksum_icmp6(&v6_ph, + (unsigned short *)base->packet, len); + + /* Avoid 0 */ + val= state->paris + 1; + + sum= ntohs(sum); + usum= sum + (0xffff - val); + sum= usum + (usum >> 16); + + v6info->fuzz= htons(sum); + + sum= in_cksum_icmp6(&v6_ph, + (unsigned short *)base->packet, len); + +#if 0 + printf( + "send_pkt: seq %d, paris %d, cksum= htons(0x%x)\n", + state->seq, state->paris, + ntohs(sum)); +#endif + } + + memset(&sin6copy, '\0', sizeof(sin6copy)); + sin6copy.sin6_family= AF_INET6; + sin6copy.sin6_addr= state->sin6.sin6_addr; + + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(state->socket_icmp, base->packet, + len, 0, (struct sockaddr *)&sin6copy, + sizeof(sin6copy)); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=1; if (doit && r != -1) + { serrno= ENOSYS; r= -1; } doit= !doit; } +#endif + + if (r == -1) + { + if (serrno != EMSGSIZE) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? 
" }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + else if (state->do_udp) + { + if (state->response_in) + sock= open("/dev/null", O_RDWR); + else + { + sock= socket(AF_INET6, SOCK_DGRAM, 0); + } + if (sock == -1) + { + crondlog(DIE9 "socket failed"); + } + + on= 1; + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, + sizeof(on)); + + if (state->hbhoptsize != 0) + { + do_ipv6_option(sock, 0 /* hbh */, + state->hbhoptsize); + } + if (state->destoptsize != 0) + { + do_ipv6_option(sock, 1 /* dest */, + state->destoptsize); + } + + if (set_tos(state, sock, AF_INET6, 1 /*inner*/) == -1) + { + close(sock); + return; + } + + /* Bind to source addr/port */ + if (state->response_in) + r= 0; /* No need to bind */ + else + { + r= bind(sock, + (struct sockaddr *)&state->loc_sin6, + state->loc_socklen); + } + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(bind failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + close(sock); + return; + } + + /* Set port */ + if (state->parismod) + { + state->sin6.sin6_port= htons(BASE_PORT + + state->paris); + } + else + { + state->sin6.sin6_port= htons(BASE_PORT + + state->seq); + } + + /* Set hop count */ + setsockopt(sock, SOL_IPV6, IPV6_UNICAST_HOPS, + &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? IPV6_PMTUDISC_DO : + IPV6_PMTUDISC_DONT); + setsockopt(sock, IPPROTO_IPV6, + IPV6_MTU_DISCOVER, &on, sizeof(on)); + + v6info= (struct v6info *)base->packet; + v6info->fuzz= 0; + v6info->pid= htonl(base->my_pid); + v6info->id= htonl(state->index); + v6info->seq= htonl(state->seq); + v6info->tv= state->xmit_time; + +#if 0 + printf( +"send_pkt: IPv6 UDP: pid = htonl(%d), id = htonl(%d), seq = htonl(%d)\n", + ntohl(v6info->pid), + ntohl(v6info->id), + ntohl(v6info->seq)); +#endif + + len= sizeof(*v6info); + + if (state->curpacksize < len) + state->curpacksize= len; + if (state->curpacksize > len) + { + memset(&base->packet[len], '\0', + state->curpacksize-len); + strcpy((char *)&base->packet[len], id); + len= state->curpacksize; + } + + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(sock, base->packet, len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=1; if (doit && r != -1) + { serrno= ENOSYS; r= -1; } doit= !doit; } +#endif + close(sock); + + if (r == -1) + { + if (serrno != EACCES && + serrno != ECONNREFUSED && + serrno != EMSGSIZE) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? 
" }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + } + else + { +#if 0 + printf( +"send_pkt: sending IPv4 packet, do_icmp %d, parismod %d, index %d, state %p\n", + state->do_icmp, state->parismod, state->index, state); +#endif + + if (state->do_tcp) + { + if (state->response_in) + sock= open("/dev/null", O_RDWR); + else + { + sock= socket(AF_INET, SOCK_RAW, IPPROTO_TCP); + } + if (sock == -1) + { + crondlog(DIE9 "socket failed"); + } + + on= 1; + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, + sizeof(on)); + + if (set_tos(state, sock, AF_INET, 1 /*inner*/) == -1) + { + close(sock); + return; + } + + /* Bind to source addr/port */ + if (state->response_in) + r= 0; + else + { + r= bind(sock, + (struct sockaddr *)&state->loc_sin6, + state->loc_socklen); + } +#if 0 + { static int doit=1; if (doit && r != -1) + { errno= ENOSYS; r= -1; } doit= !doit; } +#endif + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(bind failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + close(sock); + return; + } + + hop= state->hop; + + tcphdr= (struct tcphdr *)base->packet; + memset(tcphdr, '\0', sizeof(*tcphdr)); + + len= sizeof(*tcphdr); + + tcphdr->seq= htonl((state->index) << 16 | state->seq); + tcphdr->doff= len / 4; + tcphdr->syn= 1; + + if (len+state->curpacksize > sizeof(base->packet)) + { + crondlog( + DIE9 "base->packet too small, need at least %d", + len+state->curpacksize); + } + if (state->curpacksize > 0) + { + memset(&base->packet[len], '\0', + state->curpacksize); + strcpy((char *)&base->packet[len], id); + len += state->curpacksize; + } + + v4_ph.src= ((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr; + v4_ph.dst= ((struct sockaddr_in *)&state->sin6)-> + sin_addr; + v4_ph.zero= 0; + v4_ph.proto= IPPROTO_TCP; + v4_ph.len= htons(len); + tcphdr->source= + ((struct sockaddr_in *)&state->loc_sin6)-> + sin_port; + tcphdr->dest= ((struct sockaddr_in *)&state->sin6)-> + sin_port; + tcphdr->uh_sum= 0; + + sum= in_cksum_udp(&v4_ph, NULL, + (unsigned short *)base->packet, len); + + tcphdr->check= sum; + +#if 0 + if (state->parismod) + { + /* Make sure that the sequence number ends + * up in the checksum field. We can't store + * 0. So we add 1. + */ + if (state->seq == 0) + state->seq++; + val= state->seq; + } + else + { + /* Use id+1 */ + val= state->index+1; + } + + sum= ntohs(sum); + usum= sum + (0xffff - val); + sum= usum + (usum >> 16); + + base->packet[0]= sum >> 8; + base->packet[1]= sum; + + sum= in_cksum_udp(&udp_ph, &udp, + (unsigned short *)base->packet, len); +#endif + + /* Set hop count */ + setsockopt(sock, IPPROTO_IP, IP_TTL, + &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? 
IP_PMTUDISC_DO : + IP_PMTUDISC_DONT); + setsockopt(sock, IPPROTO_IP, + IP_MTU_DISCOVER, &on, sizeof(on)); + + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(sock, base->packet, len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=0; if (doit && r != -1) + { serrno= ENOSYS; r= -1; } doit= !doit; } +#endif + + close(sock); + if (r == -1) + { + if (serrno != EMSGSIZE) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + else if (state->do_icmp) + { + hop= state->hop; + + icmp_hdr= (struct icmp *)base->packet; + icmp_hdr->icmp_type= ICMP_ECHO; + icmp_hdr->icmp_code= 0; + icmp_hdr->icmp_cksum= 0; + icmp_hdr->icmp_id= htons(state->index | + (instance_id << TRT_ICMP4_INSTANCE_ID_SHIFT)); + icmp_hdr->icmp_seq= htons(state->seq); + icmp_hdr->icmp_data[0]= '\0'; + icmp_hdr->icmp_data[1]= '\0'; + + len= offsetof(struct icmp, icmp_data[2]); + + /* currpacksize is the amount of data after the + * ICMP header. len is the minimal amount of data + * including the ICMP header. Later len becomes + * the packet size including ICMP header. + */ + if (ICMP_MINLEN+state->curpacksize < len) + state->curpacksize= len-ICMP_MINLEN; + if (ICMP_MINLEN+state->curpacksize > + sizeof(base->packet)) + { + crondlog( + DIE9 "base->packet too small, need at least %d", + ICMP_MINLEN+state->curpacksize); + } + if (ICMP_MINLEN+state->curpacksize > len) + { + memset(&base->packet[len], '\0', + ICMP_MINLEN+state->curpacksize-len); + strcpy((char *)&base->packet[len], id); + len= ICMP_MINLEN+state->curpacksize; + } + + if (state->parismod) + { + sum= in_cksum((unsigned short *)icmp_hdr, len); + + sum= ntohs(sum); + usum= sum + (0xffff - state->paris); + sum= usum + (usum >> 16); + + icmp_hdr->icmp_data[0]= sum >> 8; + icmp_hdr->icmp_data[1]= sum; + } + + icmp_hdr->icmp_cksum= + in_cksum((unsigned short *)icmp_hdr, len); + +#if 0 + printf( + "send_pkt: seq %d, paris %d, icmp_cksum= htons(%d)\n", + state->seq, state->paris, + ntohs(icmp_hdr->icmp_cksum)); +#endif + + /* Set hop count */ + setsockopt(state->socket_icmp, IPPROTO_IP, IP_TTL, + &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? 
IP_PMTUDISC_DO : + IP_PMTUDISC_DONT); + setsockopt(state->socket_icmp, IPPROTO_IP, + IP_MTU_DISCOVER, &on, sizeof(on)); + + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(state->socket_icmp, base->packet, + len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=1; if (doit && r != -1) + { serrno= ENOSYS; r= -1; } doit= !doit; } +#endif + + if (r == -1) + { + if (serrno != EMSGSIZE) + { + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + else if (state->do_udp) + { + sock= socket(AF_INET, SOCK_DGRAM, 0); + if (sock == -1) + { + crondlog(DIE9 "socket failed"); + } + + on= 1; + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, + sizeof(on)); + + if (set_tos(state, sock, AF_INET, 1 /*inner*/) == -1) + { + close(sock); + return; + } + + /* Bind to source addr/port */ + if (state->response_in) + r= 0; + else + { + r= bind(sock, + (struct sockaddr *)&state->loc_sin6, + state->loc_socklen); + } +#if 0 + { static int doit=1; if (doit && r != -1) + { errno= ENOSYS; r= -1; } doit= !doit; } +#endif + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(bind failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + close(sock); + return; + } + + hop= state->hop; + + /* Set port */ + if (state->parismod) + { + ((struct sockaddr_in *)&state->sin6)->sin_port= + htons(BASE_PORT + state->paris); + } + else + { + ((struct sockaddr_in *)&state->sin6)->sin_port= + htons(BASE_PORT + state->seq); + } + + base->packet[0]= '\0'; + base->packet[1]= '\0'; + len= 2; /* We need to fudge checksum */ + + if (state->curpacksize < len) + state->curpacksize= len; + if (state->curpacksize > len) + { + memset(&base->packet[len], '\0', + state->curpacksize-len); + strcpy((char *)&base->packet[len], id); + len= state->curpacksize; + } + + v4_ph.src= ((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr; + v4_ph.dst= ((struct sockaddr_in *)&state->sin6)-> + sin_addr; + v4_ph.zero= 0; + v4_ph.proto= IPPROTO_UDP; + v4_ph.len= htons(sizeof(udp)+len); + udp.uh_sport= + ((struct sockaddr_in *)&state->loc_sin6)-> + sin_port; + udp.uh_dport= ((struct sockaddr_in *)&state->sin6)-> + sin_port; + udp.uh_ulen= v4_ph.len; + udp.uh_sum= 0; + + sum= in_cksum_udp(&v4_ph, &udp, + (unsigned short *)base->packet, len); + + if (state->parismod) + { + /* Make sure that the sequence number ends + * up in the checksum field. We can't store + * 0. So we add 1. 
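+				 * In one's-complement terms the payload
+				 * word written below is w = ~S + ~val,
+				 * where S is the sum of the packet with
+				 * a zero payload word; recomputing the
+				 * checksum then yields ~(S + w) = val.
+				 * So the chosen value (seq here, index+1
+				 * in the non-paris case) travels in the
+				 * UDP checksum and can be read back from
+				 * the header quoted in the ICMP error,
+				 * while the destination port stays fixed
+				 * at BASE_PORT + paris across probes.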
+ */ + if (state->seq == 0) + state->seq++; + val= state->seq; + } + else + { + /* Use id+1 */ + val= state->index+1; + } + + sum= ntohs(sum); + usum= sum + (0xffff - val); + sum= usum + (usum >> 16); + + base->packet[0]= sum >> 8; + base->packet[1]= sum; + + sum= in_cksum_udp(&v4_ph, &udp, + (unsigned short *)base->packet, len); + + /* Set hop count */ + setsockopt(sock, IPPROTO_IP, IP_TTL, + &hop, sizeof(hop)); + + /* Set/clear don't fragment */ + on= (state->dont_fragment ? IP_PMTUDISC_DO : + IP_PMTUDISC_DONT); + setsockopt(sock, IPPROTO_IP, + IP_MTU_DISCOVER, &on, sizeof(on)); + + if (state->response_in) + { + size_t rlen; + + rlen= sizeof(r_errno); + read_response(state->socket_icmp, RESP_SENDTO, + &rlen, &r_errno); + if (rlen != sizeof(r_errno)) + { + crondlog(DIE9 + "send_pkt: error reading r_errno from '%s'", + state->response_in); + } + r= r_errno.r; + serrno= r_errno.error; + } + else + { + r= sendto(sock, base->packet, len, 0, + (struct sockaddr *)&state->sin6, + state->socklen); + serrno= errno; + if (state->resp_file_out) + { + r_errno.r= r; + r_errno.error= serrno; + write_response(state->resp_file_out, + RESP_SENDTO, + sizeof(r_errno), &r_errno); + } + } + +#if 0 + { static int doit=0; if (doit && r != -1) + { errno= ENOSYS; r= -1; } doit= !doit; } +#endif + close(sock); + if (r == -1) + { + if (serrno != EMSGSIZE) + { + serrno= errno; + + snprintf(line, sizeof(line), + "%s{ " DBQ(error) ":" DBQ(sendto failed: %s) " } ] }", + state->sent ? " }, " : "", + strerror(serrno)); + add_str(state, line); + report(state); + return; + } + } + } + } + + if (state->open_result) + add_str(state, " }, "); + add_str(state, "{ "); + state->open_result= 0; + + /* Increment packets sent */ + state->sent++; + + /* Set timer */ + interval.tv_sec= state->timeout/1000000; + interval.tv_usec= state->timeout % 1000000; + evtimer_add(&state->timer, &interval); + + if (state->response_in) + { + if (state->sin6.sin6_family == AF_INET6) + ready_callback6(0, 0, state); + else + ready_callback4(0, 0, state); + } +} + +static void do_mpls(struct trtstate *state, unsigned char *packet, + size_t size) +{ + int o, exp, s, ttl; + uint32_t v, label; + char line[256]; + + add_str(state, ", " DBQ(mpls) ": ["); + + for (o= 0; o+4 <= size; o += 4) + { + v= (ntohl(*(uint32_t *)&packet[o])); + label= (v >> MPLS_LABEL_SHIFT); + exp= ((v >> MPLS_EXT_SHIFT) & MPLS_EXT_MASK); + s= !!(v & MPLS_S_BIT); + ttl= (v & MPLS_TTL_MASK); + + snprintf(line, sizeof(line), "%s { " DBQ(label) ":%d, " + DBQ(exp) ":%d, " DBQ(s) ":%d, " DBQ(ttl) ":%d }", + o == 0 ? "" : ",", + label, exp, s, ttl); + add_str(state, line); + } + + add_str(state, " ]"); +} + +static void do_icmp_multi(struct trtstate *state, + unsigned char *packet, size_t size, int pre_rfc4884) +{ + int o, len; + uint16_t cksum; + uint8_t class, ctype, version; + char line[256]; + + if (size < 4) + { + printf("do_icmp_multi: not enough for ICMP extension header\n"); + return; + } + cksum= in_cksum((unsigned short *)packet, size); + if (cksum != 0) + { + /* There is also an option for a zero checksum. 
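+		 * (RFC 4884 permits a zero, i.e. uncomputed, checksum.)
+		 * The structure parsed below is a 32-bit extension header
+		 * whose top four bits carry the version, followed by
+		 * objects that each start with a 16-bit length, an 8-bit
+		 * class and an 8-bit c-type; MPLS label stack objects
+		 * (class 1, c-type 1) are decoded by do_mpls() above.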
*/ + if (!pre_rfc4884) + { +#if 0 + printf("do_icmp_multi: bad checksum\n"); +#endif + } + return; + } + + version= (*(uint8_t *)packet >> ICMPEXT_VERSION_SHIFT); + + snprintf(line, sizeof(line), ", " DBQ(icmpext) ": { " + DBQ(version) ":%d" ", " DBQ(rfc4884) ":%d", + version, !pre_rfc4884); + add_str(state, line); + + add_str(state, ", " DBQ(obj) ": ["); + + o= 4; + while (o+4 < size) + { + len= ntohs(*(uint16_t *)&packet[o]); + class= packet[o+2]; + ctype= packet[o+3]; + + snprintf(line, sizeof(line), "%s { " DBQ(class) ":%d, " + DBQ(type) ":%d", + o == 4 ? "" : ",", class, ctype); + add_str(state, line); + + if (len < 4 || o+len > size) + { + add_str(state, " }"); +#if 0 + printf("do_icmp_multi: bad len %d\n", len); +#endif + break; + } + if (class == ICMPEXT_MPLS && ctype == ICMPEXT_MPLS_IN) + do_mpls(state, packet+o+4, len-4); + o += len; + + add_str(state, " }"); + } + + add_str(state, " ] }"); +} + +static void ready_callback4(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct trtbase *base; + struct trtstate *state; + int hlen, ehlen, ind, nextmtu, late, isDup, icmp_prefixlen, offset; + unsigned seq, srcport; + ssize_t nrecv; + socklen_t slen; + struct ip *ip, *eip; + struct icmp *icmp, *eicmp; + struct tcphdr *etcp; + struct udphdr *eudp; + double ms; + struct timespec now; + struct timeval interval; + struct sockaddr_in remote; + char line[80]; + + state= s; + base= state->base; + + if (state->response_in) + { + int type; + uint8_t proto; + size_t len; + + peek_response(state->socket_icmp, &type); + if (type == RESP_SENDTO) + { + send_pkt(s); + return; + } + + /* Get proto before getting the time. The reason is that + * When creating the output file we directly go to + * ready_tcp4. + */ + + len= sizeof(proto); + read_response(state->socket_icmp, RESP_PROTO, + &len, &proto); + if (len != sizeof(proto)) + { + crondlog(DIE9 + "ready_callback4: error reading proto from '%s'", + state->response_in); + } + + if (proto == 0) + { + noreply_callback(0, 0, state); + return; /* Timeout */ + } + if (proto == 6) + { + ready_tcp4(0, 0, s); + return; + } + if (proto != 1) + { + fprintf(stderr, "ready_callback4: proto != 1\n"); + return; + } + } + + gettime_mono(&now); + + slen= sizeof(remote); + if (state->response_in) + { + size_t len; + + len= sizeof(base->packet); + read_response(state->socket_icmp, RESP_PACKET, + &len, base->packet); + nrecv= len; + + len= sizeof(remote); + read_response(state->socket_icmp, RESP_PEERNAME, + &len, &remote); + if (len != sizeof(remote)) + { + crondlog(DIE9 + "ready_callback4: error reading remote from '%s'", + state->response_in); + } + } + else + { + nrecv= recvfrom(state->socket_icmp, base->packet, sizeof(base->packet), + MSG_DONTWAIT, (struct sockaddr *)&remote, &slen); + } + if (nrecv == -1) + { + /* Strange, read error */ + printf("ready_callback4: read error '%s'\n", strerror(errno)); + return; + } + // printf("ready_callback4: got packet\n"); + + if (state->resp_file_out) + { + uint8_t proto= 1; + + write_response(state->resp_file_out, RESP_PROTO, + sizeof(proto), &proto); + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_PEERNAME, + sizeof(remote), &remote); + } + + ip= (struct ip *)base->packet; + hlen= ip->ip_hl*4; + + if (nrecv < hlen + ICMP_MINLEN || ip->ip_hl < 5) + { + /* Short packet */ + printf("ready_callback4: too short %d\n", (int)nrecv); + return; + } + + icmp= (struct icmp *)(base->packet+hlen); + + if (icmp->icmp_type == 
ICMP_TIME_EXCEEDED || + icmp->icmp_type == ICMP_DEST_UNREACH) + { + eip= &icmp->icmp_ip; + ehlen= eip->ip_hl*4; + + /* Make sure the packet we have is big enough */ + if (nrecv < hlen + ICMP_MINLEN + ehlen || eip->ip_hl < 5) + { + printf("ready_callback4: too short %d\n", (int)nrecv); + return; + } + + if (eip->ip_p == IPPROTO_TCP) + { + /* Now check if there is also a TCP header in the + * packet + */ + if (nrecv < hlen + ICMP_MINLEN + ehlen + 8) + { + printf("ready_callback4: too short %d\n", + (int)nrecv); + return; + } + + /* ICMP only guarantees 8 bytes! */ + etcp= (struct tcphdr *)((char *)eip+ehlen); + + /* Quick check if the source port is in range */ + srcport= ntohs(etcp->source); + if (srcport < SRC_BASE_PORT || + srcport >= SRC_BASE_PORT+base->tabsiz) + { +#if 0 + printf( + "ready_callback4: unknown TCP port in ICMP: %d\n", srcport); +#endif + return; /* Not for us */ + } + + /* We store the id in high order 16 bits of the + * sequence number + */ + ind= ntohl(etcp->seq) >> 16; + + if (ind != state->index) + state= NULL; + if (state && state->sin6.sin6_family != AF_INET) + state= NULL; + if (state && !state->do_tcp) + state= NULL; + + if (!state) + { + /* Nothing here */ + printf( + "ready_callback4: no state for ind %d\n", + ind); + return; + } + +#if 0 + printf("ready_callback4: from %s", + inet_ntoa(remote.sin_addr)); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + if (!state->busy) + { +#if 0 + printf( + "ready_callback4: index (%d) is not busy\n", + ind); +#endif + return; + } + + late= 0; + isDup= 0; + + /* Sequence number is in seq field */ + seq= ntohl(etcp->seq) & 0xffff; + + if (state->open_result) + add_str(state, " }, { "); + + if (seq != state->seq) + { + if (seq > state->seq) + { +#if 0 + printf( + "ready_callback4: mismatch for seq, got 0x%x, expected 0x%x (for %s)\n", + seq, state->seq, + state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, " " DBQ(dup) ":true"); + } + + if (!late && !isDup) + state->last_response_hop= state->hop; + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), + "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? 
", " : "", + inet_ntoa(remote.sin_addr)); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + ip->ip_ttl, (int)nrecv - IPHDR - ICMP_MINLEN); + add_str(state, line); + if (!late) + { + snprintf(line, sizeof(line), + ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + + if (eip->ip_ttl != 1) + { + snprintf(line, sizeof(line), + ", " DBQ(ittl) ":%d", eip->ip_ttl); + add_str(state, line); + } + if (eip->ip_tos != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(itos) ":%d", eip->ip_tos); + add_str(state, line); + } + + if (memcmp(&eip->ip_src, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: changed source %s\n", + inet_ntoa(eip->ip_src)); + printf("ready_callback4: expected %s\n", + inet_ntoa(((struct sockaddr_in *)&state->loc_sin6)->sin_addr)); + } + if (memcmp(&eip->ip_dst, + &((struct sockaddr_in *)&state->sin6)-> + sin_addr, sizeof(eip->ip_dst)) != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(edst) ":" DBQ(%s), + inet_ntoa(eip->ip_dst)); + add_str(state, line); + } + if (memcmp(&ip->ip_dst, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: weird destination %s\n", + inet_ntoa(ip->ip_dst)); + } + +#if 0 + printf("ready_callback4: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + if (icmp->icmp_type == ICMP_TIME_EXCEEDED) + { + if (!late) + state->not_done= 1; + } + else if (icmp->icmp_type == ICMP_DEST_UNREACH) + { + if (!late) + state->done= 1; + switch(icmp->icmp_code) + { + case ICMP_UNREACH_NET: + add_str(state, + ", " DBQ(err) ":" DBQ(N)); + break; + case ICMP_UNREACH_HOST: + add_str(state, + ", " DBQ(err) ":" DBQ(H)); + break; + case ICMP_UNREACH_PROTOCOL: + add_str(state, + ", " DBQ(err) ":" DBQ(P)); + break; + case ICMP_UNREACH_PORT: + break; + case ICMP_UNREACH_NEEDFRAG: + nextmtu= ntohs(icmp->icmp_nextmtu); + snprintf(line, sizeof(line), + ", " DBQ(mtu) ":%d", + nextmtu); + add_str(state, line); + if (!late && nextmtu >= sizeof(*ip)+ + sizeof(*etcp)) + { + nextmtu -= sizeof(*ip)+ + sizeof(*etcp); + if (nextmtu < + state->curpacksize) + { + state->curpacksize= + nextmtu; + } + } + if (!late) + state->not_done= 1; + break; + case ICMP_UNREACH_FILTER_PROHIB: + add_str(state, + ", " DBQ(err) ":" DBQ(A)); + break; + default: + snprintf(line, sizeof(line), + ", " DBQ(err) ":%d", + icmp->icmp_code); + add_str(state, line); + break; + } + } + } + else if (eip->ip_p == IPPROTO_UDP) + { + /* Now check if there is also a UDP header in the + * packet + */ + if (nrecv < hlen + ICMP_MINLEN + ehlen + sizeof(*eudp)) + { + printf("ready_callback4: too short %d\n", + (int)nrecv); + return; + } + + eudp= (struct udphdr *)((char *)eip+ehlen); + + /* We store the id in the source port. 
+ */ + ind= ntohs(eudp->uh_sport) - SRC_BASE_PORT; + + if (ind != state->index) + state= NULL; + if (state && state->sin6.sin6_family != AF_INET) + state= NULL; + if (state && state->do_icmp) + state= NULL; + + if (!state) + { + /* Nothing here */ + // printf("ready_callback4: no state\n"); + return; + } + +#if 0 + printf("ready_callback4: from %s", + inet_ntoa(remote.sin_addr)); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + if (!state->busy) + { +#if 0 + printf( + "ready_callback4: index (%d) is not busy\n", + ind); +#endif + return; + } + + late= 0; + isDup= 0; + if (state->parismod) + { + /* Sequence number is in checksum field */ + seq= ntohs(eudp->uh_sum); + + /* Unfortunately, cheap home routers may + * forget to restore the checksum field + * when they are doing NAT. Ignore the + * sequence number if it seems wrong. + */ + if (seq > state->seq) + seq= state->seq; + } + else + { + /* Sequence number is in destination field */ + seq= ntohs(eudp->uh_dport)-BASE_PORT; + } + + if (state->open_result) + add_str(state, " }, { "); + + if (seq != state->seq) + { + if (seq > state->seq) + { +#if 0 + printf( + "ready_callback4: mismatch for seq, got 0x%x, expected 0x%x (for %s)\n", + seq, state->seq, + state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, " " DBQ(dup) ":true"); + } + + if (!late && !isDup) + state->last_response_hop= state->hop; + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), + "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? ", " : "", + inet_ntoa(remote.sin_addr)); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + ip->ip_ttl, (int)nrecv-IPHDR-ICMP_MINLEN); + add_str(state, line); + if (!late) + { + snprintf(line, sizeof(line), + ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + if (eip->ip_ttl != 1) + { + snprintf(line, sizeof(line), + ", " DBQ(ittl) ":%d", eip->ip_ttl); + add_str(state, line); + } + if (eip->ip_tos != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(itos) ":%d", eip->ip_tos); + add_str(state, line); + } + + if (memcmp(&eip->ip_src, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: changed source %s\n", + inet_ntoa(eip->ip_src)); + } + if (memcmp(&eip->ip_dst, + &((struct sockaddr_in *)&state->sin6)-> + sin_addr, sizeof(eip->ip_dst)) != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(edst) ":" DBQ(%s), + inet_ntoa(eip->ip_dst)); + add_str(state, line); + } + if (memcmp(&ip->ip_dst, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: weird destination %s\n", + inet_ntoa(ip->ip_dst)); + } + +#if 0 + printf("ready_callback4: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + if (icmp->icmp_type == ICMP_TIME_EXCEEDED) + { + if (!late) + state->not_done= 1; + } + else if (icmp->icmp_type == ICMP_DEST_UNREACH) + { + if (!late) + state->done= 1; + switch(icmp->icmp_code) + { + case ICMP_UNREACH_NET: + add_str(state, + ", " DBQ(err) ":" DBQ(N)); + break; + case ICMP_UNREACH_HOST: + add_str(state, + ", " DBQ(err) ":" DBQ(H)); + break; + 
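+				/* Legend for the single-letter "err"
+				 * values used in this switch (classic
+				 * traceroute annotations): N = network
+				 * unreachable, H = host unreachable,
+				 * P = protocol unreachable,
+				 * A = administratively prohibited; other
+				 * codes are reported numerically.  Port
+				 * unreachable is not flagged for UDP
+				 * probes because it normally means the
+				 * destination itself answered.
+				 */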
case ICMP_UNREACH_PROTOCOL: + add_str(state, + ", " DBQ(err) ":" DBQ(P)); + break; + case ICMP_UNREACH_PORT: + break; + case ICMP_UNREACH_NEEDFRAG: + nextmtu= ntohs(icmp->icmp_nextmtu); + snprintf(line, sizeof(line), + ", " DBQ(mtu) ":%d", nextmtu); + add_str(state, line); + if (!late && nextmtu >= sizeof(*ip)+ + sizeof(*eudp)) + { + nextmtu -= sizeof(*ip)+ + sizeof(*eudp); + if (nextmtu < + state->curpacksize) + { + state->curpacksize= + nextmtu; + } + } + if (!late) + state->not_done= 1; + break; + case ICMP_UNREACH_FILTER_PROHIB: + add_str(state, + ", " DBQ(err) ":" DBQ(A)); + break; + default: + snprintf(line, sizeof(line), + ", " DBQ(err) ":%d", + icmp->icmp_code); + add_str(state, line); + break; + } + } + } + else if (eip->ip_p == IPPROTO_ICMP) + { + /* Now check if there is also an ICMP header in the + * packet + */ + if (nrecv < hlen + ICMP_MINLEN + ehlen + + offsetof(struct icmp, icmp_data[0])) + { + printf("ready_callback4: too short %d\n", + (int)nrecv); + return; + } + + eicmp= (struct icmp *)((char *)eip+ehlen); + + if (eicmp->icmp_type != ICMP_ECHO || + eicmp->icmp_code != 0) + { + printf("ready_callback4: not ECHO\n"); + return; + } + + ind= ntohs(eicmp->icmp_id); + if ((ind >> TRT_ICMP4_INSTANCE_ID_SHIFT) != instance_id) + { + // printf("wrong instance id\n"); + return; + } + ind &= ~TRT_ICMP4_INSTANCE_ID_MASK; + + if (ind >= base->tabsiz) + { + /* Out of range */ +#if 0 + printf( + "ready_callback4: index out of range (%d)\n", + ind); +#endif + return; + } + + if (ind != state->index) + { + /* Nothing here */ +#if 0 + printf( + "ready_callback4: nothing at index (%d)\n", + ind); +#endif + return; + } + + if (state->sin6.sin6_family != AF_INET) + { + // printf("ready_callback4: bad family\n"); + return; + } + + if (!state->do_icmp) + { + printf( + "ready_callback4: index (%d) is not doing ICMP\n", + ind); + return; + } + if (!state->busy) + { + printf( + "ready_callback4: index (%d) is not busy\n", + ind); + return; + } + + if (state->parismod && + ntohs(eicmp->icmp_cksum) != state->paris) + { + printf( + "ready_callback4: mismatch for paris, got 0x%x, expected 0x%x (%s)\n", + ntohs(eicmp->icmp_cksum), + state->paris, state->hostname); + } + + if (state->open_result) + add_str(state, " }, { "); + + late= 0; + isDup= 0; + seq= ntohs(eicmp->icmp_seq); + if (seq != state->seq) + { + if (seq > state->seq) + { +#if 0 + printf( + "ready_callback4: mismatch for seq, got 0x%x, expected 0x%x (for %s)\n", + seq, state->seq, + state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + if (!late && !isDup) + state->last_response_hop= state->hop; + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? 
", " : "", + inet_ntoa(remote.sin_addr)); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + ip->ip_ttl, (int)nrecv-IPHDR-ICMP_MINLEN); + add_str(state, line); + if (!late) + { + snprintf(line, sizeof(line), + ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + + if (eip->ip_ttl != 1) + { + snprintf(line, sizeof(line), + ", " DBQ(ittl) ":%d", eip->ip_ttl); + add_str(state, line); + } + if (eip->ip_tos != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(itos) ":%d", eip->ip_tos); + add_str(state, line); + } + + if (memcmp(&eip->ip_src, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: changed source %s\n", + inet_ntoa(eip->ip_src)); + printf("ready_callback4: expected %s\n", + inet_ntoa(((struct sockaddr_in *)&state->loc_sin6)->sin_addr)); + } + if (memcmp(&eip->ip_dst, + &((struct sockaddr_in *)&state->sin6)-> + sin_addr, sizeof(eip->ip_dst)) != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(edst) ":" DBQ(%s), + inet_ntoa(eip->ip_dst)); + add_str(state, line); + } + if (memcmp(&ip->ip_dst, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: weird destination %s\n", + inet_ntoa(ip->ip_dst)); + } + +#if 0 + printf("ready_callback4: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + if (icmp->icmp_type == ICMP_TIME_EXCEEDED) + { + if (!late && !isDup) + state->not_done= 1; + } + else if (icmp->icmp_type == ICMP_DEST_UNREACH) + { + if (!late) + state->done= 1; + switch(icmp->icmp_code) + { + case ICMP_UNREACH_NET: + add_str(state, + ", " DBQ(err) ":" DBQ(N)); + break; + case ICMP_UNREACH_HOST: + add_str(state, + ", " DBQ(err) ":" DBQ(H)); + break; + case ICMP_UNREACH_PROTOCOL: + add_str(state, + ", " DBQ(err) ":" DBQ(P)); + break; + case ICMP_UNREACH_PORT: + add_str(state, + ", " DBQ(err) ":" DBQ(p)); + break; + case ICMP_UNREACH_NEEDFRAG: + nextmtu= ntohs(icmp->icmp_nextmtu); + snprintf(line, sizeof(line), + ", " DBQ(mtu) ":%d", + nextmtu); + add_str(state, line); + if (!late && nextmtu >= sizeof(*ip) + + ICMP_MINLEN) + { + nextmtu -= sizeof(*ip) + + ICMP_MINLEN; + if (nextmtu < + state->curpacksize) + { + state->curpacksize= + nextmtu; + } + } + if (!late) + state->not_done= 1; + break; + case ICMP_UNREACH_FILTER_PROHIB: + add_str(state, + ", " DBQ(err) ":" DBQ(A)); + break; + default: + snprintf(line, sizeof(line), + ", " DBQ(err) ":%d", + icmp->icmp_code); + add_str(state, line); + break; + } + } + else + { + printf("imcp type %d\n", icmp->icmp_type); + } + } + else + { + printf("ready_callback4: not TCP, UDP or ICMP (%d\n", + eip->ip_p); + return; + } + + /* RFC-4884, Multi-Part ICMP messages */ + icmp_prefixlen= (ntohs(icmp->icmp_pmvoid) & 0xff) * 4; + if (icmp_prefixlen != 0) + { + +#if 0 + printf("icmp_pmvoid: 0x%x for %s\n", icmp->icmp_pmvoid, state->hostname); + printf("icmp_prefixlen: 0x%x for %s\n", icmp_prefixlen, inet_ntoa(remote.sin_addr)); +#endif + offset= hlen + ICMP_MINLEN + icmp_prefixlen; + if (nrecv > offset) + { + do_icmp_multi(state, base->packet+offset, + nrecv-offset, 0 /*!pre_rfc4884*/); + } + else + { +#if 0 + printf( + "ready_callback4: too short %d (Multi-Part ICMP)\n", + (int)nrecv); +#endif + } + } + else if (nrecv > hlen + ICMP_MINLEN + 128) + { + /* Try old style extensions */ + icmp_prefixlen= 128; + offset= hlen + ICMP_MINLEN + 
icmp_prefixlen; + if (nrecv > offset) + { + do_icmp_multi(state, base->packet+offset, + nrecv-offset, 1 /*pre_rfc4884*/); + } + else + { + printf( + "ready_callback4: too short %d (Multi-Part ICMP)\n", + (int)nrecv); + } + } + + state->open_result= 1; + + if (!late && !isDup) + { + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + } + else if (icmp->icmp_type == ICMP_ECHOREPLY) + { + if (icmp->icmp_code != 0) + { + printf("ready_callback4: not proper ECHO REPLY\n"); + return; + } + + ind= ntohs(icmp->icmp_id); + if ((ind >> TRT_ICMP4_INSTANCE_ID_SHIFT) != instance_id) + { + // printf("wrong instance id\n"); + return; + } + ind &= ~TRT_ICMP4_INSTANCE_ID_MASK; + + if (ind >= base->tabsiz) + { + /* Out of range */ +#if 0 + printf( + "ready_callback4: index out of range (%d)\n", + ind); +#endif + return; + } + + if (ind != state->index) + { + /* Nothing here */ +#if 0 + printf( + "ready_callback4: nothing at index (%d)\n", + ind); +#endif + return; + } + + if (state->sin6.sin6_family != AF_INET) + { + // printf("ready_callback4: bad family\n"); + return; + } + + if (!state->busy) + { + printf( + "ready_callback4: index (%d) is not busy\n", + ind); + return; + } + + if (state->open_result) + add_str(state, " }, { "); + + late= 0; + isDup= 0; + seq= ntohs(icmp->icmp_seq); + if (seq != state->seq) + { + if (seq > state->seq) + { +#if 0 + printf( +"ready_callback4: mismatch for seq, got 0x%x, expected 0x%x, for %s\n", + seq, state->seq, state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + if (memcmp(&ip->ip_dst, + &((struct sockaddr_in *)&state->loc_sin6)-> + sin_addr, sizeof(eip->ip_src)) != 0) + { + printf("ready_callback4: weird destination %s\n", + inet_ntoa(ip->ip_dst)); + } + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? 
", " : "", + inet_ntoa(remote.sin_addr)); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + ip->ip_ttl, (int)nrecv - IPHDR - ICMP_MINLEN); + add_str(state, line); + if (ip->ip_tos != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), ", " DBQ(tos) ":%d", + ip->ip_tos); + add_str(state, line); + } + if (!late) + { + snprintf(line, sizeof(line), ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + +#if 0 + printf("ready_callback4: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + /* Done */ + state->done= 1; + + state->open_result= 1; + + if (!late && !isDup) + { + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + + return; + } + else if (icmp->icmp_type == ICMP_ECHO || + icmp->icmp_type == ICMP_ROUTERADVERT) + { + /* No need to do anything */ + } + else + { + printf("ready_callback4: got type %d\n", icmp->icmp_type); + return; + } +} + +static void report_hdropts(struct trtstate *state, unsigned char *s, + unsigned char *e) +{ + int o, len, mss; + unsigned char *orig_s; + char line[80]; + + add_str(state, ", " DBQ(hdropts) ": [ "); + orig_s= s; + while (s e) + { + printf("report_hdropts: bad option\n"); + break; + } + mss= (s[2] << 8) | s[3]; + snprintf(line, sizeof(line), + "%s{ " DBQ(mss) ":%d }", + s != orig_s ? ", " : "", mss); + add_str(state, line); + s += len; + continue; + default: + snprintf(line, sizeof(line), + "%s{ " DBQ(unknown-opt) ":%d }", + s != orig_s ? ", " : "", o); + add_str(state, line); + break; + } + break; + } + add_str(state, " ]"); +} + +static void ready_tcp4(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + uint16_t myport; + socklen_t slen; + int hlen, late, isDup, tcp_hlen; + unsigned ind, seq; + ssize_t nrecv; + struct trtbase *base; + struct trtstate *state; + struct ip *ip; + double ms; + struct tcphdr *tcphdr; + unsigned char *e, *p; + struct sockaddr_in remote; + struct timespec now; + struct timeval interval; + char line[80]; + + gettime_mono(&now); + + state= s; + base= state->base; + + slen= sizeof(remote); + + if (state->response_in) + { + size_t len; + + /* Proto is eaten by ready_callback4 */ + len= sizeof(base->packet); + read_response(state->socket_icmp, RESP_PACKET, + &len, base->packet); + nrecv= len; + + len= sizeof(remote); + read_response(state->socket_icmp, RESP_PEERNAME, + &len, &remote); + if (len != sizeof(remote)) + { + crondlog(DIE9 + "ready_tcp4: error reading remote from '%s'", + state->response_in); + } + } + else + { + nrecv= recvfrom(state->socket_tcp, base->packet, + sizeof(base->packet), MSG_DONTWAIT, + (struct sockaddr *)&remote, &slen); + } + if (nrecv == -1) + { + /* Strange, read error */ + printf("ready_tcp4: read error '%s'\n", strerror(errno)); + return; + } + + if (state->resp_file_out) + { + uint8_t proto= 6; + + write_response(state->resp_file_out, RESP_PROTO, + sizeof(proto), &proto); + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_PEERNAME, + sizeof(remote), &remote); + } + + ip= (struct ip *)base->packet; + hlen= ip->ip_hl*4; + + if (nrecv < hlen + sizeof(*tcphdr) || ip->ip_hl < 5) + { + /* Short packet */ + printf("ready_tcp4: too short %d\n", 
(int)nrecv); + return; + } + + tcphdr= (struct tcphdr *)(base->packet+hlen); + + tcp_hlen= tcphdr->doff * 4; + if (nrecv < hlen + tcp_hlen || tcphdr->doff < 5) + { + /* Short packet */ + printf("ready_tcp4: too short %d\n", (int)nrecv); + return; + } + + /* Quick check if the port is in range */ + myport= ntohs(tcphdr->dest); + if (myport < SRC_BASE_PORT || myport >= SRC_BASE_PORT+base->tabsiz) + { + return; /* Not for us */ + } + + /* We store the id in high order 16 bits of the sequence number */ + ind= ntohl(tcphdr->ack_seq) >> 16; + + if (ind != state->index) + state= NULL; + if (state && state->sin6.sin6_family != AF_INET) + state= NULL; + if (state && !state->do_tcp) + state= NULL; + + if (!state) + { + /* Nothing here */ + printf("ready_tcp4: no state for index %d\n", ind); + return; + } + + if (!state->busy) + { + printf( + "ready_callback4: index (%d) is not busy\n", + ind); + return; + } + + late= 0; + isDup= 0; + + if (state->open_result) + add_str(state, " }, { "); + + /* Only check if the ack is without 64k of what we expect */ + seq= ntohl(tcphdr->ack_seq) & 0xffff; + if (seq-state->seq > 0x2000) + { + if (seq > state->seq) + { +#if 0 + printf( +"ready_callback4: mismatch for seq, got 0x%x, expected 0x%x, for %s\n", + seq, state->seq, state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? ", " : "", + inet_ntoa(remote.sin_addr)); + add_str(state, line); + snprintf(line, sizeof(line), ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + ip->ip_ttl, (int)(nrecv - IPHDR - sizeof(*tcphdr))); + add_str(state, line); + if (ip->ip_tos != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), ", " DBQ(tos) ":%d", ip->ip_tos); + add_str(state, line); + } + snprintf(line, sizeof(line), ", " DBQ(flags) ":" DBQ(%s%s%s%s%s%s), + (tcphdr->fin ? "F" : ""), + (tcphdr->syn ? "S" : ""), + (tcphdr->rst ? "R" : ""), + (tcphdr->psh ? "P" : ""), + (tcphdr->ack ? "A" : ""), + (tcphdr->urg ? 
"U" : "")); + add_str(state, line); + + if (tcp_hlen > sizeof(*tcphdr)) + { + p= (unsigned char *)&tcphdr[1]; + e= ((unsigned char *)tcphdr) + tcp_hlen; + report_hdropts(state, p, e); + } + + if (!late) + { + snprintf(line, sizeof(line), ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + +#if 0 + printf("ready_callback4: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + /* Done */ + state->done= 1; + + state->open_result= 1; + + if (!late && !isDup) + { + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + + return; +} + +static void ready_tcp6(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + uint16_t myport; + int late, isDup, rcvdttl, rcvdtclass, tcp_hlen; + unsigned ind, seq; + ssize_t nrecv; + struct trtbase *base; + struct trtstate *state; + double ms; + unsigned char *e, *p; + struct tcphdr *tcphdr; + struct cmsghdr *cmsgptr; + struct msghdr msg; + struct iovec iov[1]; + struct sockaddr_in6 remote; + struct in6_addr dstaddr; + struct timespec now; + struct timeval interval; + char buf[INET6_ADDRSTRLEN]; + char line[80]; + char cmsgbuf[256]; + + gettime_mono(&now); + + state= s; + base= state->base; + + iov[0].iov_base= base->packet; + iov[0].iov_len= sizeof(base->packet); + msg.msg_name= &remote; + msg.msg_namelen= sizeof(remote); + msg.msg_iov= iov; + msg.msg_iovlen= 1; + msg.msg_control= cmsgbuf; + msg.msg_controllen= sizeof(cmsgbuf); + msg.msg_flags= 0; /* Not really needed */ + + if (state->response_in) + { + size_t len; + + /* Proto is eaten by ready_callback6 */ + len= sizeof(base->packet); + read_response(state->socket_icmp, RESP_PACKET, + &len, base->packet); + nrecv= len; + + len= sizeof(remote); + read_response(state->socket_icmp, RESP_PEERNAME, + &len, &remote); + if (len != sizeof(remote)) + { + crondlog(DIE9 + "ready_tcp6: error reading remote from '%s'", + state->response_in); + } + } + else + nrecv= recvmsg(state->socket_tcp, &msg, MSG_DONTWAIT); + if (nrecv == -1) + { + /* Strange, read error */ + printf("ready_tcp6: read error '%s'\n", strerror(errno)); + return; + } + + if (state->resp_file_out) + { + uint8_t proto= 6; + + write_response(state->resp_file_out, RESP_PROTO, + sizeof(proto), &proto); + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_PEERNAME, + sizeof(remote), &remote); + } + + rcvdttl= -42; /* To spot problems */ + rcvdtclass= -42; /* To spot problems */ + memset(&dstaddr, '\0', sizeof(dstaddr)); + for (cmsgptr= CMSG_FIRSTHDR(&msg); cmsgptr; + cmsgptr= CMSG_NXTHDR(&msg, cmsgptr)) + { + if (cmsgptr->cmsg_len == 0) + break; /* Can this happen? 
*/ + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_HOPLIMIT) + { + rcvdttl= *(int *)CMSG_DATA(cmsgptr); + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_PKTINFO) + { + dstaddr= ((struct in6_pktinfo *) + CMSG_DATA(cmsgptr))->ipi6_addr; + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_TCLASS) + { + rcvdtclass= *(int *)CMSG_DATA(cmsgptr); + } + } + + if (state->response_in) + { + size_t len; + + len= sizeof(rcvdttl); + read_response(state->socket_icmp, RESP_RCVDTTL, + &len, &rcvdttl); + if (len != sizeof(rcvdttl)) + { + crondlog(DIE9 + "ready_tcp6: error reading ttl from '%s'", + state->response_in); + } + len= sizeof(rcvdtclass); + read_response(state->socket_icmp, RESP_RCVDTCLASS, + &len, &rcvdtclass); + if (len != sizeof(rcvdtclass)) + { + crondlog(DIE9 + "ready_tcp6: error reading traffic class from '%s'", + state->response_in); + } + } + if (state->response_out) + { + write_response(state->resp_file_out, RESP_RCVDTTL, + sizeof(rcvdttl), &rcvdttl); + write_response(state->resp_file_out, RESP_RCVDTCLASS, + sizeof(rcvdtclass), &rcvdtclass); + } + + tcphdr= (struct tcphdr *)(base->packet); + + tcp_hlen= tcphdr->doff * 4; + if (nrecv < tcp_hlen || tcphdr->doff < 5) + { + /* Short packet */ + printf("ready_tcp6: too short %d\n", (int)nrecv); + return; + } + + /* Quick check if the port is in range */ + myport= ntohs(tcphdr->dest); + if (myport < SRC_BASE_PORT || myport >= SRC_BASE_PORT+base->tabsiz) + { + return; /* Not for us */ + } + + /* We store the id in high order 16 bits of the sequence number */ + ind= ntohl(tcphdr->ack_seq) >> 16; + + if (ind != state->index) + state= NULL; + if (state && state->sin6.sin6_family != AF_INET6) + state= NULL; + if (state && !state->do_tcp) + state= NULL; + + if (!state) + { + /* Nothing here */ + printf("ready_tcp6: no state for index %d\n", ind); + return; + } + + if (!state->busy) + { + printf("ready_tcp6: index (%d) is not busy\n", ind); + return; + } + + late= 0; + isDup= 0; + + if (state->open_result) + add_str(state, " }, { "); + + /* Only check if the ack is within 64k of what we expect */ + seq= ntohl(tcphdr->ack_seq) & 0xffff; + if (seq-state->seq > 0x2000) + { + if (seq > state->seq) + { +#if 0 + printf( +"ready_tcp6: mismatch for seq, got 0x%x, expected 0x%x, for %s\n", + seq, state->seq, state->hostname); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? ", " : "", + inet_ntop(AF_INET6, &remote.sin6_addr, buf, sizeof(buf))); + add_str(state, line); + snprintf(line, sizeof(line), ", " DBQ(ttl) ":%d, " DBQ(size) ":%d", + rcvdttl, (int)(nrecv - sizeof(*tcphdr))); + add_str(state, line); + if (rcvdtclass != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), ", " DBQ(tos) ":%d", + rcvdtclass); + add_str(state, line); + } + snprintf(line, sizeof(line), ", " DBQ(flags) ":" DBQ(%s%s%s%s%s%s), + (tcphdr->fin ? "F" : ""), + (tcphdr->syn ? "S" : ""), + (tcphdr->rst ? "R" : ""), + (tcphdr->psh ? "P" : ""), + (tcphdr->ack ? "A" : ""), + (tcphdr->urg ? 
"U" : "")); + add_str(state, line); + + if (tcp_hlen > sizeof(*tcphdr)) + { + p= (unsigned char *)&tcphdr[1]; + e= ((unsigned char *)tcphdr) + tcp_hlen; + report_hdropts(state, p, e); + } + + if (!late) + { + snprintf(line, sizeof(line), ", " DBQ(rtt) ":%.3f", ms); + add_str(state, line); + } + +#if 0 + printf("ready_tcp6: from %s, ttl %d", + inet_ntoa(remote.sin_addr), ip->ip_ttl); + printf(" for %s hop %d\n", + inet_ntoa(((struct sockaddr_in *) + &state->sin6)->sin_addr), state->hop); +#endif + + /* Done */ + state->done= 1; + + state->open_result= 1; + + if (!late && !isDup) + { + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + + return; +} + +static void ready_callback6(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + ssize_t nrecv; + int ind, rcvdttl, late, isDup, nxt, icmp_prefixlen, offset, rcvdtclass; + unsigned nextmtu, seq, optlen, hbhoptsize, dstoptsize; + size_t v6info_siz, siz; + struct trtbase *base; + struct trtstate *state; + struct ip6_hdr *eip; + struct ip6_frag *frag; + struct ip6_ext *opthdr; + struct icmp6_hdr *icmp, *eicmp; + struct tcphdr *etcp; + struct udphdr *eudp; + struct v6info *v6info; + struct cmsghdr *cmsgptr; + void *ptr; + double ms= -42; /* lint, to spot problems */ + struct timespec now; + struct sockaddr_in6 remote; + struct in6_addr dstaddr; + struct msghdr msg; + struct iovec iov[1]; + struct timeval interval; + char buf[INET6_ADDRSTRLEN]; + char line[80]; + char cmsgbuf[256]; + + state= s; + base= state->base; + + if (state->response_in) + { + int type; + uint8_t proto; + size_t len; + + peek_response(state->socket_icmp, &type); + if (type == RESP_SENDTO) + { + send_pkt(s); + return; + } + + /* Get proto before we get the time because at response_out + * we don't get here when a TCP packet arrives. + */ + len= sizeof(proto); + read_response(state->socket_icmp, RESP_PROTO, + &len, &proto); + if (len != sizeof(proto)) + { + crondlog(DIE9 + "ready_callback6: error reading proto from '%s'", + state->response_in); + } + + if (proto == 0) + { + noreply_callback(0, 0, state); + return; /* Timeout */ + } + if (proto == 6) + { + ready_tcp6(0, 0, s); + return; + } + if (proto != 1) + { + fprintf(stderr, "ready_callback6: proto != 1\n"); + return; + } + } + + gettime_mono(&now); + + iov[0].iov_base= base->packet; + iov[0].iov_len= sizeof(base->packet); + msg.msg_name= &remote; + msg.msg_namelen= sizeof(remote); + msg.msg_iov= iov; + msg.msg_iovlen= 1; + msg.msg_control= cmsgbuf; + msg.msg_controllen= sizeof(cmsgbuf); + msg.msg_flags= 0; /* Not really needed */ + + /* Receive data from the network */ + if (state->response_in) + { + size_t len; + + len= sizeof(base->packet); + read_response(state->socket_icmp, RESP_PACKET, + &len, base->packet); + nrecv= len; + + len= sizeof(remote); + read_response(state->socket_icmp, RESP_PEERNAME, + &len, &remote); + if (len != sizeof(remote)) + { + crondlog(DIE9 + "ready_callback6: error reading remote from '%s'", + state->response_in); + } + + + /* Do not try to fuzz the cmsgbuf. We assume stuff returned by + * the kernel can be trusted. 
+ */ + memset(cmsgbuf, '\0', sizeof(cmsgbuf)); + } + else + nrecv= recvmsg(state->socket_icmp, &msg, MSG_DONTWAIT); + if (nrecv == -1) + { + /* Strange, read error */ + fprintf(stderr, "ready_callback6: read error '%s'\n", + strerror(errno)); + return; + } + + if (state->response_out) + { + uint8_t proto= 1; + + write_response(state->resp_file_out, RESP_PROTO, + sizeof(proto), &proto); + write_response(state->resp_file_out, RESP_PACKET, + nrecv, base->packet); + write_response(state->resp_file_out, RESP_PEERNAME, + sizeof(remote), &remote); + } + + rcvdttl= -42; /* To spot problems */ + rcvdtclass= -42; /* To spot problems */ + memset(&dstaddr, '\0', sizeof(dstaddr)); + for (cmsgptr= CMSG_FIRSTHDR(&msg); cmsgptr; + cmsgptr= CMSG_NXTHDR(&msg, cmsgptr)) + { + if (cmsgptr->cmsg_len == 0) + break; /* Can this happen? */ + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_HOPLIMIT) + { + rcvdttl= *(int *)CMSG_DATA(cmsgptr); + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_PKTINFO) + { + dstaddr= ((struct in6_pktinfo *) + CMSG_DATA(cmsgptr))->ipi6_addr; + } + if (cmsgptr->cmsg_level == IPPROTO_IPV6 && + cmsgptr->cmsg_type == IPV6_TCLASS) + { + rcvdtclass= *(int *)CMSG_DATA(cmsgptr); + } + } + + if (state->response_in) + { + size_t len; + + len= sizeof(rcvdttl); + read_response(state->socket_icmp, RESP_RCVDTTL, + &len, &rcvdttl); + if (len != sizeof(rcvdttl)) + { + crondlog(DIE9 + "ready_callback6: error reading ttl from '%s'", + state->response_in); + } + len= sizeof(rcvdtclass); + read_response(state->socket_icmp, RESP_RCVDTCLASS, + &len, &rcvdtclass); + if (len != sizeof(rcvdtclass)) + { + crondlog(DIE9 + "ready_callback6: error reading traffic class from '%s'", + state->response_in); + } + } + if (state->response_out) + { + write_response(state->resp_file_out, RESP_RCVDTTL, + sizeof(rcvdttl), &rcvdttl); + write_response(state->resp_file_out, RESP_RCVDTCLASS, + sizeof(rcvdtclass), &rcvdtclass); + } + + if (nrecv < sizeof(*icmp)) + { + /* Short packet */ +#if 0 + fprintf(stderr, "ready_callback6: too short %d (icmp)\n", + (int)nrecv); +#endif + return; + } + + icmp= (struct icmp6_hdr *)&base->packet; + + hbhoptsize= 0; + dstoptsize= 0; + if (icmp->icmp6_type == ICMP6_DST_UNREACH || + icmp->icmp6_type == ICMP6_PACKET_TOO_BIG || + icmp->icmp6_type == ICMP6_TIME_EXCEEDED) + { + eip= (struct ip6_hdr *)&icmp[1]; + + /* Make sure the packet we have is big enough */ + if (nrecv < sizeof(*icmp) + sizeof(*eip)) + { +#if 0 + fprintf(stderr, + "ready_callback6: too short %d (icmp_ip)\n", + (int)nrecv); +#endif + return; + } + + /* Make sure we have TCP, UDP, ICMP, a fragment header or + * an options header */ + if (eip->ip6_nxt == IPPROTO_FRAGMENT || + eip->ip6_nxt == IPPROTO_HOPOPTS || + eip->ip6_nxt == IPPROTO_DSTOPTS || + eip->ip6_nxt == IPPROTO_TCP || + eip->ip6_nxt == IPPROTO_UDP || + eip->ip6_nxt == IPPROTO_ICMPV6) + { + frag= NULL; + nxt= eip->ip6_nxt; + ptr= &eip[1]; + if (nxt == IPPROTO_HOPOPTS) + { + /* Make sure the options header is completely + * there. 
+ */ + offset= (u_char *)ptr - base->packet; + if (offset + sizeof(*opthdr) > nrecv) + { +#if 0 + fprintf(stderr, + "ready_callback6: too short %d (HOPOPTS)\n", + (int)nrecv); +#endif + return; + } + opthdr= (struct ip6_ext *)ptr; + hbhoptsize= 8*opthdr->ip6e_len; + optlen= hbhoptsize+8; + if (offset + optlen > nrecv) + { + /* Does not contain the full header */ + return; + } + nxt= opthdr->ip6e_nxt; + ptr= ((char *)opthdr)+optlen; + } + if (nxt == IPPROTO_FRAGMENT) + { + /* Make sure the fragment header is completely + * there. + */ + offset= (u_char *)ptr - base->packet; + if (offset + sizeof(*frag) > nrecv) + { +#if 0 + fprintf(stderr, + "ready_callback6: too short %d (FRAGMENT)\n", + (int)nrecv); +#endif + return; + } + frag= (struct ip6_frag *)ptr; + if ((ntohs(frag->ip6f_offlg) & ~3) != 0) + { + /* Not first fragment, just ignore + * it. + */ + if (state->response_in) + { + /* Try again for the next + * packet + */ + ready_callback6(0, 0, state); + } + return; + } + nxt= frag->ip6f_nxt; + ptr= &frag[1]; + } + if (nxt == IPPROTO_DSTOPTS) + { + /* Make sure the options header is completely + * there. + */ + offset= (u_char *)ptr - base->packet; + if (offset + sizeof(*opthdr) > nrecv) + { +#if 0 + printf( + "ready_callback6: too short %d (DSTOPTS)\n", + (int)nrecv); +#endif + return; + } + opthdr= (struct ip6_ext *)ptr; + dstoptsize= 8*opthdr->ip6e_len; + optlen= dstoptsize+8; + if (offset + optlen > nrecv) + { + /* Does not contain the full header */ +#if 0 + printf( + "ready_callback6: too short %d (full DSTOPTS)\n", + (int)nrecv); +#endif + return; + } + nxt= opthdr->ip6e_nxt; + ptr= ((char *)opthdr)+optlen; + } + + v6info_siz= sizeof(*v6info); + if (nxt == IPPROTO_TCP) + { + siz= sizeof(*etcp); + v6info_siz= 0; + } + else if (nxt == IPPROTO_UDP) + siz= sizeof(*eudp); + else + siz= sizeof(*eicmp); + + /* Now check if there is also a header in the + * packet. 
+ */ + offset= (u_char *)ptr - base->packet; + if (offset + siz + v6info_siz > nrecv) + { +#if 0 + printf( + "ready_callback6: too short %d (all) from %s\n", + (int)nrecv, inet_ntop(AF_INET6, + &remote.sin6_addr, buf, sizeof(buf))); +#endif + return; + } + + etcp= NULL; + eudp= NULL; + eicmp= NULL; + v6info= NULL; + if (nxt == IPPROTO_TCP) + { + etcp= (struct tcphdr *)ptr; + } + else if (nxt == IPPROTO_UDP) + { + eudp= (struct udphdr *)ptr; + v6info= (struct v6info *)&eudp[1]; + } + else + { + eicmp= (struct icmp6_hdr *)ptr; + v6info= (struct v6info *)&eicmp[1]; + } + +#if 0 + if (v6info) + { + printf( +"ready_callback6: pid = htonl(%d), id = htonl(%d), seq = htonl(%d)\n", + ntohl(v6info->pid), + ntohl(v6info->id), + ntohl(v6info->seq)); + } +#endif + + if (etcp) + { + /* We store the id in high order 16 bits of the + * sequence number + */ + ind= ntohl(etcp->seq) >> 16; + } + else + { + if (ntohl(v6info->pid) != base->my_pid) + { + /* From a different process */ + return; + } + + ind= ntohl(v6info->id); + } + + if (ind != state->index) + state= NULL; + + if (state && state->sin6.sin6_family != AF_INET6) + state= NULL; + + if (state) + { + if ((etcp && !state->do_tcp) || + (eudp && !state->do_udp) || + (eicmp && !state->do_icmp)) + { + state= NULL; + } + } + + if (!state) + { + /* Nothing here */ + return; + } + +#if 0 + printf("ready_callback6: from %s", + inet_ntop(AF_INET6, &remote.sin6_addr, + buf, sizeof(buf))); + printf(" for %s hop %d\n", + inet_ntop(AF_INET6, &state->sin6.sin6_addr, + buf, sizeof(buf)), state->hop); +#endif + + if (!state->busy) + { + printf( + "ready_callback6: index (%d) is not busy\n", + ind); + return; + } + + late= 0; + isDup= 0; + if (etcp) + { + /* Sequence number is in seq field */ + seq= ntohl(etcp->seq) & 0xffff; + } + else + seq= ntohl(v6info->seq); + + if (state->open_result) + add_str(state, " }, { "); + + if (seq != state->seq) + { + if (seq > state->seq) + { +#if 0 + printf( + "ready_callback6: mismatch for seq, got 0x%x, expected 0x%x\n", + seq, + state->seq); +#endif + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + if (!late && !isDup) + state->last_response_hop= state->hop; + + if (memcmp(&eip->ip6_src, + &state->loc_sin6.sin6_addr, + sizeof(eip->ip6_src)) != 0) + { + printf("ready_callback6: changed source %s\n", + inet_ntop(AF_INET6, &eip->ip6_src, + buf, sizeof(buf))); + } + if (memcmp(&eip->ip6_dst, + &state->sin6.sin6_addr, + sizeof(eip->ip6_dst)) != 0) + { + printf( + "ready_callback6: changed destination %s for %s\n", + inet_ntop(AF_INET6, &eip->ip6_dst, + buf, sizeof(buf)), + state->hostname); + } + if (memcmp(&dstaddr, + &state->loc_sin6.sin6_addr, + sizeof(dstaddr)) != 0) + { +#if 0 + printf("ready_callback6: weird destination %s\n", + inet_ntop(AF_INET6, &dstaddr, + buf, sizeof(buf))); +#endif + } + + if (eicmp && state->parismod && + ntohs(eicmp->icmp6_cksum) != + state->paris + 1) + { + fprintf(stderr, + "ready_callback6: got checksum 0x%x, expected 0x%x\n", + ntohs(eicmp->icmp6_cksum), + state->paris + 1); + } + + if (!late) + { + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/ + 1e6; + } + else if (v6info) + { + ms= (now.tv_sec-v6info->tv.tv_sec)*1000 + + (now.tv_nsec-v6info->tv.tv_nsec)/1e6; + } + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? 
", " : "", + inet_ntop(AF_INET6, &remote.sin6_addr, + buf, sizeof(buf))); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl)":%d, " DBQ(rtt) ":%.3f, " + DBQ(size) ":%d", + rcvdttl, ms, (int)(nrecv-ICMP6_HDR)); + add_str(state, line); + if (eip->ip6_hops != 1) + { + snprintf(line, sizeof(line), + ", " DBQ(ittl) ":%d", eip->ip6_hops); + add_str(state, line); + } + if (IP6_TOS(eip) != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), + ", " DBQ(itos) ":%d", IP6_TOS(eip)); + add_str(state, line); + } + + if (hbhoptsize) + { + snprintf(line, sizeof(line), + ", " DBQ(hbhoptsize) ":%d", + hbhoptsize); + add_str(state, line); + } + if (dstoptsize) + { + snprintf(line, sizeof(line), + ", " DBQ(dstoptsize) ":%d", + dstoptsize); + add_str(state, line); + } + +#if 0 + printf("ready_callback6: from %s, ttl %d", + inet_ntop(AF_INET6, &remote.sin6_addr, buf, + sizeof(buf)), rcvdttl); + printf(" for %s hop %d\n", + inet_ntop(AF_INET6, &state->sin6.sin6_addr, buf, + sizeof(buf)), state->hop); +#endif + + if (icmp->icmp6_type == ICMP6_TIME_EXCEEDED) + { + if (!late && !isDup) + state->not_done= 1; + } + else if (icmp->icmp6_type == ICMP6_PACKET_TOO_BIG) + { + nextmtu= ntohl(icmp->icmp6_mtu); + snprintf(line, sizeof(line), + ", " DBQ(mtu) ":%d", nextmtu); + add_str(state, line); + siz= sizeof(*eip); + if (eudp) + siz += sizeof(*eudp); + else if (eicmp) + siz += sizeof(*eicmp); + else if (etcp) + siz += sizeof(*etcp); + if (nextmtu < 1200) + { + /* This is IPv6, no need to go + * below 1280. Use 1200 to deal with + * off by one error or weird tunnels. + */ + nextmtu= 1200; + } + if (!late && nextmtu >= siz) + { + nextmtu -= siz; + if (nextmtu < state->curpacksize) + state->curpacksize= nextmtu; + } + if (!late) + state->not_done= 1; + } + else if (icmp->icmp6_type == ICMP6_DST_UNREACH) + { + if (!late) + state->done= 1; + switch(icmp->icmp6_code) + { + case ICMP6_DST_UNREACH_NOROUTE: /* 0 */ + add_str(state, + ", " DBQ(err) ":" DBQ(N)); + break; + case ICMP6_DST_UNREACH_ADMIN: /* 1 */ + add_str(state, + ", " DBQ(err) ":" DBQ(A)); + break; + case ICMP6_DST_UNREACH_BEYONDSCOPE: /* 2 */ + add_str(state, + ", " DBQ(err) ":" DBQ(h)); + break; + case ICMP6_DST_UNREACH_ADDR: /* 3 */ + add_str(state, + ", " DBQ(err) ":" DBQ(H)); + break; + case ICMP6_DST_UNREACH_NOPORT: /* 4 */ + break; + default: + snprintf(line, sizeof(line), + ", " DBQ(err) ":%d", + icmp->icmp6_code); + add_str(state, line); + break; + } + } + } + else + { + fprintf(stderr, + "ready_callback6: not UDP or ICMP (ip6_nxt = %d)\n", + eip->ip6_nxt); + return; + } + + /* RFC-4884, Multi-Part ICMP messages */ + icmp_prefixlen= icmp->icmp6_data8[0] * 8; + if (icmp_prefixlen != 0) + { + + printf("icmp6_data8[0]: 0x%x for %s\n", icmp->icmp6_data8[0], state->hostname); + printf("icmp_prefixlen: 0x%x for %s\n", icmp_prefixlen, inet_ntop(AF_INET6, &state->sin6.sin6_addr, buf, sizeof(buf))); + offset= sizeof(*icmp) + icmp_prefixlen; + if (nrecv > offset) + { + do_icmp_multi(state, base->packet+offset, + nrecv-offset, 0 /*!pre_rfc4884*/); + } + else + { +#if 0 + printf( + "ready_callback6: too short %d (Multi-Part ICMP)\n", + (int)nrecv); +#endif + } + } + else if (nrecv > 128) + { + /* Try old style extensions */ + icmp_prefixlen= 128; + offset= sizeof(*icmp) + icmp_prefixlen; + if (nrecv > offset) + { + do_icmp_multi(state, base->packet+offset, + nrecv-offset, 1 /*pre_rfc4884*/); + } + else + { + printf( + "ready_callback6: too short %d (Multi-Part ICMP)\n", + (int)nrecv); + } + } + + state->open_result= 1; + + if (!late && !isDup) + 
{ + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + } + else if (icmp->icmp6_type == ICMP6_ECHO_REPLY) + { + eip= NULL; + + /* Now check if there is also a header in the packet */ + if (nrecv < sizeof(*icmp) + sizeof(*v6info)) + { +#if 0 + printf("ready_callback6: too short %d (echo reply)\n", + (int)nrecv); +#endif + return; + } + + eudp= NULL; + eicmp= NULL; + + v6info= (struct v6info *)&icmp[1]; + + if (ntohl(v6info->pid) != base->my_pid) + { + /* From a different process */ + return; + } + + ind= ntohl(v6info->id); + + if (ind != state->index) + state= NULL; + if (state && state->sin6.sin6_family != AF_INET6) + state= NULL; + + if (state && !state->do_icmp) + { + state= NULL; + } + + if (!state) + { + /* Nothing here */ + return; + } + +#if 0 + printf("ready_callback6: from %s", + inet_ntop(AF_INET6, &remote.sin6_addr, + buf, sizeof(buf))); + printf(" for %s hop %d\n", + inet_ntop(AF_INET6, &state->sin6.sin6_addr, + buf, sizeof(buf)), state->hop); +#endif + + if (!state->busy) + { + printf( + "ready_callback6: index (%d) is not busy\n", + ind); + return; + } + + if (state->open_result) + add_str(state, " }, { "); + + late= 0; + isDup= 0; + seq= ntohl(v6info->seq); + if (seq != state->seq) + { + if (seq > state->seq) + { + printf( +"ready_callback6: mismatch for seq, got 0x%x, expected 0x%x\n", + ntohl(v6info->seq), + state->seq); + return; + } + late= 1; + + snprintf(line, sizeof(line), DBQ(late) ":%d", + state->seq-seq); + add_str(state, line); + } + else if (state->gotresp) + { + isDup= 1; + add_str(state, DBQ(dup) ":true"); + } + + if (!late && !isDup) + { + state->last_response_hop= state->hop; + state->done= 1; + } + + if (memcmp(&dstaddr, &state->loc_sin6.sin6_addr, + sizeof(dstaddr)) != 0) + { +#if 0 + printf("ready_callback6: weird destination %s\n", + inet_ntop(AF_INET6, &dstaddr, + buf, sizeof(buf))); +#endif + } + + if (!late) + { + ms= (now.tv_sec-state->xmit_time.tv_sec)*1000 + + (now.tv_nsec-state->xmit_time.tv_nsec)/1e6; + } + else + { + ms= (now.tv_sec-v6info->tv.tv_sec)*1000 + + (now.tv_nsec-v6info->tv.tv_nsec)/1e6; + } + + snprintf(line, sizeof(line), "%s" DBQ(from) ":" DBQ(%s), + (late || isDup) ? 
", " : "", + inet_ntop(AF_INET6, &remote.sin6_addr, + buf, sizeof(buf))); + add_str(state, line); + snprintf(line, sizeof(line), + ", " DBQ(ttl) ":%d, " DBQ(rtt) ":%.3f, " DBQ(size) ":%d", + rcvdttl, ms, (int)(nrecv - ICMP6_HDR)); + add_str(state, line); + if (rcvdtclass != 0 || state->tos != 0) + { + snprintf(line, sizeof(line), ", " DBQ(itos) ":%d", + rcvdtclass); + add_str(state, line); + } + +#if 0 + printf("ready_callback6: from %s, ttl %d", + inet_ntop(AF_INET6, &remote.sin6_addr, buf, + sizeof(buf)), rcvdttl); + printf(" for %s hop %d\n", + inet_ntop(AF_INET6, &state->sin6.sin6_addr, buf, + sizeof(buf)), state->hop); +#endif + + state->open_result= 1; + + if (!late && !isDup) + { + if (state->duptimeout) + { + state->gotresp= 1; + interval.tv_sec= state->duptimeout/1000000; + interval.tv_usec= state->duptimeout % 1000000; + evtimer_add(&state->timer, &interval); + } + else + { + send_pkt(state); + } + } + } + else if (icmp->icmp6_type == ICMP6_ECHO_REQUEST /* 128 */ || + icmp->icmp6_type == MLD_LISTENER_QUERY /* 130 */ || + icmp->icmp6_type == MLD_LISTENER_REPORT /* 131 */ || + icmp->icmp6_type == ND_ROUTER_ADVERT /* 134 */ || + icmp->icmp6_type == ND_NEIGHBOR_SOLICIT /* 135 */ || + icmp->icmp6_type == ND_NEIGHBOR_ADVERT /* 136 */ || + icmp->icmp6_type == ND_REDIRECT /* 137 */) + { + if (state->response_in) + { + /* Try again for the next packet */ + ready_callback6(0, 0, state); + } + + /* No need to do anything */ + } + else + { + printf("ready_callback6: got type %d\n", icmp->icmp6_type); + return; + } +} + +static struct trtbase *traceroute_base_new(struct event_base + *event_base) +{ + struct trtbase *base; + + base= xzalloc(sizeof(*base)); + + base->event_base= event_base; + + base->tabsiz= 10; + base->table= xzalloc(base->tabsiz * sizeof(*base->table)); + + base->my_pid= getpid(); + + return base; +} + +static void noreply_callback(int __attribute((unused)) unused, + const short __attribute((unused)) event, void *s) +{ + struct trtstate *state; + + state= s; + + if (!state->gotresp) + { + if (state->open_result) + add_str(state, " }, { "); + add_str(state, DBQ(x) ":" DBQ(*)); + state->open_result= 1; + + if (state->resp_file_out) + { + /* Use a zero proto to signal a timeout */ + uint8_t proto= 0; + + write_response(state->resp_file_out, RESP_PROTO, + sizeof(proto), &proto); + } + } + + if (state->response_in) + { + if (state->sin6.sin6_family == AF_INET6) + ready_callback6(0, 0, state); + else + ready_callback4(0, 0, state); + } + else + { + send_pkt(state); + } +} + +static void *traceroute_init(int __attribute((unused)) argc, char *argv[], + void (*done)(void *state, int error)) +{ + uint16_t destport; + uint32_t opt; + int i, do_icmp, do_v6, dont_fragment, delay_name_res, do_tcp, do_udp; + int tos; + unsigned count, duptimeout, firsthop, gaplimit, maxhops, maxpacksize, + hbhoptsize, destoptsize, parismod, parisbase, timeout; + /* must be int-sized */ + size_t newsiz; + char *str_Atlas; + char *str_bundle; + const char *hostname; + char *out_filename; + const char *destportstr; + char *response_in, *response_out; + char *interface; + char *check; + char *validated_response_in= NULL; + char *validated_response_out= NULL; + char *validated_out_filename= NULL; + struct trtstate *state; + sa_family_t af; + len_and_sockaddr *lsa; + FILE *fh; + + if (!trt_base) + { + trt_base= traceroute_base_new(EventBase); + if (!trt_base) + crondlog(DIE9 "traceroute_base_new failed"); + } + + /* Parse arguments */ + count= 3; + firsthop= 1; + gaplimit= 5; + interface= NULL; + maxhops= 32; + 
maxpacksize= 40; + destportstr= "80"; + duptimeout= 10; + timeout= 1000; + parismod= 16; + parisbase= 0; + hbhoptsize= 0; + destoptsize= 0; + tos= 0; + str_Atlas= NULL; + str_bundle= NULL; + out_filename= NULL; + response_in= NULL; + response_out= NULL; + opt_complementary = "=1:4--6:i--u:a+:b+:c+:f+:g+:m+:t+:w+:z+:S+:H+:D+"; + + opt = getopt32(argv, TRACEROUTE_OPT_STRING, &parismod, &parisbase, + &count, + &firsthop, &gaplimit, &interface, &maxhops, &destportstr, + &tos, &timeout, &duptimeout, + &str_Atlas, &str_bundle, &out_filename, &maxpacksize, + &hbhoptsize, &destoptsize, &response_in, &response_out); + hostname = argv[optind]; + + if (opt == 0xffffffff) + { + crondlog(LVL8 "bad options"); + return NULL; + } + + do_icmp= !!(opt & OPT_I); + do_v6= !!(opt & OPT_6); + dont_fragment= !!(opt & OPT_F); + delay_name_res= !!(opt & OPT_r); + delay_name_res= 1; /* Always enabled, leave the old code in + * place for now. + */ + do_tcp= !!(opt & OPT_T); + do_udp= !(do_icmp || do_tcp); + if (maxpacksize > MAX_DATA_SIZE) + { + crondlog(LVL8 "max. packet size too big"); + return NULL; + } + + if (response_in) + { + validated_response_in= rebased_validated_filename(ATLAS_SPOOLDIR, + response_in, ATLAS_FUZZING_REL); + if (!validated_response_in) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_in); + goto err; + } + } + if (response_out) + { + validated_response_out= rebased_validated_filename(ATLAS_SPOOLDIR, + response_out, ATLAS_FUZZING_REL); + if (!validated_response_out) + { + crondlog(LVL8 "insecure fuzzing file '%s'", + response_out); + goto err; + } + } + + if (out_filename) + { + validated_out_filename= rebased_validated_filename(ATLAS_SPOOLDIR, + out_filename, SAFE_PREFIX_REL); + if (!validated_out_filename) + { + crondlog(LVL8 "insecure file '%s'", out_filename); + goto err; + } + fh= fopen(validated_out_filename, "a"); + if (!fh) + { + crondlog(LVL8 "traceroute: unable to append to '%s'", + validated_out_filename); + goto err; + } + fclose(fh); + } + + if (str_Atlas) + { + if (!validate_atlas_id(str_Atlas)) + { + crondlog(LVL8 "bad atlas ID '%s'", str_Atlas); + goto err; + } + } + if (str_bundle) + { + if (!validate_atlas_id(str_bundle)) + { + crondlog(LVL8 "bad bundle ID '%s'", str_bundle); + goto err; + } + } + + if (!delay_name_res) + { + /* Attempt to resolve 'name' */ + af= do_v6 ? AF_INET6 : AF_INET; + destport= strtoul(destportstr, &check, 0); + if (check[0] != '\0' || destport == 0) + goto err; + lsa= host_and_af2sockaddr(hostname, destport, af); + if (!lsa) + goto err; + + if (lsa->len > sizeof(state->sin6)) + { + free(lsa); + goto err; + } + + if (atlas_check_addr(&lsa->u.sa, lsa->len) == -1) + { + free(lsa); + goto err; + } + } + else + { + /* lint */ + lsa= NULL; + af= -1; + } + + state= xzalloc(sizeof(*state)); + state->parismod= parismod; + state->parisbase= parisbase; + state->trtcount= count; + state->firsthop= firsthop; + state->maxpacksize= maxpacksize; + state->maxhops= maxhops; + state->gaplimit= gaplimit; + state->interface= interface ? strdup(interface) : NULL; + state->destportstr= strdup(destportstr); + state->duptimeout= duptimeout*1000; + state->timeout= timeout*1000; + state->tos= tos; + state->atlas= str_Atlas ? strdup(str_Atlas) : NULL; + state->bundle_id= str_bundle ? 
strdup(str_bundle) : NULL;
+	state->hostname= strdup(hostname);
+	state->do_icmp= do_icmp;
+	state->do_tcp= do_tcp;
+	state->do_udp= do_udp;
+	state->do_v6= do_v6;
+	state->dont_fragment= dont_fragment;
+	state->delay_name_res= delay_name_res;
+	state->hbhoptsize= hbhoptsize;
+	state->destoptsize= destoptsize;
+	state->out_filename= validated_out_filename;
+	validated_out_filename= NULL;
+	state->response_in= validated_response_in;
+	validated_response_in= NULL;
+	state->response_out= validated_response_out;
+	validated_response_out= NULL;
+	state->base= trt_base;
+	state->paris= state->parisbase;
+	state->busy= 0;
+	state->result= NULL;
+	state->reslen= 0;
+	state->resmax= 0;
+	state->socket_icmp= -1;
+	state->socket_tcp= -1;
+
+	if (response_in || response_out)
+		trt_base->my_pid= 42;
+
+	for (i= 0; i < trt_base->tabsiz; i++)
+	{
+		if (trt_base->table[i] == NULL)
+			break;
+	}
+	if (i >= trt_base->tabsiz)
+	{
+		newsiz= 2*trt_base->tabsiz;
+		trt_base->table= xrealloc(trt_base->table,
+			newsiz*sizeof(*trt_base->table));
+		for (i= trt_base->tabsiz; i < newsiz; i++)
+			trt_base->table[i]= NULL;
+		i= trt_base->tabsiz;
+		trt_base->tabsiz= newsiz;
+	}
+	state->index= i;
+	trt_base->table[i]= state;
+	trt_base->done= done;
+
+	memset(&state->loc_sin6, '\0', sizeof(state->loc_sin6));
+	state->loc_socklen= 0;
+
+	if (!delay_name_res)
+	{
+		memcpy(&state->sin6, &lsa->u.sa, lsa->len);
+		state->socklen= lsa->len;
+		free(lsa); lsa= NULL;
+		if (af == AF_INET6)
+		{
+			char buf[INET6_ADDRSTRLEN];
+			printf("traceroute_init: %s, len %d for %s\n",
+				inet_ntop(AF_INET6, &state->sin6.sin6_addr,
+				buf, sizeof(buf)), state->socklen,
+				state->hostname);
+		}
+	}
+
+	evtimer_assign(&state->timer, state->base->event_base,
+		noreply_callback, state);
+
+	return state;
+
+err:
+	if (validated_response_in) free(validated_response_in);
+	if (validated_response_out) free(validated_response_out);
+	if (validated_out_filename) free(validated_out_filename);
+	return NULL;
+}
+
+static void traceroute_start2(void *state)
+{
+	struct trtstate *trtstate;
+	char line[80];
+
+	trtstate= state;
+
+	if (!trtstate->busy)
+	{
+		printf("traceroute_start: not busy, can't continue\n");
+		return;
+	}
+	trtstate->busy= 1;
+
+	trtstate->min= ULONG_MAX;
+	trtstate->max= 0;
+	trtstate->sum= 0;
+	trtstate->sentpkts= 0;
+	trtstate->rcvdpkts= 0;
+	trtstate->duppkts= 0;
+
+	trtstate->hop= trtstate->firsthop;
+	trtstate->sent= 0;
+	trtstate->seq= 0;
+	if (trtstate->parismod)
+	{
+		trtstate->paris= (trtstate->paris-trtstate->parisbase+1+
+			trtstate->parismod) % trtstate->parismod +
+			trtstate->parisbase;
+	}
+	trtstate->last_response_hop=
+		(trtstate->firsthop > 1 ?
trtstate->firsthop-1 : 0); + trtstate->done= 0; + trtstate->not_done= 0; + trtstate->lastditch= 0; + trtstate->curpacksize= trtstate->maxpacksize; + + if (trtstate->result) free(trtstate->result); + trtstate->resmax= 80; + trtstate->result= xmalloc(trtstate->resmax); + trtstate->reslen= 0; + trtstate->open_result= 0; + trtstate->starttime= atlas_time(); + + trtstate->socket_tcp= -1; + + snprintf(line, sizeof(line), "{ " DBQ(hop) ":%d", trtstate->hop); + add_str(trtstate, line); + + if (trtstate->do_icmp) + { + if (create_socket(trtstate, 0 /*do_tcp*/) == -1) + return; + } + else if (trtstate->do_udp) + { + if (create_socket(trtstate, 0 /*do_tcp*/) == -1) + return; + if (trtstate->do_v6) + { + trtstate->loc_sin6.sin6_port= htons(SRC_BASE_PORT + + trtstate->index); + } + else + { + ((struct sockaddr_in *)(&trtstate->loc_sin6))-> + sin_port= htons(SRC_BASE_PORT + + trtstate->index); + } + } + else if (trtstate->do_tcp) + { + if (create_socket(trtstate, 1 /*do_tcp*/) == -1) + return; + + if (trtstate->do_v6) + { + trtstate->loc_sin6.sin6_port= htons(SRC_BASE_PORT + + trtstate->index); + } + else + { + ((struct sockaddr_in *)(&trtstate->loc_sin6))-> + sin_port= htons(SRC_BASE_PORT + + trtstate->index); + } + } + + add_str(trtstate, ", " DBQ(result) ": [ "); + + send_pkt(trtstate); + + if (trtstate->response_in) + { + for (;;) + { + if (trtstate->sin6.sin6_family == AF_INET6) + ready_callback6(0, 0, state); + else + ready_callback4(0, 0, state); + } + } +} + +static int create_socket(struct trtstate *state, int do_tcp) +{ + int af, type, protocol; + int r, on, serrno; + char line[80]; + + af= (state->do_v6 ? AF_INET6 : AF_INET); + type= SOCK_RAW; + protocol= (af == AF_INET6 ? IPPROTO_ICMPV6 : IPPROTO_ICMP); + + if (!state->response_in) + state->socket_icmp= xsocket(af, type, protocol); + if (state->socket_icmp == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(socket failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + + if (state->interface) + { + if (bind_interface(state->socket_icmp, + af, state->interface) == -1) + { + crondlog(LVL7 "binding to interface '%s' failed with '%s'", state->interface, strerror(errno)); + + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(bind_interface failed) " }"); + add_str(state, line); + report(state); + return -1; + } + } + + if (state->response_in) + r= 0; /* Don't try to connect when reading from a file */ + else + { + r= connect(state->socket_icmp, + (struct sockaddr *)&state->sin6, + state->socklen); + } +#if 0 + { errno= ENOSYS; r= -1; } +#endif + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(connect failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + if (state->response_in) + { + size_t len; + + len= sizeof(state->loc_sin6); + read_response(state->socket_icmp, RESP_SOCKNAME, + &len, &state->loc_sin6); + state->loc_socklen= len; + } + else + { + state->loc_socklen= sizeof(state->loc_sin6); + if (!state->response_in && getsockname(state->socket_icmp, + (struct sockaddr *)&state->loc_sin6, + &state->loc_socklen) == -1) + { + crondlog(DIE9 "getsockname failed"); + } + } + if (state->resp_file_out) + { + write_response(state->resp_file_out, RESP_SOCKNAME, + state->loc_socklen, &state->loc_sin6); + } + + if (!state->response_in) + { + close(state->socket_icmp); + state->socket_icmp= xsocket(af, type, + protocol); + } + if (state->socket_icmp == -1) + { + serrno= errno; + + 
snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(socket failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + + if (af == AF_INET6) + { + on = 1; + setsockopt(state->socket_icmp, IPPROTO_IPV6, + IPV6_RECVPKTINFO, &on, sizeof(on)); + + on = 1; + setsockopt(state->socket_icmp, IPPROTO_IPV6, + IPV6_RECVHOPLIMIT, &on, sizeof(on)); + + on = 1; + setsockopt(state->socket_icmp, IPPROTO_IPV6, + IPV6_RECVTCLASS, &on, sizeof(on)); + } + + if (state->interface) + { + if (bind_interface(state->socket_icmp, + af, state->interface) == -1) + { + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(bind_interface failed) " }"); + add_str(state, line); + report(state); + return -1; + } + } + + if (set_tos(state, state->socket_icmp, af, 0 /*!inner*/) == -1) + return -1; + + event_assign(&state->event_icmp, state->base->event_base, + state->socket_icmp, + EV_READ | EV_PERSIST, + (af == AF_INET6 ? ready_callback6 : ready_callback4), + state); + if (!state->response_in) + event_add(&state->event_icmp, NULL); + + if (do_tcp) + { + if (state->response_in) + state->socket_tcp= open("/dev/null", O_RDWR); + else + state->socket_tcp= xsocket(af, SOCK_RAW, IPPROTO_TCP); + if (state->socket_tcp == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(socket failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + + if (af == AF_INET6) + { + on = 1; + setsockopt(state->socket_tcp, IPPROTO_IPV6, + IPV6_RECVHOPLIMIT, &on, sizeof(on)); + on = 1; + setsockopt(state->socket_tcp, IPPROTO_IPV6, + IPV6_RECVTCLASS, &on, sizeof(on)); + } + + if (state->interface) + { + if (bind_interface(state->socket_tcp, + af, state->interface) == -1) + { + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(bind_interface failed) " }"); + add_str(state, line); + report(state); + return -1; + } + } + + if (state->response_in) + r= 0; /* No need to connect */ + else + { + r= connect(state->socket_tcp, + (struct sockaddr *)&state->sin6, + state->socklen); + } +#if 0 + { errno= ENOSYS; r= -1; } +#endif + if (r == -1) + { + serrno= errno; + + snprintf(line, sizeof(line), + ", " DBQ(error) ":" DBQ(connect failed: %s) " }", + strerror(serrno)); + add_str(state, line); + report(state); + return -1; + } + + event_assign(&state->event_tcp, state->base->event_base, + state->socket_tcp, + EV_READ | EV_PERSIST, + (af == AF_INET6 ? ready_tcp6 : ready_tcp4), + state); + if (!state->response_in) + event_add(&state->event_tcp, NULL); + } + + return 0; +} + +static void dns_cb(int result, struct evutil_addrinfo *res, void *ctx) +{ + int r; + size_t tmp_len; + struct trtstate *env; + double nsecs; + struct timespec now, elapsed; + char line[160]; + struct addrinfo tmp_res; + struct sockaddr_storage tmp_sockaddr; + + env= ctx; + + if (!env->dnsip) + { + crondlog(LVL7 + "dns_cb: in dns_cb but not doing dns at this time"); + if (res) + evutil_freeaddrinfo(res); + return; + } + + gettime_mono(&now); + elapsed.tv_sec= now.tv_sec - env->start_time.tv_sec; + if (now.tv_nsec < env->start_time.tv_sec) + { + elapsed.tv_sec--; + now.tv_nsec += 1000000000; + } + elapsed.tv_nsec= now.tv_nsec - env->start_time.tv_nsec; + nsecs= (elapsed.tv_sec * 1e9 + elapsed.tv_nsec); + env->ttr= nsecs/1e6; + + if (result != 0) + { + /* Hmm, great. 
Where do we put this init code */ + if (env->result) free(env->result); + env->resmax= 80; + env->result= xmalloc(env->resmax); + env->reslen= 0; + + env->starttime= time(NULL); + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(name resolution failed: %s) " }", + evutil_gai_strerror(result)); + add_str(env, line); + report(env); + return; + } + + env->dnsip= 0; + env->no_src= 0; + + env->dns_res= res; + env->dns_curr= res; + + if (env->response_in) + { + env->socket_icmp= open(env->response_in, O_RDONLY); + if (env->socket_icmp == -1) + { + crondlog(DIE9 "unable to open '%s'", + env->response_in); + } + + tmp_len= sizeof(tmp_res); + read_response(env->socket_icmp, RESP_ADDRINFO, + &tmp_len, &tmp_res); + assert(tmp_len == sizeof(tmp_res)); + tmp_len= sizeof(tmp_sockaddr); + read_response(env->socket_icmp, RESP_ADDRINFO_SA, + &tmp_len, &tmp_sockaddr); + assert(tmp_len == tmp_res.ai_addrlen); + tmp_res.ai_addr= (struct sockaddr *)&tmp_sockaddr; + env->dns_curr= &tmp_res; + } + + + while (env->dns_curr) + { + if (env->response_out) + { + write_response(env->resp_file_out, RESP_ADDRINFO, + sizeof(*env->dns_curr), env->dns_curr); + write_response(env->resp_file_out, RESP_ADDRINFO_SA, + env->dns_curr->ai_addrlen, + env->dns_curr->ai_addr); + } + + env->socklen= env->dns_curr->ai_addrlen; + if (env->socklen > sizeof(env->sin6)) + break; /* Weird */ + memcpy(&env->sin6, env->dns_curr->ai_addr, + env->socklen); + + r= atlas_check_addr((struct sockaddr *)&env->sin6, + env->socklen); + if (r == -1) + { + if (env->result) free(env->result); + env->resmax= 80; + env->result= xmalloc(env->resmax); + env->reslen= 0; + + env->starttime= time(NULL); + snprintf(line, sizeof(line), + "{ " DBQ(error) ":" DBQ(address not allowed) " }"); + add_str(env, line); + env->no_src= 1; + report(env); + return; + } + + traceroute_start2(env); + + if (!env->response_in) + evutil_freeaddrinfo(env->dns_res); + + env->dns_res= NULL; + env->dns_curr= NULL; + + return; + } + + /* Something went wrong */ + if (!env->response_in) + evutil_freeaddrinfo(env->dns_res); + env->dns_res= NULL; + env->dns_curr= NULL; + snprintf(line, sizeof(line), +"%s{ " DBQ(error) ":" DBQ(name resolution failed: out of addresses) " } ] }", + env->sent ? " }, " : ""); + add_str(env, line); + report(env); +} + +static void traceroute_start(void *state) +{ + struct trtstate *trtstate; + struct evutil_addrinfo hints; + + trtstate= state; + + if (trtstate->busy) + { + printf("traceroute_start: busy, can't start\n"); + return; + } + trtstate->busy= 1; + trtstate->socket_icmp= -1; + + if (trtstate->response_out) + { + trtstate->resp_file_out= fopen(trtstate->response_out, "w"); + if (!trtstate->resp_file_out) + { + crondlog(DIE9 "unable to write to '%s'", + trtstate->response_out); + } + } + + + if (!trtstate->delay_name_res) + { + traceroute_start2(state); + return; + } + + gettime_mono(&trtstate->start_time); + trtstate->dnsip= 1; + if (trtstate->response_in) + { + dns_cb(0, 0, trtstate); + } + else + { + memset(&hints, '\0', sizeof(hints)); + hints.ai_socktype= SOCK_DGRAM; + hints.ai_family= trtstate->do_v6 ? 
AF_INET6 : AF_INET; + (void) evdns_getaddrinfo(DnsBase, trtstate->hostname, + trtstate->destportstr, &hints, dns_cb, trtstate); + } +} + +static int traceroute_delete(void *state) +{ + int ind; + struct trtstate *trtstate; + struct trtbase *base; + + trtstate= state; + + printf("traceroute_delete: state %p, index %d, busy %d\n", + state, trtstate->index, trtstate->busy); + + if (trtstate->busy) + return 0; + + base= trtstate->base; + ind= trtstate->index; + + if (base->table[ind] != trtstate) + crondlog(DIE9 "strange, state not in table"); + base->table[ind]= NULL; + + event_del(&trtstate->timer); + + free(trtstate->atlas); + trtstate->atlas= NULL; + free(trtstate->interface); + trtstate->interface= NULL; + free(trtstate->bundle_id); + trtstate->bundle_id= NULL; + free(trtstate->hostname); + trtstate->hostname= NULL; + free(trtstate->destportstr); + trtstate->destportstr= NULL; + free(trtstate->out_filename); + trtstate->out_filename= NULL; + + free(trtstate); + + return 1; +} + +struct testops traceroute_ops = { traceroute_init, traceroute_start, + traceroute_delete }; + diff --git a/probe-busybox/include/.gitignore b/probe-busybox/include/.gitignore new file mode 100644 index 00000000..75afff9c --- /dev/null +++ b/probe-busybox/include/.gitignore @@ -0,0 +1,11 @@ +/config + +/applets.h +/applet_tables.h +/autoconf.h +/bbconfigopts_bz2.h +/bbconfigopts.h +/NUM_APPLETS.h +/usage_compressed.h +/usage.h +/common_bufsiz.h* diff --git a/probe-busybox/include/applet_metadata.h b/probe-busybox/include/applet_metadata.h new file mode 100644 index 00000000..566ef351 --- /dev/null +++ b/probe-busybox/include/applet_metadata.h @@ -0,0 +1,30 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#ifndef APPLET_METADATA_H +#define APPLET_METADATA_H 1 + +/* Note: can be included by both host and target builds! */ + +/* order matters: used as index into "install_dir[]" in appletlib.c */ +typedef enum bb_install_loc_t { + BB_DIR_ROOT = 0, + BB_DIR_BIN, + BB_DIR_SBIN, +#if ENABLE_INSTALL_NO_USR + BB_DIR_USR_BIN = BB_DIR_BIN, + BB_DIR_USR_SBIN = BB_DIR_SBIN, +#else + BB_DIR_USR_BIN, + BB_DIR_USR_SBIN, +#endif +} bb_install_loc_t; + +typedef enum bb_suid_t { + BB_SUID_DROP = 0, + BB_SUID_MAYBE, + BB_SUID_REQUIRE +} bb_suid_t; + +#endif diff --git a/probe-busybox/include/applets.h.sh b/probe-busybox/include/applets.h.sh new file mode 100755 index 00000000..be117cf8 --- /dev/null +++ b/probe-busybox/include/applets.h.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# +# This script allows to check whether every applet has a separate option +# enabling it. Run it after applets.h is generated. + +# CONFIG_applet names +grep ^IF_ applets.h | grep -v ^IF_FEATURE_ | sed 's/IF_\([A-Z0-9._-]*\)(.*/\1/' \ +| sort | uniq \ +>applets_APP1 + +# command line applet names +grep ^IF_ applets.h | sed -e's/ //g' -e's/.*(\([a-z[][^,]*\),.*/\1/' \ +| grep -v '^bash$' \ +| grep -v '^sh$' \ +| tr a-z A-Z \ +| sed 's/^SYSCTL$/BB_SYSCTL/' \ +| sed 's/^\[\[$/TEST1/' \ +| sed 's/^\[$/TEST2/' \ +| sort | uniq \ +>applets_APP2 + +diff -u applets_APP1 applets_APP2 >applets_APP.diff +#rm applets_APP1 applets_APP2 diff --git a/probe-busybox/include/applets.src.h b/probe-busybox/include/applets.src.h new file mode 100644 index 00000000..2ddf120a --- /dev/null +++ b/probe-busybox/include/applets.src.h @@ -0,0 +1,86 @@ +/* vi: set sw=4 ts=4: */ +/* + * applets.h - a listing of all busybox applets. + * + * If you write a new applet, you need to add an entry to this list to make + * busybox aware of it. 
+ */ + +/* +name - applet name as it is typed on command line +help - applet name, converted to C (ether-wake: help = ether_wake) +main - corresponding _main to call (bzcat: main = bunzip2) +l - location to install link to: [/usr]/[s]bin +s - suid type: + BB_SUID_REQUIRE: will complain if busybox isn't suid + and is run by non-root (applet_main() will not be called at all) + BB_SUID_DROP: will drop suid prior to applet_main() + BB_SUID_MAYBE: neither of the above + (every instance of BB_SUID_REQUIRE and BB_SUID_MAYBE + needs to be justified in comment) + NB: please update FEATURE_SUID help text whenever you add/remove + BB_SUID_REQUIRE or BB_SUID_MAYBE applet. +*/ + +#if defined(PROTOTYPES) +# define APPLET(name,l,s) int name##_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +# define APPLET_ODDNAME(name,main,l,s,help) int main##_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +# define APPLET_NOEXEC(name,main,l,s,help) int main##_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +# define APPLET_NOFORK(name,main,l,s,help) int main##_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; + +#elif defined(NAME_MAIN) +# define APPLET(name,l,s) name name##_main +# define APPLET_ODDNAME(name,main,l,s,help) name main##_main +# define APPLET_NOEXEC(name,main,l,s,help) name main##_main +# define APPLET_NOFORK(name,main,l,s,help) name main##_main + +#elif defined(MAKE_USAGE) && ENABLE_FEATURE_VERBOSE_USAGE +# define APPLET(name,l,s) MAKE_USAGE(#name, name##_trivial_usage name##_full_usage) +# define APPLET_ODDNAME(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage help##_full_usage) +# define APPLET_NOEXEC(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage help##_full_usage) +# define APPLET_NOFORK(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage help##_full_usage) + +#elif defined(MAKE_USAGE) && !ENABLE_FEATURE_VERBOSE_USAGE +# define APPLET(name,l,s) MAKE_USAGE(#name, name##_trivial_usage) +# define APPLET_ODDNAME(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage) +# define APPLET_NOEXEC(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage) +# define APPLET_NOFORK(name,main,l,s,help) MAKE_USAGE(#name, help##_trivial_usage) + +#elif defined(MAKE_LINKS) +# define APPLET(name,l,c) LINK l name +# define APPLET_ODDNAME(name,main,l,s,help) LINK l name +# define APPLET_NOEXEC(name,main,l,s,help) LINK l name +# define APPLET_NOFORK(name,main,l,s,help) LINK l name + +#elif defined(MAKE_SUID) +# define APPLET(name,l,s) SUID s l name +# define APPLET_ODDNAME(name,main,l,s,help) SUID s l name +# define APPLET_NOEXEC(name,main,l,s,help) SUID s l name +# define APPLET_NOFORK(name,main,l,s,help) SUID s l name + +#else + static struct bb_applet applets[] = { /* name, main, location, need_suid */ +# define APPLET(name,l,s) { #name, #name, l, s }, +# define APPLET_ODDNAME(name,main,l,s,help) { #name, #main, l, s }, +# define APPLET_NOEXEC(name,main,l,s,help) { #name, #main, l, s, 1 }, +# define APPLET_NOFORK(name,main,l,s,help) { #name, #main, l, s, 1, 1 }, +#endif + +#if ENABLE_INSTALL_NO_USR +# define BB_DIR_USR_BIN BB_DIR_BIN +# define BB_DIR_USR_SBIN BB_DIR_SBIN +#endif + + +INSERT + + +#if !defined(PROTOTYPES) && !defined(NAME_MAIN) && !defined(MAKE_USAGE) \ + && !defined(MAKE_LINKS) && !defined(MAKE_SUID) +}; +#endif + +#undef APPLET +#undef APPLET_ODDNAME +#undef APPLET_NOEXEC +#undef APPLET_NOFORK diff --git a/probe-busybox/include/ar.h b/probe-busybox/include/ar.h new file mode 100644 index 00000000..386fe045 --- /dev/null +++ 
b/probe-busybox/include/ar.h
@@ -0,0 +1,26 @@
+/*
+ * busybox ar archive data structures
+ * Licensed under GPLv2 or later, see file LICENSE in this source tree.
+ */
+#ifndef AR_H
+#define AR_H
+
+PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN
+
+struct ar_header {
+	char name[16];
+	char date[12];
+	char uid[6];
+	char gid[6];
+	char mode[8];
+	char size[10];
+	char magic[2];
+};
+
+#define AR_HEADER_LEN sizeof(struct ar_header)
+#define AR_MAGIC "!<arch>"
+#define AR_MAGIC_LEN 7
+
+POP_SAVED_FUNCTION_VISIBILITY
+
+#endif
diff --git a/probe-busybox/include/atlas_path.h.in b/probe-busybox/include/atlas_path.h.in
new file mode 100644
index 00000000..dd1bc488
--- /dev/null
+++ b/probe-busybox/include/atlas_path.h.in
@@ -0,0 +1,23 @@
+/* vi: set sw=4 ts=4: */
+/*
+ * Atlas directory paths.
+ * Licensed under GPLv2, see file LICENSE in this source tree.
+ */
+#ifndef _ATLAS_PATH_H_
+#define _ATLAS_PATH_H_
+
+#define ATLAS_LIBEXECDIR "@atlas_libexecdir@"
+#define ATLAS_DATADIR "@atlas_datadir@"
+#define ATLAS_SYSCONFDIR "@atlas_sysconfdir@"
+#define ATLAS_SPOOLDIR "@atlas_spooldir@"
+#define ATLAS_RUNDIR "@atlas_rundir@"
+#define ATLAS_MEASUREMENT "@atlas_measurementdir@"
+#define ATLAS_SCRIPTS "@atlas_scriptsdir@"
+#define ATLAS_TMP "@tmpdir@"
+
+#define ATLAS_CRONS "@atlas_spooldir@/crons"
+#define ATLAS_DATA "@atlas_spooldir@/data"
+#define ATLAS_PIDS "@atlas_rundir@/pids"
+#define ATLAS_STATUS "@atlas_rundir@/status"
+
+#endif /* _ATLAS_PATH_H_ */
diff --git a/probe-busybox/include/bb_archive.h b/probe-busybox/include/bb_archive.h
new file mode 100644
index 00000000..d7ac1999
--- /dev/null
+++ b/probe-busybox/include/bb_archive.h
@@ -0,0 +1,296 @@
+/* vi: set sw=4 ts=4: */
+#ifndef UNARCHIVE_H
+#define UNARCHIVE_H 1
+
+PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN
+
+enum {
+#if BB_BIG_ENDIAN
+	COMPRESS_MAGIC = 0x1f9d,
+	GZIP_MAGIC = 0x1f8b,
+	BZIP2_MAGIC = 256 * 'B' + 'Z',
+	/* .xz signature: 0xfd, '7', 'z', 'X', 'Z', 0x00 */
+	/* More info at: http://tukaani.org/xz/xz-file-format.txt */
+	XZ_MAGIC1 = 256 * 0xfd + '7',
+	XZ_MAGIC2 = 256 * (unsigned)(256 * (256 * 'z' + 'X') + 'Z') + 0,
+	/* Different form: 32 bits, then 16 bits: */
+	/* (unsigned) cast suppresses "integer overflow in expression" warning */
+	XZ_MAGIC1a = 256 * (unsigned)(256 * (256 * 0xfd + '7') + 'z') + 'X',
+	XZ_MAGIC2a = 256 * 'Z' + 0,
+#else
+	COMPRESS_MAGIC = 0x9d1f,
+	GZIP_MAGIC = 0x8b1f,
+	BZIP2_MAGIC = 'B' + 'Z' * 256,
+	XZ_MAGIC1 = 0xfd + '7' * 256,
+	XZ_MAGIC2 = 'z' + ('X' + ('Z' + 0 * 256) * 256) * 256,
+	XZ_MAGIC1a = 0xfd + ('7' + ('z' + 'X' * 256) * 256) * 256,
+	XZ_MAGIC2a = 'Z' + 0 * 256,
+#endif
+};
+
+#ifndef ENABLE_FEATURE_TAR_UNAME_GNAME
+#define ENABLE_FEATURE_TAR_UNAME_GNAME 0
+#endif
+typedef struct file_header_t {
+	char *name;
+	char *link_target;
+#if ENABLE_FEATURE_TAR_UNAME_GNAME
+	char *tar__uname;
+	char *tar__gname;
+#endif
+	off_t size;
+	uid_t uid;
+	gid_t gid;
+	mode_t mode;
+	time_t mtime;
+	dev_t device;
+} file_header_t;
+
+struct hardlinks_t;
+
+#ifndef ENABLE_TAR
+#define ENABLE_TAR 0
+#endif
+#ifndef ENABLE_DPKG
+#define ENABLE_DPKG 0
+#endif
+#ifndef ENABLE_DPKG_DEB
+#define ENABLE_DPKG_DEB 0
+#endif
+#ifndef ENABLE_FEATURE_TAR_LONG_OPTIONS
+#define ENABLE_FEATURE_TAR_LONG_OPTIONS 0
+#endif
+#ifndef ENABLE_CPIO
+#define ENABLE_CPIO 0
+#endif
+#ifndef ENABLE_RPM2CPIO
+#define ENABLE_RPM2CPIO 0
+#endif
+#ifndef ENABLE_RPM
+#define ENABLE_RPM 0
+#endif
+#ifndef ENABLE_FEATURE_AR_CREATE
+#define ENABLE_FEATURE_AR_CREATE 0
+#endif
+typedef struct archive_handle_t {
+	/* Flags.
1st since it is most used member */ + unsigned ah_flags; + + /* The raw stream as read from disk or stdin */ + int src_fd; + + /* Define if the header and data component should be processed */ + char FAST_FUNC (*filter)(struct archive_handle_t *); + /* List of files that have been accepted */ + llist_t *accept; + /* List of files that have been rejected */ + llist_t *reject; + /* List of files that have successfully been worked on */ + llist_t *passed; + + /* Currently processed file's header */ + file_header_t *file_header; + + /* Process the header component, e.g. tar -t */ + void FAST_FUNC (*action_header)(const file_header_t *); + + /* Process the data component, e.g. extract to filesystem */ + void FAST_FUNC (*action_data)(struct archive_handle_t *); + + /* Function that skips data */ + void FAST_FUNC (*seek)(int fd, off_t amount); + + /* Count processed bytes */ + off_t offset; + + /* Archiver specific. Can make it a union if it ever gets big */ +#if ENABLE_FEATURE_TAR_LONG_OPTIONS + unsigned tar__strip_components; +#endif +#define PAX_NEXT_FILE 0 +#define PAX_GLOBAL 1 +#if ENABLE_TAR || ENABLE_DPKG || ENABLE_DPKG_DEB + smallint tar__end; +# if ENABLE_FEATURE_TAR_GNU_EXTENSIONS + char* tar__longname; + char* tar__linkname; +# endif +# if ENABLE_FEATURE_TAR_TO_COMMAND + char* tar__to_command; + const char* tar__to_command_shell; +# endif +# if ENABLE_FEATURE_TAR_SELINUX + char* tar__sctx[2]; +# endif +#endif +#if ENABLE_CPIO || ENABLE_RPM2CPIO || ENABLE_RPM + uoff_t cpio__blocks; + struct bb_uidgid_t cpio__owner; + struct hardlinks_t *cpio__hardlinks_to_create; + struct hardlinks_t *cpio__created_hardlinks; +#endif +#if ENABLE_DPKG || ENABLE_DPKG_DEB + /* Temporary storage */ + char *dpkg__buffer; + /* How to process any sub archive, e.g. get_header_tar_gz */ + char FAST_FUNC (*dpkg__action_data_subarchive)(struct archive_handle_t *); + /* Contains the handle to a sub archive */ + struct archive_handle_t *dpkg__sub_archive; +#endif +#if ENABLE_FEATURE_AR_CREATE + const char *ar__name; + struct archive_handle_t *ar__out; +#endif +} archive_handle_t; +/* bits in ah_flags */ +#define ARCHIVE_RESTORE_DATE (1 << 0) +#define ARCHIVE_CREATE_LEADING_DIRS (1 << 1) +#define ARCHIVE_UNLINK_OLD (1 << 2) +#define ARCHIVE_EXTRACT_QUIET (1 << 3) +#define ARCHIVE_EXTRACT_NEWER (1 << 4) +#define ARCHIVE_DONT_RESTORE_OWNER (1 << 5) +#define ARCHIVE_DONT_RESTORE_PERM (1 << 6) +#define ARCHIVE_NUMERIC_OWNER (1 << 7) +#define ARCHIVE_O_TRUNC (1 << 8) +#define ARCHIVE_REMEMBER_NAMES (1 << 9) +#if ENABLE_RPM +#define ARCHIVE_REPLACE_VIA_RENAME (1 << 10) +#endif + + +/* POSIX tar Header Block, from POSIX 1003.1-1990 */ +#define TAR_BLOCK_SIZE 512 +#define NAME_SIZE 100 +#define NAME_SIZE_STR "100" +typedef struct tar_header_t { /* byte offset */ + char name[NAME_SIZE]; /* 0-99 */ + char mode[8]; /* 100-107 */ + char uid[8]; /* 108-115 */ + char gid[8]; /* 116-123 */ + char size[12]; /* 124-135 */ + char mtime[12]; /* 136-147 */ + char chksum[8]; /* 148-155 */ + char typeflag; /* 156-156 */ + char linkname[NAME_SIZE]; /* 157-256 */ + /* POSIX: "ustar" NUL "00" */ + /* GNU tar: "ustar " NUL */ + /* Normally it's defined as magic[6] followed by + * version[2], but we put them together to save code. 
+ */ + char magic[8]; /* 257-264 */ + char uname[32]; /* 265-296 */ + char gname[32]; /* 297-328 */ + char devmajor[8]; /* 329-336 */ + char devminor[8]; /* 337-344 */ + char prefix[155]; /* 345-499 */ + char padding[12]; /* 500-512 (pad to exactly TAR_BLOCK_SIZE) */ +} tar_header_t; +struct BUG_tar_header { + char c[sizeof(tar_header_t) == TAR_BLOCK_SIZE ? 1 : -1]; +}; + + +extern const char cpio_TRAILER[]; + + +archive_handle_t *init_handle(void) FAST_FUNC; + +char filter_accept_all(archive_handle_t *archive_handle) FAST_FUNC; +char filter_accept_list(archive_handle_t *archive_handle) FAST_FUNC; +char filter_accept_list_reassign(archive_handle_t *archive_handle) FAST_FUNC; +char filter_accept_reject_list(archive_handle_t *archive_handle) FAST_FUNC; + +void unpack_ar_archive(archive_handle_t *ar_archive) FAST_FUNC; + +void data_skip(archive_handle_t *archive_handle) FAST_FUNC; +void data_extract_all(archive_handle_t *archive_handle) FAST_FUNC; +void data_extract_to_stdout(archive_handle_t *archive_handle) FAST_FUNC; +void data_extract_to_command(archive_handle_t *archive_handle) FAST_FUNC; + +void header_skip(const file_header_t *file_header) FAST_FUNC; +void header_list(const file_header_t *file_header) FAST_FUNC; +void header_verbose_list(const file_header_t *file_header) FAST_FUNC; + +char get_header_ar(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_cpio(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_tar(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_tar_gz(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_tar_bz2(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_tar_lzma(archive_handle_t *archive_handle) FAST_FUNC; +char get_header_tar_xz(archive_handle_t *archive_handle) FAST_FUNC; + +void seek_by_jump(int fd, off_t amount) FAST_FUNC; +void seek_by_read(int fd, off_t amount) FAST_FUNC; + +const char *strip_unsafe_prefix(const char *str) FAST_FUNC; + +void data_align(archive_handle_t *archive_handle, unsigned boundary) FAST_FUNC; +const llist_t *find_list_entry(const llist_t *list, const char *filename) FAST_FUNC; +const llist_t *find_list_entry2(const llist_t *list, const char *filename) FAST_FUNC; + +/* A bit of bunzip2 internals are exposed for compressed help support: */ +typedef struct bunzip_data bunzip_data; +int start_bunzip(bunzip_data **bdp, int in_fd, const void *inbuf, int len) FAST_FUNC; +/* NB: read_bunzip returns < 0 on error, or the number of *unfilled* bytes + * in outbuf. 
IOW: on EOF returns len ("all bytes are not filled"), not 0: */ +int read_bunzip(bunzip_data *bd, char *outbuf, int len) FAST_FUNC; +void dealloc_bunzip(bunzip_data *bd) FAST_FUNC; + +/* Meaning and direction (input/output) of the fields are transformer-specific */ +typedef struct transformer_state_t { + smallint signature_skipped; /* most often referenced member */ + + IF_DESKTOP(long long) int FAST_FUNC (*xformer)(struct transformer_state_t *xstate); + USE_FOR_NOMMU(const char *xformer_prog;) + + /* Source */ + int src_fd; + /* Output */ + int dst_fd; + size_t mem_output_size_max; /* if non-zero, decompress to RAM instead of fd */ + size_t mem_output_size; + char *mem_output_buf; + + off_t bytes_out; + off_t bytes_in; /* used in unzip code only: needs to know packed size */ + uint32_t crc32; + time_t mtime; /* gunzip code may set this on exit */ +} transformer_state_t; + +void init_transformer_state(transformer_state_t *xstate) FAST_FUNC; +ssize_t transformer_write(transformer_state_t *xstate, const void *buf, size_t bufsize) FAST_FUNC; +ssize_t xtransformer_write(transformer_state_t *xstate, const void *buf, size_t bufsize) FAST_FUNC; +int check_signature16(transformer_state_t *xstate, unsigned magic16) FAST_FUNC; + +IF_DESKTOP(long long) int inflate_unzip(transformer_state_t *xstate) FAST_FUNC; +IF_DESKTOP(long long) int unpack_Z_stream(transformer_state_t *xstate) FAST_FUNC; +IF_DESKTOP(long long) int unpack_gz_stream(transformer_state_t *xstate) FAST_FUNC; +IF_DESKTOP(long long) int unpack_bz2_stream(transformer_state_t *xstate) FAST_FUNC; +IF_DESKTOP(long long) int unpack_lzma_stream(transformer_state_t *xstate) FAST_FUNC; +IF_DESKTOP(long long) int unpack_xz_stream(transformer_state_t *xstate) FAST_FUNC; + +char* append_ext(char *filename, const char *expected_ext) FAST_FUNC; +int bbunpack(char **argv, + IF_DESKTOP(long long) int FAST_FUNC (*unpacker)(transformer_state_t *xstate), + char* FAST_FUNC (*make_new_name)(char *filename, const char *expected_ext), + const char *expected_ext +) FAST_FUNC; + +void check_errors_in_children(int signo); +#if BB_MMU +void fork_transformer(int fd, + int signature_skipped, + IF_DESKTOP(long long) int FAST_FUNC (*transformer)(transformer_state_t *xstate) +) FAST_FUNC; +#define fork_transformer_with_sig(fd, transformer, transform_prog) fork_transformer((fd), 0, (transformer)) +#define fork_transformer_with_no_sig(fd, transformer) fork_transformer((fd), 1, (transformer)) +#else +void fork_transformer(int fd, const char *transform_prog) FAST_FUNC; +#define fork_transformer_with_sig(fd, transformer, transform_prog) fork_transformer((fd), (transform_prog)) +/* fork_transformer_with_no_sig() does not exist on NOMMU */ +#endif + + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/bb_e2fs_defs.h b/probe-busybox/include/bb_e2fs_defs.h new file mode 100644 index 00000000..3f5e3c45 --- /dev/null +++ b/probe-busybox/include/bb_e2fs_defs.h @@ -0,0 +1,602 @@ +/* vi: set sw=4 ts=4: */ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef LINUX_EXT2_FS_H +#define LINUX_EXT2_FS_H 1 + +/* + * Special inode numbers + */ +#define EXT2_BAD_INO 1 /* Bad blocks inode */ +#define EXT2_ROOT_INO 2 /* Root inode */ +#define EXT2_ACL_IDX_INO 3 /* ACL inode */ +#define EXT2_ACL_DATA_INO 4 /* ACL inode */ +#define EXT2_BOOT_LOADER_INO 
5 /* Boot loader inode */ +#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */ +#define EXT2_RESIZE_INO 7 /* Reserved group descriptors inode */ +#define EXT2_JOURNAL_INO 8 /* Journal inode */ + +/* First non-reserved inode for old ext2 filesystems */ +#define EXT2_GOOD_OLD_FIRST_INO 11 + +/* + * The second extended file system magic number + */ +#define EXT2_SUPER_MAGIC 0xEF53 + +/* Assume that user mode programs are passing in an ext2fs superblock, not + * a kernel struct super_block. This will allow us to call the feature-test + * macros from user land. */ +#define EXT2_SB(sb) (sb) + +/* + * Maximal count of links to a file + */ +#define EXT2_LINK_MAX 32000 + +/* + * Macro-instructions used to manage several block sizes + */ +#define EXT2_MIN_BLOCK_LOG_SIZE 10 /* 1024 */ +#define EXT2_MAX_BLOCK_LOG_SIZE 16 /* 65536 */ +#define EXT2_MIN_BLOCK_SIZE (1 << EXT2_MIN_BLOCK_LOG_SIZE) +#define EXT2_MAX_BLOCK_SIZE (1 << EXT2_MAX_BLOCK_LOG_SIZE) +#define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size) +#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10) +#define EXT2_INODE_SIZE(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \ + EXT2_GOOD_OLD_INODE_SIZE : (s)->s_inode_size) +#define EXT2_FIRST_INO(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \ + EXT2_GOOD_OLD_FIRST_INO : (s)->s_first_ino) +#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof(uint32_t)) + +/* + * Macro-instructions used to manage fragments + */ +#define EXT2_MIN_FRAG_SIZE EXT2_MIN_BLOCK_SIZE +#define EXT2_MAX_FRAG_SIZE EXT2_MAX_BLOCK_SIZE +#define EXT2_MIN_FRAG_LOG_SIZE EXT2_MIN_BLOCK_LOG_SIZE +#define EXT2_FRAG_SIZE(s) (EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size) +#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s)) + +/* + * ACL structures + */ +struct ext2_acl_header { /* Header of Access Control Lists */ + uint32_t aclh_size; + uint32_t aclh_file_count; + uint32_t aclh_acle_count; + uint32_t aclh_first_acle; +}; + +struct ext2_acl_entry { /* Access Control List Entry */ + uint32_t acle_size; + uint16_t acle_perms; /* Access permissions */ + uint16_t acle_type; /* Type of entry */ + uint16_t acle_tag; /* User or group identity */ + uint16_t acle_pad1; + uint32_t acle_next; /* Pointer on next entry for the */ + /* same inode or on next free entry */ +}; + +/* + * Structure of a blocks group descriptor + */ +struct ext2_group_desc { + uint32_t bg_block_bitmap; /* Blocks bitmap block */ + uint32_t bg_inode_bitmap; /* Inodes bitmap block */ + uint32_t bg_inode_table; /* Inodes table block */ + uint16_t bg_free_blocks_count; /* Free blocks count */ + uint16_t bg_free_inodes_count; /* Free inodes count */ + uint16_t bg_used_dirs_count; /* Directories count */ + uint16_t bg_pad; + uint32_t bg_reserved[3]; +}; + +/* + * Data structures used by the directory indexing feature + * + * Note: all of the multibyte integer fields are little endian. + */ + +/* + * Note: dx_root_info is laid out so that if it should somehow get + * overlaid by a dirent the two low bits of the hash version will be + * zero. Therefore, the hash version mod 4 should never be 0. + * Sincerely, the paranoia department. 
+ */ +struct ext2_dx_root_info { + uint32_t reserved_zero; + uint8_t hash_version; /* 0 now, 1 at release */ + uint8_t info_length; /* 8 */ + uint8_t indirect_levels; + uint8_t unused_flags; +}; + +#define EXT2_HASH_LEGACY 0 +#define EXT2_HASH_HALF_MD4 1 +#define EXT2_HASH_TEA 2 + +#define EXT2_HASH_FLAG_INCOMPAT 0x1 + +struct ext2_dx_entry { + uint32_t hash; + uint32_t block; +}; + +struct ext2_dx_countlimit { + uint16_t limit; + uint16_t count; +}; + + +/* + * Macro-instructions used to manage group descriptors + */ +#define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group) +#define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group) +#define EXT2_INODES_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s)/EXT2_INODE_SIZE(s)) +/* limits imposed by 16-bit value gd_free_{blocks,inode}_count */ +#define EXT2_MAX_BLOCKS_PER_GROUP(s) ((1 << 16) - 8) +#define EXT2_MAX_INODES_PER_GROUP(s) ((1 << 16) - EXT2_INODES_PER_BLOCK(s)) +#define EXT2_DESC_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc)) + +/* + * Constants relative to the data blocks + */ +#define EXT2_NDIR_BLOCKS 12 +#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS +#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1) +#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1) +#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1) + +/* + * Inode flags + */ +#define EXT2_SECRM_FL 0x00000001 /* Secure deletion */ +#define EXT2_UNRM_FL 0x00000002 /* Undelete */ +#define EXT2_COMPR_FL 0x00000004 /* Compress file */ +#define EXT2_SYNC_FL 0x00000008 /* Synchronous updates */ +#define EXT2_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define EXT2_APPEND_FL 0x00000020 /* writes to file may only append */ +#define EXT2_NODUMP_FL 0x00000040 /* do not dump file */ +#define EXT2_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... 
*/ +#define EXT2_DIRTY_FL 0x00000100 +#define EXT2_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ +#define EXT2_NOCOMPR_FL 0x00000400 /* Access raw compressed data */ +#define EXT2_ECOMPR_FL 0x00000800 /* Compression error */ +/* End compression flags --- maybe not all used */ +#define EXT2_BTREE_FL 0x00001000 /* btree format dir */ +#define EXT2_INDEX_FL 0x00001000 /* hash-indexed directory */ +#define EXT2_IMAGIC_FL 0x00002000 +#define EXT3_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */ +#define EXT2_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define EXT2_DIRSYNC_FL 0x00010000 /* Synchronous directory modifications */ +#define EXT2_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */ +#define EXT2_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ + +#define EXT2_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define EXT2_FL_USER_MODIFIABLE 0x000080FF /* User modifiable flags */ + +/* + * ioctl commands + */ +#define EXT2_IOC_GETFLAGS _IOR('f', 1, long) +#define EXT2_IOC_SETFLAGS _IOW('f', 2, long) +#define EXT2_IOC_GETVERSION _IOR('v', 1, long) +#define EXT2_IOC_SETVERSION _IOW('v', 2, long) + +/* + * Structure of an inode on the disk + */ +struct ext2_inode { + uint16_t i_mode; /* File mode */ + uint16_t i_uid; /* Low 16 bits of Owner Uid */ + uint32_t i_size; /* Size in bytes */ + uint32_t i_atime; /* Access time */ + uint32_t i_ctime; /* Creation time */ + uint32_t i_mtime; /* Modification time */ + uint32_t i_dtime; /* Deletion Time */ + uint16_t i_gid; /* Low 16 bits of Group Id */ + uint16_t i_links_count; /* Links count */ + uint32_t i_blocks; /* Blocks count */ + uint32_t i_flags; /* File flags */ + union { + struct { + uint32_t l_i_reserved1; + } linux1; + struct { + uint32_t h_i_translator; + } hurd1; + struct { + uint32_t m_i_reserved1; + } masix1; + } osd1; /* OS dependent 1 */ + uint32_t i_block[EXT2_N_BLOCKS];/* Pointers to blocks */ + uint32_t i_generation; /* File version (for NFS) */ + uint32_t i_file_acl; /* File ACL */ + uint32_t i_dir_acl; /* Directory ACL */ + uint32_t i_faddr; /* Fragment address */ + union { + struct { + uint8_t l_i_frag; /* Fragment number */ + uint8_t l_i_fsize; /* Fragment size */ + uint16_t i_pad1; + uint16_t l_i_uid_high; /* these 2 fields */ + uint16_t l_i_gid_high; /* were reserved2[0] */ + uint32_t l_i_reserved2; + } linux2; + struct { + uint8_t h_i_frag; /* Fragment number */ + uint8_t h_i_fsize; /* Fragment size */ + uint16_t h_i_mode_high; + uint16_t h_i_uid_high; + uint16_t h_i_gid_high; + uint32_t h_i_author; + } hurd2; + struct { + uint8_t m_i_frag; /* Fragment number */ + uint8_t m_i_fsize; /* Fragment size */ + uint16_t m_pad1; + uint32_t m_i_reserved2[2]; + } masix2; + } osd2; /* OS dependent 2 */ +}; + +/* + * Permanent part of an large inode on the disk + */ +struct ext2_inode_large { + uint16_t i_mode; /* File mode */ + uint16_t i_uid; /* Low 16 bits of Owner Uid */ + uint32_t i_size; /* Size in bytes */ + uint32_t i_atime; /* Access time */ + uint32_t i_ctime; /* Creation time */ + uint32_t i_mtime; /* Modification time */ + uint32_t i_dtime; /* Deletion Time */ + uint16_t i_gid; /* Low 16 bits of Group Id */ + uint16_t i_links_count; /* Links count */ + uint32_t i_blocks; /* Blocks count */ + uint32_t i_flags; /* File flags */ + union { + struct { + uint32_t l_i_reserved1; + } linux1; + struct { + uint32_t h_i_translator; + } hurd1; + struct { + uint32_t m_i_reserved1; + } masix1; + } osd1; /* OS dependent 1 */ + 
uint32_t i_block[EXT2_N_BLOCKS];/* Pointers to blocks */ + uint32_t i_generation; /* File version (for NFS) */ + uint32_t i_file_acl; /* File ACL */ + uint32_t i_dir_acl; /* Directory ACL */ + uint32_t i_faddr; /* Fragment address */ + union { + struct { + uint8_t l_i_frag; /* Fragment number */ + uint8_t l_i_fsize; /* Fragment size */ + uint16_t i_pad1; + uint16_t l_i_uid_high; /* these 2 fields */ + uint16_t l_i_gid_high; /* were reserved2[0] */ + uint32_t l_i_reserved2; + } linux2; + struct { + uint8_t h_i_frag; /* Fragment number */ + uint8_t h_i_fsize; /* Fragment size */ + uint16_t h_i_mode_high; + uint16_t h_i_uid_high; + uint16_t h_i_gid_high; + uint32_t h_i_author; + } hurd2; + struct { + uint8_t m_i_frag; /* Fragment number */ + uint8_t m_i_fsize; /* Fragment size */ + uint16_t m_pad1; + uint32_t m_i_reserved2[2]; + } masix2; + } osd2; /* OS dependent 2 */ + uint16_t i_extra_isize; + uint16_t i_pad1; +}; + +#define i_size_high i_dir_acl + +/* + * File system states + */ +#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */ +#define EXT2_ERROR_FS 0x0002 /* Errors detected */ + +/* + * Mount flags + */ +#define EXT2_MOUNT_CHECK 0x0001 /* Do mount-time checks */ +#define EXT2_MOUNT_GRPID 0x0004 /* Create files with directory's group */ +#define EXT2_MOUNT_DEBUG 0x0008 /* Some debugging messages */ +#define EXT2_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ +#define EXT2_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ +#define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ +#define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */ +#define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */ + +#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt +#define set_opt(o, opt) o |= EXT2_MOUNT_##opt +#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \ + EXT2_MOUNT_##opt) +/* + * Maximal mount counts between two filesystem checks + */ +#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */ +#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */ + +/* + * Behaviour when detecting errors + */ +#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */ +#define EXT2_ERRORS_RO 2 /* Remount fs read-only */ +#define EXT2_ERRORS_PANIC 3 /* Panic */ +#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE + +/* + * Structure of the super block + */ +struct ext2_super_block { + uint32_t s_inodes_count; /* Inodes count */ + uint32_t s_blocks_count; /* Blocks count */ + uint32_t s_r_blocks_count; /* Reserved blocks count */ + uint32_t s_free_blocks_count; /* Free blocks count */ + uint32_t s_free_inodes_count; /* Free inodes count */ + uint32_t s_first_data_block; /* First Data Block */ + uint32_t s_log_block_size; /* Block size */ + int32_t s_log_frag_size; /* Fragment size */ + uint32_t s_blocks_per_group; /* # Blocks per group */ + uint32_t s_frags_per_group; /* # Fragments per group */ + uint32_t s_inodes_per_group; /* # Inodes per group */ + uint32_t s_mtime; /* Mount time */ + uint32_t s_wtime; /* Write time */ + uint16_t s_mnt_count; /* Mount count */ + int16_t s_max_mnt_count; /* Maximal mount count */ + uint16_t s_magic; /* Magic signature */ + uint16_t s_state; /* File system state */ + uint16_t s_errors; /* Behaviour when detecting errors */ + uint16_t s_minor_rev_level; /* minor revision level */ + uint32_t s_lastcheck; /* time of last check */ + uint32_t s_checkinterval; /* max. 
time between checks */ + uint32_t s_creator_os; /* OS */ + uint32_t s_rev_level; /* Revision level */ + uint16_t s_def_resuid; /* Default uid for reserved blocks */ + uint16_t s_def_resgid; /* Default gid for reserved blocks */ + /* + * These fields are for EXT2_DYNAMIC_REV superblocks only. + * + * Note: the difference between the compatible feature set and + * the incompatible feature set is that if there is a bit set + * in the incompatible feature set that the kernel doesn't + * know about, it should refuse to mount the filesystem. + * + * e2fsck's requirements are more strict; if it doesn't know + * about a feature in either the compatible or incompatible + * feature set, it must abort and not try to meddle with + * things it doesn't understand... + */ + uint32_t s_first_ino; /* First non-reserved inode */ + uint16_t s_inode_size; /* size of inode structure */ + uint16_t s_block_group_nr; /* block group # of this superblock */ + uint32_t s_feature_compat; /* compatible feature set */ + uint32_t s_feature_incompat; /* incompatible feature set */ + uint32_t s_feature_ro_compat; /* readonly-compatible feature set */ + uint8_t s_uuid[16]; /* 128-bit uuid for volume */ + char s_volume_name[16]; /* volume name */ + char s_last_mounted[64]; /* directory where last mounted */ + uint32_t s_algorithm_usage_bitmap; /* For compression */ + /* + * Performance hints. Directory preallocation should only + * happen if the EXT2_FEATURE_COMPAT_DIR_PREALLOC flag is on. + */ + uint8_t s_prealloc_blocks; /* Nr of blocks to try to preallocate*/ + uint8_t s_prealloc_dir_blocks; /* Nr to preallocate for dirs */ + uint16_t s_reserved_gdt_blocks; /* Per group table for online growth */ + /* + * Journaling support valid if EXT2_FEATURE_COMPAT_HAS_JOURNAL set. + */ +/*D0*/ uint8_t s_journal_uuid[16]; /* uuid of journal superblock */ +/*E0*/ uint32_t s_journal_inum; /* inode number of journal file */ + uint32_t s_journal_dev; /* device number of journal file */ + uint32_t s_last_orphan; /* start of list of inodes to delete */ + uint32_t s_hash_seed[4]; /* HTREE hash seed */ + uint8_t s_def_hash_version; /* Default hash version to use */ + uint8_t s_jnl_backup_type; /* Default type of journal backup */ + uint16_t s_reserved_word_pad; +/*100*/ uint32_t s_default_mount_opts; + uint32_t s_first_meta_bg; /* First metablock group */ + /* ext3 additions */ + uint32_t s_mkfs_time; /* When the filesystem was created */ + uint32_t s_jnl_blocks[17]; /* Backup of the journal inode */ + /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */ +/*150*/ uint32_t s_blocks_count_hi; /* Blocks count */ + uint32_t s_r_blocks_count_hi; /* Reserved blocks count */ + uint32_t s_free_blocks_count_hi; /* Free blocks count */ + uint16_t s_min_extra_isize; /* All inodes have at least # bytes */ + uint16_t s_want_extra_isize; /* New inodes should reserve # bytes */ + uint32_t s_flags; /* Miscellaneous flags */ + uint16_t s_raid_stride; /* RAID stride */ + uint16_t s_mmp_interval; /* # seconds to wait in MMP checking */ + uint64_t s_mmp_block; /* Block for multi-mount protection */ + uint32_t s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ + uint8_t s_log_groups_per_flex; /* FLEX_BG group size */ + uint8_t s_reserved_char_pad2; + uint16_t s_reserved_pad; + uint32_t s_reserved[162]; /* Padding to the end of the block */ +}; +struct BUG_ext2_super_block { + char bug[sizeof(struct ext2_super_block) == 1024 ? 
1 : -1]; +}; + +/* + * Codes for operating systems + */ +#define EXT2_OS_LINUX 0 +#define EXT2_OS_HURD 1 +#define EXT2_OS_MASIX 2 +#define EXT2_OS_FREEBSD 3 +#define EXT2_OS_LITES 4 + +/* + * Revision levels + */ +#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */ +#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */ + +#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV +#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV + +#define EXT2_GOOD_OLD_INODE_SIZE 128 + +/* + * Journal inode backup types + */ +#define EXT3_JNL_BACKUP_BLOCKS 1 + +/* + * Feature set definitions + */ + +#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \ + ( EXT2_SB(sb)->s_feature_compat & (mask) ) +#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \ + ( EXT2_SB(sb)->s_feature_ro_compat & (mask) ) +#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \ + ( EXT2_SB(sb)->s_feature_incompat & (mask) ) + +/* for s_feature_compat */ +#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001 +#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002 +#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004 +#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008 +#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010 +#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020 + +/* for s_feature_ro_compat */ +#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 +#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 +#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 /* not used */ +#define EXT4_FEATURE_RO_COMPAT_HUGE_FILE 0x0008 +#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 +#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 +#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 + +/* for s_feature_incompat */ +#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001 +#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002 +#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 +#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 +#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010 +#define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 +#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 +#define EXT4_FEATURE_INCOMPAT_MMP 0x0100 +#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 + + +#define EXT2_FEATURE_COMPAT_SUPP 0 +#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT2_FEATURE_RO_COMPAT_BTREE_DIR) +#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \ + EXT2_FEATURE_INCOMPAT_META_BG) +#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED (~EXT2_FEATURE_INCOMPAT_SUPP) +#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED (~EXT2_FEATURE_RO_COMPAT_SUPP) + +#define EXT3_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT2_FEATURE_RO_COMPAT_BTREE_DIR) +#define EXT3_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \ + EXT3_FEATURE_INCOMPAT_RECOVER| \ + EXT2_FEATURE_INCOMPAT_META_BG) +#define EXT3_FEATURE_INCOMPAT_UNSUPPORTED (~EXT3_FEATURE_INCOMPAT_SUPP) +#define EXT3_FEATURE_RO_COMPAT_UNSUPPORTED (~EXT3_FEATURE_RO_COMPAT_SUPP) + + +/* + * Default values for user and/or group using reserved blocks + */ +#define EXT2_DEF_RESUID 0 +#define EXT2_DEF_RESGID 0 + +/* + * Default mount options + */ +#define EXT2_DEFM_DEBUG 0x0001 +#define EXT2_DEFM_BSDGROUPS 0x0002 +#define EXT2_DEFM_XATTR_USER 0x0004 +#define EXT2_DEFM_ACL 0x0008 +#define EXT2_DEFM_UID16 0x0010 +#define EXT3_DEFM_JMODE 0x0060 +#define EXT3_DEFM_JMODE_DATA 0x0020 +#define EXT3_DEFM_JMODE_ORDERED 0x0040 +#define EXT3_DEFM_JMODE_WBACK 0x0060 + +/* + * Structure of a directory entry + */ +#define EXT2_NAME_LEN 255 + +struct ext2_dir_entry { + uint32_t inode; /* Inode number */ + uint16_t rec_len; 
/* Directory entry length */ + uint16_t name_len; /* Name length */ + char name[EXT2_NAME_LEN]; /* File name */ +}; + +/* + * The new version of the directory entry. Since EXT2 structures are + * stored in intel byte order, and the name_len field could never be + * bigger than 255 chars, it's safe to reclaim the extra byte for the + * file_type field. + */ +struct ext2_dir_entry_2 { + uint32_t inode; /* Inode number */ + uint16_t rec_len; /* Directory entry length */ + uint8_t name_len; /* Name length */ + uint8_t file_type; + char name[EXT2_NAME_LEN]; /* File name */ +}; + +/* + * Ext2 directory file types. Only the low 3 bits are used. The + * other bits are reserved for now. + */ +#define EXT2_FT_UNKNOWN 0 +#define EXT2_FT_REG_FILE 1 +#define EXT2_FT_DIR 2 +#define EXT2_FT_CHRDEV 3 +#define EXT2_FT_BLKDEV 4 +#define EXT2_FT_FIFO 5 +#define EXT2_FT_SOCK 6 +#define EXT2_FT_SYMLINK 7 + +#define EXT2_FT_MAX 8 + +/* + * EXT2_DIR_PAD defines the directory entries boundaries + * + * NOTE: It must be a multiple of 4 + */ +#define EXT2_DIR_PAD 4 +#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1) +#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \ + ~EXT2_DIR_ROUND) + +#endif diff --git a/probe-busybox/include/busybox.h b/probe-busybox/include/busybox.h new file mode 100644 index 00000000..abeef0de --- /dev/null +++ b/probe-busybox/include/busybox.h @@ -0,0 +1,64 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#ifndef BUSYBOX_H +#define BUSYBOX_H 1 + +#include "libbb.h" +/* BB_DIR_foo and BB_SUID_bar constants: */ +#include "applet_metadata.h" + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +/* Defined in appletlib.c (by including generated applet_tables.h) */ +/* Keep in sync with applets/applet_tables.c! */ +extern const char applet_names[] ALIGN1; +extern int (*const applet_main[])(int argc, char **argv); +extern const uint8_t applet_flags[] ALIGN1; +extern const uint8_t applet_suid[] ALIGN1; +extern const uint8_t applet_install_loc[] ALIGN1; + +#ifndef ENABLE_FEATURE_SH_STANDALONE +#define ENABLE_FEATURE_SH_STANDALONE 0 +#endif +#ifndef ENABLE_FEATURE_SH_NOFORK +#define ENABLE_FEATURE_SH_NOFORK 0 +#endif +#if ENABLE_FEATURE_PREFER_APPLETS \ + || ENABLE_FEATURE_SH_STANDALONE \ + || ENABLE_FEATURE_SH_NOFORK +# define APPLET_IS_NOFORK(i) (applet_flags[(i)/4] & (1 << (2 * ((i)%4)))) +# define APPLET_IS_NOEXEC(i) (applet_flags[(i)/4] & (1 << ((2 * ((i)%4))+1))) +#else +# define APPLET_IS_NOFORK(i) 0 +# define APPLET_IS_NOEXEC(i) 0 +#endif + +#if ENABLE_FEATURE_SUID +# define APPLET_SUID(i) ((applet_suid[(i)/4] >> (2 * ((i)%4)) & 3)) +#endif + +#if ENABLE_FEATURE_INSTALLER +#define APPLET_INSTALL_LOC(i) ({ \ + unsigned v = (i); \ + if (v & 1) v = applet_install_loc[v/2] >> 4; \ + else v = applet_install_loc[v/2] & 0xf; \ + v; }) +#endif + + +/* Length of these names has effect on size of libbusybox + * and "individual" binaries. Keep them short. 
+ */ +#if ENABLE_BUILD_LIBBUSYBOX +#if ENABLE_FEATURE_SHARED_BUSYBOX +int lbb_main(char **argv) EXTERNALLY_VISIBLE; +#else +int lbb_main(char **argv); +#endif +#endif + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/cmdtable.h b/probe-busybox/include/cmdtable.h new file mode 100644 index 00000000..fa9014c5 --- /dev/null +++ b/probe-busybox/include/cmdtable.h @@ -0,0 +1,39 @@ +/* +cmdtable.h + +Commands for perd and ooqd +*/ + +int condmv_main(int argc, char *argv[]); +int httppost_main(int argc, char *argv[]); +#if 0 +int httpget_main(int argc, char *argv[]); +int nslookup_main(int argc, char *argv[]); +int ping6_main(int argc, char *argv[]); +int ping_main(int argc, char *argv[]); +int sslgetcert_main(int argc, char *argv[]); +int tdig_main(int argc, char *argv[]); +int traceroute_main(int argc, char *argv[]); +#endif +int wifimsm_main(int argc, char *argv[]); + +static struct builtin +{ + const char *cmd; + int (*func)(int argc, char *argv[]); +} builtin_cmds[]= +{ + { "condmv", condmv_main }, + { "httppost", httppost_main }, +#if 0 + { "ping6", ping6_main }, + { "ping", ping_main }, + { "sslgetcert", sslgetcert_main }, + { "traceroute", traceroute_main }, +#endif +#if ENABLE_OOQD + { "wifimsm", wifimsm_main }, +#endif + { NULL, 0 } +}; + diff --git a/probe-busybox/include/dump.h b/probe-busybox/include/dump.h new file mode 100644 index 00000000..4c237ef0 --- /dev/null +++ b/probe-busybox/include/dump.h @@ -0,0 +1,56 @@ +/* vi: set sw=4 ts=4: */ + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +#define F_IGNORE 0x01 /* %_A */ +#define F_SETREP 0x02 /* rep count set, not default */ +#define F_ADDRESS 0x001 /* print offset */ +#define F_BPAD 0x002 /* blank pad */ +#define F_C 0x004 /* %_c */ +#define F_CHAR 0x008 /* %c */ +#define F_DBL 0x010 /* %[EefGf] */ +#define F_INT 0x020 /* %[di] */ +#define F_P 0x040 /* %_p */ +#define F_STR 0x080 /* %s */ +#define F_U 0x100 /* %_u */ +#define F_UINT 0x200 /* %[ouXx] */ +#define F_TEXT 0x400 /* no conversions */ + +enum dump_vflag_t { ALL, DUP, FIRST, WAIT }; /* -v values */ + +typedef struct PR { + struct PR *nextpr; /* next print unit */ + unsigned flags; /* flag values */ + int bcnt; /* byte count */ + char *cchar; /* conversion character */ + char *fmt; /* printf format */ + char *nospace; /* no whitespace version */ +} PR; + +typedef struct FU { + struct FU *nextfu; /* next format unit */ + struct PR *nextpr; /* next print unit */ + unsigned flags; /* flag values */ + int reps; /* repetition count */ + int bcnt; /* byte count */ + char *fmt; /* format string */ +} FU; + +typedef struct FS { /* format strings */ + struct FS *nextfs; /* linked list of format strings */ + struct FU *nextfu; /* linked list of format units */ + int bcnt; +} FS; + +typedef struct dumper_t { + off_t dump_skip; /* bytes to skip */ + int dump_length; /* max bytes to read */ + smallint dump_vflag; /*enum dump_vflag_t*/ + FS *fshead; +} dumper_t; + +dumper_t* alloc_dumper(void) FAST_FUNC; +extern void bb_dump_add(dumper_t *dumper, const char *fmt) FAST_FUNC; +extern int bb_dump_dump(dumper_t *dumper, char **argv) FAST_FUNC; + +POP_SAVED_FUNCTION_VISIBILITY diff --git a/probe-busybox/include/fix_u32.h b/probe-busybox/include/fix_u32.h new file mode 100644 index 00000000..a2ba6d0a --- /dev/null +++ b/probe-busybox/include/fix_u32.h @@ -0,0 +1,55 @@ +/* vi: set sw=4 ts=4: */ +/* + * This header makes it easier to include kernel headers + * which use u32 and such. + * + * Licensed under GPLv2, see file LICENSE in this source tree. 
*/
+#ifndef FIX_U32_H
+#define FIX_U32_H 1
+
+/* Try hard to pull in u32 types and such.
+ * Otherwise, #include "fix_u32.h" + #include <linux/foo.h>
+ * may end up typedef'ing bb_hack_u32 inside foo.h,
+ * and repeated typedefs aren't allowed in C/C++.
+ */
+#include <asm/types.h>
+#include <linux/types.h>
+
+/* In case above includes still failed to provide the types,
+ * provide them ourself
+ */
+#undef __u64
+#undef u64
+#undef u32
+#undef u16
+#undef u8
+#undef __s64
+#undef s64
+#undef s32
+#undef s16
+#undef s8
+
+#define __u64 bb_hack___u64
+#define u64 bb_hack_u64
+#define u32 bb_hack_u32
+#define u16 bb_hack_u16
+#define u8 bb_hack_u8
+#define __s64 bb_hack___s64
+#define s64 bb_hack_s64
+#define s32 bb_hack_s32
+#define s16 bb_hack_s16
+#define s8 bb_hack_s8
+
+typedef uint64_t __u64;
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t __s64;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+
+#endif
diff --git a/probe-busybox/include/grp_.h b/probe-busybox/include/grp_.h
new file mode 100644
index 00000000..db13ce3b
--- /dev/null
+++ b/probe-busybox/include/grp_.h
@@ -0,0 +1,66 @@
+/* vi: set sw=4 ts=4: */
+/* Copyright (C) 1991,92,95,96,97,98,99,2000,01 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA.
+ */
+/*
+ * POSIX Standard: 9.2.1 Group Database Access
+ */
+#ifndef BB_GRP_H
+#define BB_GRP_H 1
+
+PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN
+
+/* This file is #included after #include <grp.h>
+ * We will use libc-defined structures, but will #define function names
+ * so that function calls are directed to bb_internal_XXX replacements
+ */
+#undef endgrent
+#define endgrent bb_internal_endgrent
+#define getgrgid bb_internal_getgrgid
+#define getgrnam bb_internal_getgrnam
+#define getgrouplist bb_internal_getgrouplist
+#define initgroups bb_internal_initgroups
+
+/* All function names below should be remapped by #defines above
+ * in order to not collide with libc names. */
+
+/* Close the group-file stream. */
+void FAST_FUNC endgrent(void);
+
+/* Search for an entry with a matching group ID. */
+struct group* FAST_FUNC getgrgid(gid_t __gid);
+
+/* Search for an entry with a matching group name. */
+struct group* FAST_FUNC getgrnam(const char *__name);
+
+/* Reentrant versions of some of the functions above. */
+
+/* Store at most *NGROUPS members of the group set for USER into
+ *GROUPS. Also include GROUP. The actual number of groups found is
+ returned in *NGROUPS. Return -1 if the if *NGROUPS is too small. */
+int FAST_FUNC getgrouplist(const char *__user, gid_t __group,
+ gid_t *__groups, int *__ngroups);
+
+/* Initialize the group set for the current user
+ by reading the group database and using all groups
+ of which USER is a member. Also include GROUP.
*/ +int FAST_FUNC initgroups(const char *__user, gid_t __group); + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/inet_common.h b/probe-busybox/include/inet_common.h new file mode 100644 index 00000000..4638aa9e --- /dev/null +++ b/probe-busybox/include/inet_common.h @@ -0,0 +1,32 @@ +/* vi: set sw=4 ts=4: */ +/* + * stolen from net-tools-1.59 and stripped down for busybox by + * Erik Andersen + * + * Heavily modified by Manuel Novoa III Mar 12, 2001 + * + */ +#ifndef INET_COMMON_H +#define INET_COMMON_H 1 + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +/* hostfirst!=0 If we expect this to be a hostname, + try hostname database first + */ +int INET_resolve(const char *name, struct sockaddr_in *s_in, int hostfirst) FAST_FUNC; + +/* numeric: & 0x8000: "default" instead of "*", + * & 0x4000: host instead of net, + * & 0x0fff: don't resolve + */ + +int INET6_resolve(const char *name, struct sockaddr_in6 *sin6) FAST_FUNC; + +/* These return malloced string */ +char *INET_rresolve(struct sockaddr_in *s_in, int numeric, uint32_t netmask) FAST_FUNC; +char *INET6_rresolve(struct sockaddr_in6 *sin6, int numeric) FAST_FUNC; + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/libbb.h b/probe-busybox/include/libbb.h new file mode 100644 index 00000000..ce643ff9 --- /dev/null +++ b/probe-busybox/include/libbb.h @@ -0,0 +1,2314 @@ +/* vi: set sw=4 ts=4: */ +/* + * Busybox main internal header file + * + * Based in part on code from sash, Copyright (c) 1999 by David I. Bell + * Permission has been granted to redistribute this code under GPL. + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#ifndef LIBBB_H +#define LIBBB_H 1 + +#include "platform.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined __UCLIBC__ /* TODO: and glibc? */ +/* use inlined versions of these: */ +# define sigfillset(s) __sigfillset(s) +# define sigemptyset(s) __sigemptyset(s) +# define sigisemptyset(s) __sigisemptyset(s) +#endif +#include +#include +#include +#include +#include +#include +/* There are two incompatible basename's, let's not use them! */ +/* See the dirname/basename man page for details */ +#include /* dirname,basename */ +#undef basename +#define basename dont_use_basename +#include +#include +#include +#include +#include +#include +#include +#if !defined(major) || defined(__GLIBC__) +# include +#endif +#include +#include +#include +#include +#include +#include +#ifndef ENABLE_FEATURE_SHADOWPASSWDS +#define ENABLE_FEATURE_SHADOWPASSWDS 0 +#endif +#if ENABLE_FEATURE_SHADOWPASSWDS +# if !ENABLE_USE_BB_SHADOW +/* If using busybox's shadow implementation, do not include the shadow.h + * header as the toolchain may not provide it at all. + */ +# include +# endif +#endif +#if defined(ANDROID) || defined(__ANDROID__) +# define endpwent() ((void)0) +# define endgrent() ((void)0) +#endif +#ifdef HAVE_MNTENT_H +# include +#endif +#ifdef HAVE_SYS_STATFS_H +# include +#endif +/* Don't do this here: + * #include + * Some linux/ includes pull in conflicting definition + * of struct sysinfo (only in some toolchanins), which breaks build. + * Include sys/sysinfo.h only in those files which need it. 
+ */ +#if ENABLE_SELINUX +# include +# include +#endif +#if ENABLE_FEATURE_UTMP +# if defined __UCLIBC__ && ( \ + (UCLIBC_VERSION >= KERNEL_VERSION(0, 9, 32) \ + && UCLIBC_VERSION < KERNEL_VERSION(0, 9, 34) \ + && defined __UCLIBC_HAS_UTMPX__ \ + ) || ( \ + UCLIBC_VERSION >= KERNEL_VERSION(0, 9, 34) \ + ) \ + ) +# include +# elif defined __UCLIBC__ +# include +# define utmpx utmp +# define setutxent setutent +# define endutxent endutent +# define getutxent getutent +# define getutxid getutid +# define getutxline getutline +# define pututxline pututline +# define utmpxname utmpname +# define updwtmpx updwtmp +# define _PATH_UTMPX _PATH_UTMP +# else +# include +# include +# if defined _PATH_UTMP && !defined _PATH_UTMPX +# define _PATH_UTMPX _PATH_UTMP +# endif +# endif +#endif +#if ENABLE_LOCALE_SUPPORT +# include +#else +# define setlocale(x,y) ((void)0) +#endif +#ifdef DMALLOC +# include +#endif +/* Just in case libc doesn't define some of these... */ +#ifndef _PATH_PASSWD +#define _PATH_PASSWD "/etc/passwd" +#endif +#ifndef _PATH_GROUP +#define _PATH_GROUP "/etc/group" +#endif +#ifndef _PATH_SHADOW +#define _PATH_SHADOW "/etc/shadow" +#endif +#ifndef _PATH_GSHADOW +#define _PATH_GSHADOW "/etc/gshadow" +#endif +#if defined __FreeBSD__ || defined __OpenBSD__ +# include +# include +#elif defined __APPLE__ +# include +#else +# include +//This breaks on bionic: +//# if !defined(__socklen_t_defined) && !defined(_SOCKLEN_T_DECLARED) +///* We #define socklen_t *after* includes, otherwise we get +// * typedef redefinition errors from system headers +// * (in case "is it defined already" detection above failed) +// */ +//# define socklen_t bb_socklen_t +// typedef unsigned socklen_t; +//# endif +//if this is still needed, add a fix along the lines of +// ifdef SPECIFIC_BROKEN_LIBC_CHECK / typedef socklen_t / endif +//in platform.h instead! +#endif +#ifndef HAVE_CLEARENV +# define clearenv() do { if (environ) environ[0] = NULL; } while (0) +#endif +#ifndef HAVE_FDATASYNC +# define fdatasync fsync +#endif +#ifndef HAVE_XTABS +# define XTABS TAB3 +#endif + + +/* Some libc's forget to declare these, do it ourself */ + +extern char **environ; +#if defined(__GLIBC__) && __GLIBC__ < 2 +int vdprintf(int d, const char *format, va_list ap); +#endif +/* klogctl is in libc's klog.h, but we cheat and not #include that */ +int klogctl(int type, char *b, int len); +#ifndef PATH_MAX +# define PATH_MAX 256 +#endif +#ifndef BUFSIZ +# define BUFSIZ 4096 +#endif + + +/* Busybox does not use threads, we can speed up stdio. 
*/ +#ifdef HAVE_UNLOCKED_STDIO +# undef getc +# define getc(stream) getc_unlocked(stream) +# undef getchar +# define getchar() getchar_unlocked() +# undef putc +# define putc(c,stream) putc_unlocked(c,stream) +# undef putchar +# define putchar(c) putchar_unlocked(c) +# undef fgetc +# define fgetc(stream) getc_unlocked(stream) +# undef fputc +# define fputc(c,stream) putc_unlocked(c,stream) +#endif +/* Above functions are required by POSIX.1-2008, below ones are extensions */ +#ifdef HAVE_UNLOCKED_LINE_OPS +# undef fgets +# define fgets(s,n,stream) fgets_unlocked(s,n,stream) +# undef fputs +# define fputs(s,stream) fputs_unlocked(s,stream) +/* musl <= 1.1.15 does not support fflush_unlocked(NULL) */ +//# undef fflush +//# define fflush(stream) fflush_unlocked(stream) +# undef feof +# define feof(stream) feof_unlocked(stream) +# undef ferror +# define ferror(stream) ferror_unlocked(stream) +# undef fileno +# define fileno(stream) fileno_unlocked(stream) +#endif + + +/* Make all declarations hidden (-fvisibility flag only affects definitions) */ +/* (don't include system headers after this until corresponding pop!) */ +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + + +#ifndef ENABLE_USE_BB_PWD_GRP +#define ENABLE_USE_BB_PWD_GRP 0 +#endif +#if ENABLE_USE_BB_PWD_GRP +# include "pwd_.h" +# include "grp_.h" +#endif +#if ENABLE_FEATURE_SHADOWPASSWDS +# if ENABLE_USE_BB_SHADOW +# include "shadow_.h" +# endif +#endif + +/* Tested to work correctly with all int types (IIRC :]) */ +#define MAXINT(T) (T)( \ + ((T)-1) > 0 \ + ? (T)-1 \ + : (T)~((T)1 << (sizeof(T)*8-1)) \ + ) + +#define MININT(T) (T)( \ + ((T)-1) > 0 \ + ? (T)0 \ + : ((T)1 << (sizeof(T)*8-1)) \ + ) + +/* Large file support */ +/* Note that CONFIG_LFS=y forces bbox to be built with all common ops + * (stat, lseek etc) mapped to "largefile" variants by libc. + * Practically it means that open() automatically has O_LARGEFILE added + * and all filesize/file_offset parameters and struct members are "large" + * (in today's world - signed 64bit). For full support of large files, + * we need a few helper #defines (below) and careful use of off_t + * instead of int/ssize_t. No lseek64(), O_LARGEFILE etc necessary */ +#if ENABLE_LFS +/* CONFIG_LFS is on */ +# if ULONG_MAX > 0xffffffff +/* "long" is long enough on this system */ +typedef unsigned long uoff_t; +# define XATOOFF(a) xatoul_range((a), 0, LONG_MAX) +/* usage: sz = BB_STRTOOFF(s, NULL, 10); if (errno || sz < 0) die(); */ +# define BB_STRTOOFF bb_strtoul +# define STRTOOFF strtoul +/* usage: printf("size: %"OFF_FMT"d (%"OFF_FMT"x)\n", sz, sz); */ +# define OFF_FMT "l" +# else +/* "long" is too short, need "long long" */ +typedef unsigned long long uoff_t; +# define XATOOFF(a) xatoull_range((a), 0, LLONG_MAX) +# define BB_STRTOOFF bb_strtoull +# define STRTOOFF strtoull +# define OFF_FMT "ll" +# endif +#else +/* CONFIG_LFS is off */ +# if UINT_MAX == 0xffffffff +/* While sizeof(off_t) == sizeof(int), off_t is typedef'ed to long anyway. + * gcc will throw warnings on printf("%d", off_t). Crap... */ +typedef unsigned long uoff_t; +# define XATOOFF(a) xatoi_positive(a) +# define BB_STRTOOFF bb_strtou +# define STRTOOFF strtol +# define OFF_FMT "l" +# else +typedef unsigned long uoff_t; +# define XATOOFF(a) xatoul_range((a), 0, LONG_MAX) +# define BB_STRTOOFF bb_strtoul +# define STRTOOFF strtol +# define OFF_FMT "l" +# endif +#endif +/* scary. better ideas? (but do *test* them first!) 
*/ +#define OFF_T_MAX ((off_t)~((off_t)1 << (sizeof(off_t)*8-1))) +/* Users report bionic to use 32-bit off_t even if LARGEFILE support is requested. + * We misdetected that. Don't let it build: + */ +struct BUG_off_t_size_is_misdetected { + char BUG_off_t_size_is_misdetected[sizeof(off_t) == sizeof(uoff_t) ? 1 : -1]; +}; + +/* Some useful definitions */ +#undef FALSE +#define FALSE ((int) 0) +#undef TRUE +#define TRUE ((int) 1) +#undef SKIP +#define SKIP ((int) 2) + +/* Macros for min/max. */ +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +/* buffer allocation schemes */ +#if ENABLE_FEATURE_BUFFERS_GO_ON_STACK +#define RESERVE_CONFIG_BUFFER(buffer,len) char buffer[len] +#define RESERVE_CONFIG_UBUFFER(buffer,len) unsigned char buffer[len] +#define RELEASE_CONFIG_BUFFER(buffer) ((void)0) +#else +#if ENABLE_FEATURE_BUFFERS_GO_IN_BSS +#define RESERVE_CONFIG_BUFFER(buffer,len) static char buffer[len] +#define RESERVE_CONFIG_UBUFFER(buffer,len) static unsigned char buffer[len] +#define RELEASE_CONFIG_BUFFER(buffer) ((void)0) +#else +#define RESERVE_CONFIG_BUFFER(buffer,len) char *buffer = xmalloc(len) +#define RESERVE_CONFIG_UBUFFER(buffer,len) unsigned char *buffer = xmalloc(len) +#define RELEASE_CONFIG_BUFFER(buffer) free(buffer) +#endif +#endif + +#if defined(__GLIBC__) +/* glibc uses __errno_location() to get a ptr to errno */ +/* We can just memorize it once - no multithreading in busybox :) */ +extern int *const bb_errno; +#undef errno +#define errno (*bb_errno) +#endif + +#if !(ULONG_MAX > 0xffffffff) +/* Only 32-bit CPUs need this, 64-bit ones use inlined version */ +uint64_t bb_bswap_64(uint64_t x) FAST_FUNC; +#endif + +unsigned long long monotonic_ns(void) FAST_FUNC; +unsigned long long monotonic_us(void) FAST_FUNC; +unsigned long long monotonic_ms(void) FAST_FUNC; +unsigned monotonic_sec(void) FAST_FUNC; + +extern void chomp(char *s) FAST_FUNC; +extern void trim(char *s) FAST_FUNC; +extern char *skip_whitespace(const char *) FAST_FUNC; +extern char *skip_non_whitespace(const char *) FAST_FUNC; +extern char *skip_dev_pfx(const char *tty_name) FAST_FUNC; + +extern char *strrstr(const char *haystack, const char *needle) FAST_FUNC; + +//TODO: supply a pointer to char[11] buffer (avoid statics)? +extern const char *bb_mode_string(mode_t mode) FAST_FUNC; +extern int is_directory(const char *name, int followLinks) FAST_FUNC; +#ifndef ENABLE_FEATURE_VERBOSE +#define ENABLE_FEATURE_VERBOSE 0 +#endif +enum { /* cp.c, mv.c, install.c depend on these values. CAREFUL when changing them! */ + FILEUTILS_PRESERVE_STATUS = 1 << 0, /* -p */ + FILEUTILS_DEREFERENCE = 1 << 1, /* !-d */ + FILEUTILS_RECUR = 1 << 2, /* -R */ + FILEUTILS_FORCE = 1 << 3, /* -f */ + FILEUTILS_INTERACTIVE = 1 << 4, /* -i */ + FILEUTILS_MAKE_HARDLINK = 1 << 5, /* -l */ + FILEUTILS_MAKE_SOFTLINK = 1 << 6, /* -s */ + FILEUTILS_DEREF_SOFTLINK = 1 << 7, /* -L */ + FILEUTILS_DEREFERENCE_L0 = 1 << 8, /* -H */ + /* -a = -pdR (mapped in cp.c) */ + /* -r = -dR (mapped in cp.c) */ + /* -P = -d (mapped in cp.c) */ + FILEUTILS_VERBOSE = (1 << 12) * ENABLE_FEATURE_VERBOSE, /* -v */ + FILEUTILS_UPDATE = 1 << 13, /* -u */ +#if ENABLE_SELINUX + FILEUTILS_PRESERVE_SECURITY_CONTEXT = 1 << 14, /* -c */ +#endif + FILEUTILS_RMDEST = 1 << (15 - !ENABLE_SELINUX), /* --remove-destination */ + /* + * Hole. 
cp may have some bits set here, + * they should not affect remove_file()/copy_file() + */ +#if ENABLE_SELINUX + FILEUTILS_SET_SECURITY_CONTEXT = 1 << 30, +#endif + FILEUTILS_IGNORE_CHMOD_ERR = 1 << 31, +}; +#define FILEUTILS_CP_OPTSTR "pdRfilsLHarPvu" IF_SELINUX("c") +extern int remove_file(const char *path, int flags) FAST_FUNC; +/* NB: without FILEUTILS_RECUR in flags, it will basically "cat" + * the source, not copy (unless "source" is a directory). + * This makes "cp /dev/null file" and "install /dev/null file" (!!!) + * work coreutils-compatibly. */ +extern int copy_file(const char *source, const char *dest, int flags) FAST_FUNC; + +enum { + ACTION_RECURSE = (1 << 0), + ACTION_FOLLOWLINKS = (1 << 1), + ACTION_FOLLOWLINKS_L0 = (1 << 2), + ACTION_DEPTHFIRST = (1 << 3), + /*ACTION_REVERSE = (1 << 4), - unused */ + ACTION_QUIET = (1 << 5), + ACTION_DANGLING_OK = (1 << 6), +}; +typedef uint8_t recurse_flags_t; +extern int recursive_action(const char *fileName, unsigned flags, + int FAST_FUNC (*fileAction)(const char *fileName, struct stat* statbuf, void* userData, int depth), + int FAST_FUNC (*dirAction)(const char *fileName, struct stat* statbuf, void* userData, int depth), + void* userData, unsigned depth) FAST_FUNC; +extern int device_open(const char *device, int mode) FAST_FUNC; +enum { GETPTY_BUFSIZE = 16 }; /* more than enough for "/dev/ttyXXX" */ +extern int xgetpty(char *line) FAST_FUNC; +extern int get_console_fd_or_die(void) FAST_FUNC; +extern void console_make_active(int fd, const int vt_num) FAST_FUNC; +extern char *find_block_device(const char *path) FAST_FUNC; +/* bb_copyfd_XX print read/write errors and return -1 if they occur */ +extern off_t bb_copyfd_eof(int fd1, int fd2) FAST_FUNC; +extern off_t bb_copyfd_size(int fd1, int fd2, off_t size) FAST_FUNC; +extern void bb_copyfd_exact_size(int fd1, int fd2, off_t size) FAST_FUNC; +/* "short" copy can be detected by return value < size */ +/* this helper yells "short read!" if param is not -1 */ +extern void complain_copyfd_and_die(off_t sz) NORETURN FAST_FUNC; + +extern char bb_process_escape_sequence(const char **ptr) FAST_FUNC; +char* strcpy_and_process_escape_sequences(char *dst, const char *src) FAST_FUNC; +/* xxxx_strip version can modify its parameter: + * "/" -> "/" + * "abc" -> "abc" + * "abc/def" -> "def" + * "abc/def/" -> "def" !! + */ +char *bb_get_last_path_component_strip(char *path) FAST_FUNC; +/* "abc/def/" -> "" and it never modifies 'path' */ +char *bb_get_last_path_component_nostrip(const char *path) FAST_FUNC; +/* Simpler version: does not special case "/" string */ +const char *bb_basename(const char *name) FAST_FUNC; +/* NB: can violate const-ness (similarly to strchr) */ +char *last_char_is(const char *s, int c) FAST_FUNC; +const char* endofname(const char *name) FAST_FUNC; +char *is_prefixed_with(const char *string, const char *key) FAST_FUNC; +char *is_suffixed_with(const char *string, const char *key) FAST_FUNC; + +#define ATLAS_MSM_VERSION "2.6.4" + +/* What's the best place for this? 
AA may be atlas_probe.h */ +#define ATLAS_HOME "/home/atlas" +#define ATLAS_CRONS_REL "crons" +#define ATLAS_STATUS_REL "status" +#define ATLAS_DATA_OUT_REL "data/out" +#define ATLAS_DATA_OOQ_OUT_REL "data/ooq.out" +#define ATLAS_DATA_NEW_REL "data/new" +#define ATLAS_DATA_STORAGE_REL "data/storage" +#define ATLAS_TIMESYNC_FILE_REL ATLAS_DATA_NEW_REL "/timesync.vol" +#define ATLAS_FUZZING_REL "data" + +extern int atlas_unsafe(void); +extern char *rebased_validated_filename(const char *base, const char *path, const char *prefix); +extern char *rebased_validated_dir(const char *base, const char *path, const char *prefix); +extern int validate_atlas_id(const char *atlas_id); +extern int get_probe_id(void); +extern int get_timesync(void); +extern int gettime_mono(struct timespec *tsp); +extern char *atlas_get_version_json_str(void); +extern int bind_interface(int socket, int af, char *name); +extern int atlas_check_addr(const struct sockaddr *sa, socklen_t len); +extern char *atlas_name_macro(char *str); +extern int atlas_tests(void); +extern time_t atlas_time(void); +extern int do_ipv6_option(int sock, int hbh_dest, unsigned size); +extern void route_set_flags(char *flagstr, int flags); +extern void peek_response(int fd, int *typep); +extern void peek_response_file(FILE *file, int *typep); +extern void read_response(int fd, int type, size_t *sizep, void *data); +extern void read_response_file(FILE *file, int type, size_t *sizep, + void *data); +extern void write_response(FILE *file, int type, size_t size, void *data); +extern int rxtxrpt_main(int argc, char *argv[]); + +int ndelay_on(int fd) FAST_FUNC; +int ndelay_off(int fd) FAST_FUNC; +void close_on_exec_on(int fd) FAST_FUNC; +void xdup2(int, int) FAST_FUNC; +void xmove_fd(int, int) FAST_FUNC; + + +DIR *xopendir(const char *path) FAST_FUNC; +DIR *warn_opendir(const char *path) FAST_FUNC; + +char *xmalloc_realpath(const char *path) FAST_FUNC RETURNS_MALLOC; +char *xmalloc_readlink(const char *path) FAST_FUNC RETURNS_MALLOC; +char *xmalloc_readlink_or_warn(const char *path) FAST_FUNC RETURNS_MALLOC; +/* !RETURNS_MALLOC: it's a realloc-like function */ +char *xrealloc_getcwd_or_warn(char *cwd) FAST_FUNC; + +char *xmalloc_follow_symlinks(const char *path) FAST_FUNC RETURNS_MALLOC; + +extern size_t strlcat(char *__restrict dst, const char *__restrict src, + size_t n); +extern size_t strlcpy(char *__restrict dst, const char *__restrict src, + size_t n); + +enum { + /* bb_signals(BB_FATAL_SIGS, handler) catches all signals which + * otherwise would kill us, except for those resulting from bugs: + * SIGSEGV, SIGILL, SIGFPE. + * Other fatal signals not included (TODO?): + * SIGBUS Bus error (bad memory access) + * SIGPOLL Pollable event. Synonym of SIGIO + * SIGPROF Profiling timer expired + * SIGSYS Bad argument to routine + * SIGTRAP Trace/breakpoint trap + * + * The only known arch with some of these sigs not fitting + * into 32 bits is parisc (SIGXCPU=33, SIGXFSZ=34, SIGSTKFLT=36). + * Dance around with long long to guard against that... + */ + BB_FATAL_SIGS = (int)(0 + + (1LL << SIGHUP) + + (1LL << SIGINT) + + (1LL << SIGTERM) + + (1LL << SIGPIPE) // Write to pipe with no readers + + (1LL << SIGQUIT) // Quit from keyboard + + (1LL << SIGABRT) // Abort signal from abort(3) + + (1LL << SIGALRM) // Timer signal from alarm(2) + + (1LL << SIGVTALRM) // Virtual alarm clock + + (1LL << SIGXCPU) // CPU time limit exceeded + + (1LL << SIGXFSZ) // File size limit exceeded + + (1LL << SIGUSR1) // Yes kids, these are also fatal! 
+ + (1LL << SIGUSR2) + + 0), +}; +void bb_signals(int sigs, void (*f)(int)) FAST_FUNC; +/* Unlike signal() and bb_signals, sets handler with sigaction() + * and in a way that while signal handler is run, no other signals + * will be blocked; syscalls will not be restarted: */ +void bb_signals_recursive_norestart(int sigs, void (*f)(int)) FAST_FUNC; +/* syscalls like read() will be interrupted with EINTR: */ +void signal_no_SA_RESTART_empty_mask(int sig, void (*handler)(int)) FAST_FUNC; +/* syscalls like read() won't be interrupted (though select/poll will be): */ +void signal_SA_RESTART_empty_mask(int sig, void (*handler)(int)) FAST_FUNC; +void wait_for_any_sig(void) FAST_FUNC; +void kill_myself_with_sig(int sig) NORETURN FAST_FUNC; +void sig_block(int sig) FAST_FUNC; +void sig_unblock(int sig) FAST_FUNC; +/* Will do sigaction(signum, act, NULL): */ +int sigaction_set(int sig, const struct sigaction *act) FAST_FUNC; +/* SIG_BLOCK/SIG_UNBLOCK all signals: */ +int sigprocmask_allsigs(int how) FAST_FUNC; +/* Standard handler which just records signo */ +extern smallint bb_got_signal; +void record_signo(int signo); /* not FAST_FUNC! */ + + +void xsetgid(gid_t gid) FAST_FUNC; +void xsetuid(uid_t uid) FAST_FUNC; +void xsetegid(gid_t egid) FAST_FUNC; +void xseteuid(uid_t euid) FAST_FUNC; +void xchdir(const char *path) FAST_FUNC; +void xfchdir(int fd) FAST_FUNC; +void xchroot(const char *path) FAST_FUNC; +void xsetenv(const char *key, const char *value) FAST_FUNC; +void bb_unsetenv(const char *key) FAST_FUNC; +void bb_unsetenv_and_free(char *key) FAST_FUNC; +void xunlink(const char *pathname) FAST_FUNC; +void xstat(const char *pathname, struct stat *buf) FAST_FUNC; +void xfstat(int fd, struct stat *buf, const char *errmsg) FAST_FUNC; +int open3_or_warn(const char *pathname, int flags, int mode) FAST_FUNC; +int open_or_warn(const char *pathname, int flags) FAST_FUNC; +int xopen3(const char *pathname, int flags, int mode) FAST_FUNC; +int xopen(const char *pathname, int flags) FAST_FUNC; +int xopen_nonblocking(const char *pathname) FAST_FUNC; +int xopen_as_uid_gid(const char *pathname, int flags, uid_t u, gid_t g) FAST_FUNC; +int open_or_warn_stdin(const char *pathname) FAST_FUNC; +int xopen_stdin(const char *pathname) FAST_FUNC; +void xrename(const char *oldpath, const char *newpath) FAST_FUNC; +int rename_or_warn(const char *oldpath, const char *newpath) FAST_FUNC; +off_t xlseek(int fd, off_t offset, int whence) FAST_FUNC; +int xmkstemp(char *template) FAST_FUNC; +off_t fdlength(int fd) FAST_FUNC; + +uoff_t FAST_FUNC get_volume_size_in_bytes(int fd, + const char *override, + unsigned override_units, + int extend); + +void xpipe(int filedes[2]) FAST_FUNC; +/* In this form code with pipes is much more readable */ +struct fd_pair { int rd; int wr; }; +#define piped_pair(pair) pipe(&((pair).rd)) +#define xpiped_pair(pair) xpipe(&((pair).rd)) + +/* Useful for having small structure members/global variables */ +typedef int8_t socktype_t; +typedef int8_t family_t; +struct BUG_too_small { + char BUG_socktype_t_too_small[(0 + | SOCK_STREAM + | SOCK_DGRAM + | SOCK_RDM + | SOCK_SEQPACKET + | SOCK_RAW + ) <= 127 ? 1 : -1]; + char BUG_family_t_too_small[(0 + | AF_UNSPEC + | AF_INET + | AF_INET6 + | AF_UNIX +#ifdef AF_PACKET + | AF_PACKET +#endif +#ifdef AF_NETLINK + | AF_NETLINK +#endif + /* | AF_DECnet */ + /* | AF_IPX */ + ) <= 127 ? 
1 : -1]; +}; + + +void parse_datestr(const char *date_str, struct tm *ptm) FAST_FUNC; +time_t validate_tm_time(const char *date_str, struct tm *ptm) FAST_FUNC; +char *strftime_HHMMSS(char *buf, unsigned len, time_t *tp) FAST_FUNC; +char *strftime_YYYYMMDDHHMMSS(char *buf, unsigned len, time_t *tp) FAST_FUNC; + +int xsocket(int domain, int type, int protocol) FAST_FUNC; +void xbind(int sockfd, struct sockaddr *my_addr, socklen_t addrlen) FAST_FUNC; +void xrbind(int sockfd, struct sockaddr *my_addr, socklen_t addrlen, + void (*reportf)(int err)) FAST_FUNC; +void xlisten(int s, int backlog) FAST_FUNC; +void xconnect(int s, const struct sockaddr *s_addr, socklen_t addrlen) FAST_FUNC; +void xrconnect(int s, const struct sockaddr *s_addr, socklen_t addrlen, + void (*reportf)(int err)) FAST_FUNC; +ssize_t xsendto(int s, const void *buf, size_t len, const struct sockaddr *to, + socklen_t tolen) FAST_FUNC; +ssize_t xrsendto(int s, const void *buf, size_t len, const struct sockaddr *to, + socklen_t tolen, void (*reportf)(int err)) FAST_FUNC; +ssize_t rsendto(int s, const void *buf, size_t len, const struct sockaddr *to, + socklen_t tolen, void (*reportf)(int err)) FAST_FUNC; + +int setsockopt_int(int fd, int level, int optname, int optval) FAST_FUNC; +int setsockopt_1(int fd, int level, int optname) FAST_FUNC; +int setsockopt_SOL_SOCKET_int(int fd, int optname, int optval) FAST_FUNC; +int setsockopt_SOL_SOCKET_1(int fd, int optname) FAST_FUNC; +/* SO_REUSEADDR allows a server to rebind to an address that is already + * "in use" by old connections to e.g. previous server instance which is + * killed or crashed. Without it bind will fail until all such connections + * time out. Linux does not allow multiple live binds on same ip:port + * regardless of SO_REUSEADDR (unlike some other flavors of Unix). + * Turn it on before you call bind(). */ +void setsockopt_reuseaddr(int fd) FAST_FUNC; /* On Linux this never fails. */ +int setsockopt_keepalive(int fd) FAST_FUNC; +int setsockopt_broadcast(int fd) FAST_FUNC; +int setsockopt_bindtodevice(int fd, const char *iface) FAST_FUNC; +/* NB: returns port in host byte order */ +unsigned bb_lookup_port(const char *port, const char *protocol, unsigned default_port) FAST_FUNC; +#ifndef ENABLE_FEATURE_IPV6 +#define ENABLE_FEATURE_IPV6 1 +#endif +typedef struct len_and_sockaddr { + socklen_t len; + union { + struct sockaddr sa; + struct sockaddr_in sin; +#if ENABLE_FEATURE_IPV6 + struct sockaddr_in6 sin6; +#endif + } u; +} len_and_sockaddr; +enum { + LSA_LEN_SIZE = offsetof(len_and_sockaddr, u), + LSA_SIZEOF_SA = sizeof( + union { + struct sockaddr sa; + struct sockaddr_in sin; +#if ENABLE_FEATURE_IPV6 + struct sockaddr_in6 sin6; +#endif + } + ) +}; +/* Create stream socket, and allocate suitable lsa. + * (lsa of correct size and lsa->sa.sa_family (AF_INET/AF_INET6)) + * af == AF_UNSPEC will result in trying to create IPv6 socket, + * and if kernel doesn't support it, fall back to IPv4. + * This is useful if you plan to bind to resulting local lsa. + */ +int xsocket_type(len_and_sockaddr **lsap, int af, int sock_type) FAST_FUNC; +int xsocket_stream(len_and_sockaddr **lsap) FAST_FUNC; +/* Create server socket bound to bindaddr:port. bindaddr can be NULL, + * numeric IP ("N.N.N.N") or numeric IPv6 address, + * and can have ":PORT" suffix (for IPv6 use "[X:X:...:X]:PORT"). 
+ * Only if there is no suffix, port argument is used */ +/* NB: these set SO_REUSEADDR before bind */ +int create_and_bind_stream_or_die(const char *bindaddr, int port) FAST_FUNC; +int create_and_bind_dgram_or_die(const char *bindaddr, int port) FAST_FUNC; +/* Create client TCP socket connected to peer:port. Peer cannot be NULL. + * Peer can be numeric IP ("N.N.N.N"), numeric IPv6 address or hostname, + * and can have ":PORT" suffix (for IPv6 use "[X:X:...:X]:PORT"). + * If there is no suffix, port argument is used */ +int create_and_connect_stream_or_die(const char *peer, int port) FAST_FUNC; +/* Connect to peer identified by lsa */ +int xconnect_stream(const len_and_sockaddr *lsa) FAST_FUNC; +/* Get local address of bound or accepted socket */ +len_and_sockaddr *get_sock_lsa(int fd) FAST_FUNC RETURNS_MALLOC; +/* Get remote address of connected or accepted socket */ +len_and_sockaddr *get_peer_lsa(int fd) FAST_FUNC RETURNS_MALLOC; +/* Return malloc'ed len_and_sockaddr with socket address of host:port + * Currently will return IPv4 or IPv6 sockaddrs only + * (depending on host), but in theory nothing prevents e.g. + * UNIX socket address being returned, IPX sockaddr etc... + * On error does bb_error_msg and returns NULL */ +len_and_sockaddr* host2sockaddr(const char *host, int port) FAST_FUNC RETURNS_MALLOC; +/* Version which dies on error */ +len_and_sockaddr* xhost2sockaddr(const char *host, int port) FAST_FUNC RETURNS_MALLOC; +len_and_sockaddr* xdotted2sockaddr(const char *host, int port) FAST_FUNC RETURNS_MALLOC; +/* Same, useful if you want to force family (e.g. IPv6) */ +#if !ENABLE_FEATURE_IPV6 +#define host_and_af2sockaddr(host, port, af) host2sockaddr((host), (port)) +#define xhost_and_af2sockaddr(host, port, af) xhost2sockaddr((host), (port)) +#else +len_and_sockaddr* host_and_af2sockaddr(const char *host, int port, sa_family_t af) FAST_FUNC RETURNS_MALLOC; +len_and_sockaddr* xhost_and_af2sockaddr(const char *host, int port, sa_family_t af) FAST_FUNC RETURNS_MALLOC; +#endif +/* Assign sin[6]_port member if the socket is an AF_INET[6] one, + * otherwise no-op. Useful for ftp. + * NB: does NOT do htons() internally, just direct assignment. */ +void set_nport(struct sockaddr *sa, unsigned port) FAST_FUNC; +/* Retrieve sin[6]_port or return -1 for non-INET[6] lsa's */ +int get_nport(const struct sockaddr *sa) FAST_FUNC; +/* Reverse DNS. Returns NULL on failure. 
*/ +char* xmalloc_sockaddr2host(const struct sockaddr *sa) FAST_FUNC RETURNS_MALLOC; +/* This one doesn't append :PORTNUM */ +char* xmalloc_sockaddr2host_noport(const struct sockaddr *sa) FAST_FUNC RETURNS_MALLOC; +/* This one also doesn't fall back to dotted IP (returns NULL) */ +char* xmalloc_sockaddr2hostonly_noport(const struct sockaddr *sa) FAST_FUNC RETURNS_MALLOC; +/* inet_[ap]ton on steroids */ +char* xmalloc_sockaddr2dotted(const struct sockaddr *sa) FAST_FUNC RETURNS_MALLOC; +char* xmalloc_sockaddr2dotted_noport(const struct sockaddr *sa) FAST_FUNC RETURNS_MALLOC; +// "old" (ipv4 only) API +// users: traceroute.c hostname.c - use _list_ of all IPs +struct hostent *xgethostbyname(const char *name) FAST_FUNC; +// Also mount.c and inetd.c are using gethostbyname(), +// + inet_common.c has additional IPv4-only stuff + +len_and_sockaddr* get_sock_lsa(int fd) FAST_FUNC; + +void socket_want_pktinfo(int fd) FAST_FUNC; +ssize_t send_to_from(int fd, void *buf, size_t len, int flags, + const struct sockaddr *to, + const struct sockaddr *from, + socklen_t tolen) FAST_FUNC; +ssize_t recv_from_to(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, + struct sockaddr *to, + socklen_t sa_size) FAST_FUNC; + +uint16_t inet_cksum(uint16_t *addr, int len) FAST_FUNC; + +char *xstrdup(const char *s) FAST_FUNC RETURNS_MALLOC; +char *xstrndup(const char *s, int n) FAST_FUNC RETURNS_MALLOC; +void *xmemdup(const void *s, int n) FAST_FUNC RETURNS_MALLOC; +void overlapping_strcpy(char *dst, const char *src) FAST_FUNC; +char *safe_strncpy(char *dst, const char *src, size_t size) FAST_FUNC; +char *strncpy_IFNAMSIZ(char *dst, const char *src) FAST_FUNC; +unsigned count_strstr(const char *str, const char *sub) FAST_FUNC; +char *xmalloc_substitute_string(const char *src, int count, const char *sub, const char *repl) FAST_FUNC; +/* Guaranteed to NOT be a macro (smallest code). Saves nearly 2k on uclibc. + * But potentially slow, don't use in one-billion-times loops */ +int bb_putchar(int ch) FAST_FUNC; +/* Note: does not use stdio, writes to fd 2 directly */ +int bb_putchar_stderr(char ch) FAST_FUNC; +char *xasprintf(const char *format, ...) __attribute__ ((format(printf, 1, 2))) FAST_FUNC RETURNS_MALLOC; +char *auto_string(char *str) FAST_FUNC; +// gcc-4.1.1 still isn't good enough at optimizing it +// (+200 bytes compared to macro) +//static ALWAYS_INLINE +//int LONE_DASH(const char *s) { return s[0] == '-' && !s[1]; } +//static ALWAYS_INLINE +//int NOT_LONE_DASH(const char *s) { return s[0] != '-' || s[1]; } +#define LONE_DASH(s) ((s)[0] == '-' && !(s)[1]) +#define NOT_LONE_DASH(s) ((s)[0] != '-' || (s)[1]) +#define LONE_CHAR(s,c) ((s)[0] == (c) && !(s)[1]) +#define NOT_LONE_CHAR(s,c) ((s)[0] != (c) || (s)[1]) +#define DOT_OR_DOTDOT(s) ((s)[0] == '.' && (!(s)[1] || ((s)[1] == '.' && !(s)[2]))) + +typedef struct uni_stat_t { + unsigned byte_count; + unsigned unicode_count; + unsigned unicode_width; +} uni_stat_t; +/* Returns a string with unprintable chars replaced by '?' or + * SUBST_WCHAR. This function is unicode-aware. */ +const char* FAST_FUNC printable_string(uni_stat_t *stats, const char *str); +/* Prints unprintable char ch as ^C or M-c to file + * (M-c is used only if ch is ORed with PRINTABLE_META), + * else it is printed as-is (except for ch = 0x9b) */ +enum { PRINTABLE_META = 0x100 }; +void fputc_printable(int ch, FILE *file) FAST_FUNC; +/* Return a string that is the printable representation of character ch. + * Buffer must hold at least four characters. 
*/
+enum {
+	VISIBLE_ENDLINE = 1 << 0,
+	VISIBLE_SHOW_TABS = 1 << 1,
+};
+void visible(unsigned ch, char *buf, int flags) FAST_FUNC;
+
+/* dmalloc will redefine these to its own implementation. It is safe
+ * to have the prototypes here unconditionally. */
+void *malloc_or_warn(size_t size) FAST_FUNC RETURNS_MALLOC;
+void *xmalloc(size_t size) FAST_FUNC RETURNS_MALLOC;
+void *xzalloc(size_t size) FAST_FUNC RETURNS_MALLOC;
+void *xrealloc(void *old, size_t size) FAST_FUNC;
+/* After v = xrealloc_vector(v, SHIFT, idx) it's ok to use
+ * at least v[idx] and v[idx+1], for all idx values.
+ * SHIFT specifies how many new elements are added (1:2, 2:4, ..., 8:256...)
+ * when all elements are used up. New elements are zeroed out.
+ * xrealloc_vector(v, SHIFT, idx) *MUST* be called with consecutive IDXs -
+ * skipping an index is a bad bug - it may miss a realloc!
+ */
+#define xrealloc_vector(vector, shift, idx) \
+	xrealloc_vector_helper((vector), (sizeof((vector)[0]) << 8) + (shift), (idx))
+void* xrealloc_vector_helper(void *vector, unsigned sizeof_and_shift, int idx) FAST_FUNC;
+
+
+extern ssize_t safe_read(int fd, void *buf, size_t count) FAST_FUNC;
+extern ssize_t nonblock_immune_read(int fd, void *buf, size_t count) FAST_FUNC;
+// NB: will return short read on error, not -1,
+// if some data was read before error occurred
+extern ssize_t full_read(int fd, void *buf, size_t count) FAST_FUNC;
+extern void xread(int fd, void *buf, size_t count) FAST_FUNC;
+extern unsigned char xread_char(int fd) FAST_FUNC;
+extern ssize_t read_close(int fd, void *buf, size_t maxsz) FAST_FUNC;
+extern ssize_t open_read_close(const char *filename, void *buf, size_t maxsz) FAST_FUNC;
+// Reads one line a-la fgets (but doesn't save terminating '\n').
+// Reads byte-by-byte. Useful when it is important to not read ahead.
+// Bytes are appended to pfx (which must be malloced, or NULL).
+extern char *xmalloc_reads(int fd, size_t *maxsz_p) FAST_FUNC;
+/* Reads block up to *maxsz_p (default: INT_MAX - 4095) */
+extern void *xmalloc_read(int fd, size_t *maxsz_p) FAST_FUNC RETURNS_MALLOC;
+/* Returns NULL if file can't be opened (default max size: INT_MAX - 4095) */
+extern void *xmalloc_open_read_close(const char *filename, size_t *maxsz_p) FAST_FUNC RETURNS_MALLOC;
+/* Never returns NULL */
+extern void *xmalloc_xopen_read_close(const char *filename, size_t *maxsz_p) FAST_FUNC RETURNS_MALLOC;
+
+#if defined(ARG_MAX) && (ARG_MAX >= 60*1024 || !defined(_SC_ARG_MAX))
+/* Use _constant_ maximum if: defined && (big enough || no variable one exists) */
+# define bb_arg_max() ((unsigned)ARG_MAX)
+#elif defined(_SC_ARG_MAX)
+/* Else use variable one (a bit more expensive) */
+unsigned bb_arg_max(void) FAST_FUNC;
+#else
+/* If all else fails */
+# define bb_arg_max() ((unsigned)(32 * 1024))
+#endif
+unsigned bb_clk_tck(void) FAST_FUNC;
+
+#define SEAMLESS_COMPRESSION (0 \
+	|| ENABLE_FEATURE_SEAMLESS_XZ \
+	|| ENABLE_FEATURE_SEAMLESS_LZMA \
+	|| ENABLE_FEATURE_SEAMLESS_BZ2 \
+	|| ENABLE_FEATURE_SEAMLESS_GZ \
+	|| ENABLE_FEATURE_SEAMLESS_Z)
+
+#if SEAMLESS_COMPRESSION
+/* Autodetects gzip/bzip2 formats. fd may be in the middle of the file! */
+extern int setup_unzip_on_fd(int fd, int fail_if_not_compressed) FAST_FUNC;
+/* Autodetects .gz etc */
+extern int open_zipped(const char *fname, int fail_if_not_compressed) FAST_FUNC;
+extern void *xmalloc_open_zipped_read_close(const char *fname, size_t *maxsz_p) FAST_FUNC RETURNS_MALLOC;
+#else
+# define setup_unzip_on_fd(...)
(0) +# define open_zipped(fname, fail_if_not_compressed) open((fname), O_RDONLY); +# define xmalloc_open_zipped_read_close(fname, maxsz_p) xmalloc_open_read_close((fname), (maxsz_p)) +#endif + +extern ssize_t safe_write(int fd, const void *buf, size_t count) FAST_FUNC; +// NB: will return short write on error, not -1, +// if some data was written before error occurred +extern ssize_t full_write(int fd, const void *buf, size_t count) FAST_FUNC; +extern void xwrite(int fd, const void *buf, size_t count) FAST_FUNC; +extern void xwrite_str(int fd, const char *str) FAST_FUNC; +extern ssize_t full_write1_str(const char *str) FAST_FUNC; +extern ssize_t full_write2_str(const char *str) FAST_FUNC; +extern void xopen_xwrite_close(const char* file, const char *str) FAST_FUNC; + +/* Close fd, but check for failures (some types of write errors) */ +extern void xclose(int fd) FAST_FUNC; + +/* Reads and prints to stdout till eof, then closes FILE. Exits on error: */ +extern void xprint_and_close_file(FILE *file) FAST_FUNC; + +/* Reads a line from a text file, up to a newline or NUL byte, inclusive. + * Returns malloc'ed char*. If end is NULL '\n' isn't considered + * end of line. If end isn't NULL, length of the chunk is stored in it. + * Returns NULL if EOF/error. + */ +extern char *bb_get_chunk_from_file(FILE *file, int *end) FAST_FUNC; +/* Reads up to (and including) TERMINATING_STRING: */ +extern char *xmalloc_fgets_str(FILE *file, const char *terminating_string) FAST_FUNC RETURNS_MALLOC; +/* Same, with limited max size, and returns the length (excluding NUL): */ +extern char *xmalloc_fgets_str_len(FILE *file, const char *terminating_string, size_t *maxsz_p) FAST_FUNC RETURNS_MALLOC; +/* Chops off TERMINATING_STRING from the end: */ +extern char *xmalloc_fgetline_str(FILE *file, const char *terminating_string) FAST_FUNC RETURNS_MALLOC; +/* Reads up to (and including) "\n" or NUL byte: */ +extern char *xmalloc_fgets(FILE *file) FAST_FUNC RETURNS_MALLOC; +/* Chops off '\n' from the end, unlike fgets: */ +extern char *xmalloc_fgetline(FILE *file) FAST_FUNC RETURNS_MALLOC; +/* Same, but doesn't try to conserve space (may have some slack after the end) */ +/* extern char *xmalloc_fgetline_fast(FILE *file) FAST_FUNC RETURNS_MALLOC; */ + +void die_if_ferror(FILE *file, const char *msg) FAST_FUNC; +void die_if_ferror_stdout(void) FAST_FUNC; +int fflush_all(void) FAST_FUNC; +void fflush_stdout_and_exit(int retval) NORETURN FAST_FUNC; +int fclose_if_not_stdin(FILE *file) FAST_FUNC; +FILE* xfopen(const char *filename, const char *mode) FAST_FUNC; +/* Prints warning to stderr and returns NULL on failure: */ +FILE* fopen_or_warn(const char *filename, const char *mode) FAST_FUNC; +/* "Opens" stdin if filename is special, else just opens file: */ +FILE* xfopen_stdin(const char *filename) FAST_FUNC; +FILE* fopen_or_warn_stdin(const char *filename) FAST_FUNC; +FILE* fopen_for_read(const char *path) FAST_FUNC; +FILE* xfopen_for_read(const char *path) FAST_FUNC; +FILE* fopen_for_write(const char *path) FAST_FUNC; +FILE* xfopen_for_write(const char *path) FAST_FUNC; +FILE* xfdopen_for_read(int fd) FAST_FUNC; +FILE* xfdopen_for_write(int fd) FAST_FUNC; + +int bb_pstrcmp(const void *a, const void *b) /* not FAST_FUNC! */; +void qsort_string_vector(char **sv, unsigned count) FAST_FUNC; + +/* Wrapper which restarts poll on EINTR or ENOMEM. + * On other errors complains [perror("poll")] and returns. + * Warning! May take (much) longer than timeout_ms to return! 
+ * If this is a problem, use bare poll and open-code EINTR/ENOMEM handling */ +int safe_poll(struct pollfd *ufds, nfds_t nfds, int timeout_ms) FAST_FUNC; + +char *safe_gethostname(void) FAST_FUNC; + +/* Convert each alpha char in str to lower-case */ +char* str_tolower(char *str) FAST_FUNC; + +char *utoa(unsigned n) FAST_FUNC; +char *itoa(int n) FAST_FUNC; +/* Returns a pointer past the formatted number, does NOT null-terminate */ +char *utoa_to_buf(unsigned n, char *buf, unsigned buflen) FAST_FUNC; +char *itoa_to_buf(int n, char *buf, unsigned buflen) FAST_FUNC; +/* Intelligent formatters of bignums */ +char *smart_ulltoa4(unsigned long long ul, char buf[4], const char *scale) FAST_FUNC; +char *smart_ulltoa5(unsigned long long ul, char buf[5], const char *scale) FAST_FUNC; +/* If block_size == 0, display size without fractional part, + * else display (size * block_size) with one decimal digit. + * If display_unit == 0, show value no bigger than 1024 with suffix (K,M,G...), + * else divide by display_unit and do not use suffix. */ +#define HUMAN_READABLE_MAX_WIDTH 7 /* "1024.0G" */ +#define HUMAN_READABLE_MAX_WIDTH_STR "7" +//TODO: provide pointer to buf (avoid statics)? +const char *make_human_readable_str(unsigned long long size, + unsigned long block_size, unsigned long display_unit) FAST_FUNC; +/* Put a string of hex bytes ("1b2e66fe"...), return advanced pointer */ +char *bin2hex(char *dst, const char *src, int count) FAST_FUNC; +/* Reverse */ +char* hex2bin(char *dst, const char *src, int count) FAST_FUNC; + +/* Generate a UUID */ +void generate_uuid(uint8_t *buf) FAST_FUNC; + +/* Last element is marked by mult == 0 */ +struct suffix_mult { + char suffix[4]; + unsigned mult; +}; +extern const struct suffix_mult bkm_suffixes[]; +#define km_suffixes (bkm_suffixes + 1) +extern const struct suffix_mult cwbkMG_suffixes[]; +#define kMG_suffixes (cwbkMG_suffixes + 3) +extern const struct suffix_mult kmg_i_suffixes[]; + +#include "xatonum.h" +/* Specialized: */ + +/* Using xatoi() instead of naive atoi() is not always convenient - + * in many places people want *non-negative* values, but store them + * in signed int. Therefore we need this one: + * dies if input is not in [0, INT_MAX] range. Also will reject '-0' etc. + * It should really be named xatoi_nonnegative (since it allows 0), + * but that would be too long. + */ +int xatoi_positive(const char *numstr) FAST_FUNC; + +/* Useful for reading port numbers */ +uint16_t xatou16(const char *numstr) FAST_FUNC; + + +/* These parse entries in /etc/passwd and /etc/group. This is desirable + * for BusyBox since we want to avoid using the glibc NSS stuff, which + * increases target size and is often not needed on embedded systems. 
*/ +long xuname2uid(const char *name) FAST_FUNC; +long xgroup2gid(const char *name) FAST_FUNC; +/* wrapper: allows string to contain numeric uid or gid */ +unsigned long get_ug_id(const char *s, long FAST_FUNC (*xname2id)(const char *)) FAST_FUNC; +struct bb_uidgid_t { + uid_t uid; + gid_t gid; +}; +/* always sets uid and gid; returns 0 on failure */ +int get_uidgid(struct bb_uidgid_t*, const char*) FAST_FUNC; +/* always sets uid and gid; exits on failure */ +void xget_uidgid(struct bb_uidgid_t*, const char*) FAST_FUNC; +/* chown-like handling of "user[:[group]" */ +void parse_chown_usergroup_or_die(struct bb_uidgid_t *u, char *user_group) FAST_FUNC; +struct passwd* xgetpwnam(const char *name) FAST_FUNC; +struct group* xgetgrnam(const char *name) FAST_FUNC; +struct passwd* xgetpwuid(uid_t uid) FAST_FUNC; +struct group* xgetgrgid(gid_t gid) FAST_FUNC; +char* xuid2uname(uid_t uid) FAST_FUNC; +char* xgid2group(gid_t gid) FAST_FUNC; +char* uid2uname(uid_t uid) FAST_FUNC; +char* gid2group(gid_t gid) FAST_FUNC; +char* uid2uname_utoa(uid_t uid) FAST_FUNC; +char* gid2group_utoa(gid_t gid) FAST_FUNC; +/* versions which cache results (useful for ps, ls etc) */ +const char* get_cached_username(uid_t uid) FAST_FUNC; +const char* get_cached_groupname(gid_t gid) FAST_FUNC; +void clear_username_cache(void) FAST_FUNC; +/* internally usernames are saved in fixed-sized char[] buffers */ +enum { USERNAME_MAX_SIZE = 32 - sizeof(uid_t) }; +#ifndef ENABLE_FEATURE_CHECK_NAMES +#define ENABLE_FEATURE_CHECK_NAMES 0 +#endif +#if ENABLE_FEATURE_CHECK_NAMES +void die_if_bad_username(const char* name) FAST_FUNC; +#else +#define die_if_bad_username(name) ((void)(name)) +#endif + +#if ENABLE_FEATURE_UTMP +void FAST_FUNC write_new_utmp(pid_t pid, int new_type, const char *tty_name, const char *username, const char *hostname); +void FAST_FUNC update_utmp(pid_t pid, int new_type, const char *tty_name, const char *username, const char *hostname); +void FAST_FUNC update_utmp_DEAD_PROCESS(pid_t pid); +#else +# define write_new_utmp(pid, new_type, tty_name, username, hostname) ((void)0) +# define update_utmp(pid, new_type, tty_name, username, hostname) ((void)0) +# define update_utmp_DEAD_PROCESS(pid) ((void)0) +#endif + + +int file_is_executable(const char *name) FAST_FUNC; +char *find_executable(const char *filename, char **PATHp) FAST_FUNC; +int executable_exists(const char *filename) FAST_FUNC; + +/* BB_EXECxx always execs (it's not doing NOFORK/NOEXEC stuff), + * but it may exec busybox and call applet instead of searching PATH. + */ +#if ENABLE_FEATURE_PREFER_APPLETS +int BB_EXECVP(const char *file, char *const argv[]) FAST_FUNC; +#define BB_EXECLP(prog,cmd,...) \ + do { \ + if (find_applet_by_name(prog) >= 0) \ + execlp(bb_busybox_exec_path, cmd, __VA_ARGS__); \ + execlp(prog, cmd, __VA_ARGS__); \ + } while (0) +#else +#define BB_EXECVP(prog,cmd) execvp(prog,cmd) +#define BB_EXECLP(prog,cmd,...) execlp(prog,cmd,__VA_ARGS__) +#endif +void BB_EXECVP_or_die(char **argv) NORETURN FAST_FUNC; +void exec_prog_or_SHELL(char **argv) NORETURN FAST_FUNC; + +/* xvfork() can't be a _function_, return after vfork in child mangles stack + * in the parent. It must be a macro. 
*/
+#define xvfork() \
+({ \
+	pid_t bb__xvfork_pid = vfork(); \
+	if (bb__xvfork_pid < 0) \
+		bb_perror_msg_and_die("vfork"); \
+	bb__xvfork_pid; \
+})
+#if BB_MMU
+pid_t xfork(void) FAST_FUNC;
+#endif
+void xvfork_parent_waits_and_exits(void) FAST_FUNC;
+
+/* NOMMU friendly fork+exec: */
+pid_t spawn(char **argv) FAST_FUNC;
+pid_t xspawn(char **argv) FAST_FUNC;
+
+pid_t safe_waitpid(pid_t pid, int *wstat, int options) FAST_FUNC;
+pid_t wait_any_nohang(int *wstat) FAST_FUNC;
+/* wait4pid: unlike waitpid, waits ONLY for one process.
+ * Returns sig + 0x180 if child is killed by signal.
+ * It's safe to pass negative 'pids' from failed [v]fork -
+ * wait4pid will return -1 (and will not clobber [v]fork's errno).
+ * IOW: rc = wait4pid(spawn(argv));
+ *      if (rc < 0) bb_perror_msg("%s", argv[0]);
+ *      if (rc > 0) bb_error_msg("exit code: %d", rc & 0xff);
+ */
+int wait4pid(pid_t pid) FAST_FUNC;
+int wait_for_exitstatus(pid_t pid) FAST_FUNC;
+/* Same as wait4pid(spawn(argv)), but with NOFORK/NOEXEC if configured: */
+int spawn_and_wait(char **argv) FAST_FUNC;
+/* Does NOT check that applet is NOFORK, just blindly runs it */
+int run_nofork_applet(int applet_no, char **argv) FAST_FUNC;
+
+/* Helpers for daemonization.
+ *
+ * bb_daemonize(flags) = daemonize, does not compile on NOMMU
+ *
+ * bb_daemonize_or_rexec(flags, argv) = daemonizes on MMU (and ignores argv),
+ *      rexec's itself on NOMMU with argv passed as command line.
+ * Thus bb_daemonize_or_rexec may cause your _main() to be re-executed
+ * from the start. (It will detect it and not reexec again a second time).
+ * You have to audit carefully that you don't do something twice as a result
+ * (opening files/sockets, parsing config files etc...)!
+ *
+ * Both of the above will redirect fd 0,1,2 to /dev/null and drop ctty
+ * (will do setsid()).
+ *
+ * fork_or_rexec(argv) = bare-bones fork on MMU,
+ *      "vfork + re-exec ourself" on NOMMU. No fd redirection, no setsid().
+ *      On MMU ignores argv.
+ *
+ * Helper for network daemons in foreground mode:
+ *
+ * bb_sanitize_stdio() = make sure that fd 0,1,2 are opened by opening them
+ * to /dev/null if they are not.
+ */
+enum {
+	DAEMON_CHDIR_ROOT = 1,
+	DAEMON_DEVNULL_STDIO = 2,
+	DAEMON_CLOSE_EXTRA_FDS = 4,
+	DAEMON_ONLY_SANITIZE = 8, /* internal use */
+	DAEMON_DOUBLE_FORK = 16, /* double fork to avoid controlling tty */
+};
+#if BB_MMU
+ enum { re_execed = 0 };
+# define fork_or_rexec(argv) xfork()
+# define bb_daemonize_or_rexec(flags, argv) bb_daemonize_or_rexec(flags)
+# define bb_daemonize(flags) bb_daemonize_or_rexec(flags, bogus)
+#else
+ extern bool re_execed;
+ /* Note: re_exec() and fork_or_rexec() do argv[0][0] |= 0x80 on NOMMU!
+  * _Parent_ needs to undo it if it doesn't want to have argv[0] mangled.
+  */
+ void re_exec(char **argv) NORETURN FAST_FUNC;
+ pid_t fork_or_rexec(char **argv) FAST_FUNC;
+ int BUG_fork_is_unavailable_on_nommu(void) FAST_FUNC;
+ int BUG_daemon_is_unavailable_on_nommu(void) FAST_FUNC;
+ void BUG_bb_daemonize_is_unavailable_on_nommu(void) FAST_FUNC;
+# define fork() BUG_fork_is_unavailable_on_nommu()
+# define xfork() BUG_fork_is_unavailable_on_nommu()
+# define daemon(a,b) BUG_daemon_is_unavailable_on_nommu()
+# define bb_daemonize(a) BUG_bb_daemonize_is_unavailable_on_nommu()
+#endif
+void bb_daemonize_or_rexec(int flags, char **argv) FAST_FUNC;
+void bb_sanitize_stdio(void) FAST_FUNC;
+/* Clear dangerous stuff, set PATH. Return 1 if was run by different user.
*/ +int sanitize_env_if_suid(void) FAST_FUNC; + + +char* single_argv(char **argv) FAST_FUNC; +extern const char *const bb_argv_dash[]; /* "-", NULL */ +extern const char *opt_complementary; +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG +#define No_argument "\0" +#define Required_argument "\001" +#define Optional_argument "\002" +extern const char *applet_long_options; +#endif +extern uint32_t option_mask32; +extern uint32_t getopt32(char **argv, const char *applet_opts, ...) FAST_FUNC; + + +/* Having next pointer as a first member allows easy creation + * of "llist-compatible" structs, and using llist_FOO functions + * on them. + */ +typedef struct llist_t { + struct llist_t *link; + char *data; +} llist_t; +void llist_add_to(llist_t **old_head, void *data) FAST_FUNC; +void llist_add_to_end(llist_t **list_head, void *data) FAST_FUNC; +void *llist_pop(llist_t **elm) FAST_FUNC; +void llist_unlink(llist_t **head, llist_t *elm) FAST_FUNC; +void llist_free(llist_t *elm, void (*freeit)(void *data)) FAST_FUNC; +llist_t *llist_rev(llist_t *list) FAST_FUNC; +llist_t *llist_find_str(llist_t *first, const char *str) FAST_FUNC; +/* BTW, surprisingly, changing API to + * llist_t *llist_add_to(llist_t *old_head, void *data) + * etc does not result in smaller code... */ + +/* start_stop_daemon and udhcpc are special - they want + * to create pidfiles regardless of FEATURE_PIDFILE */ +#if ENABLE_FEATURE_PIDFILE || defined(WANT_PIDFILE) +/* True only if we created pidfile which is *file*, not /dev/null etc */ +extern smallint wrote_pidfile; +int write_pidfile(const char *path) FAST_FUNC; +int check_pidfile(const char *path) FAST_FUNC; +#define remove_pidfile(path) do { if (wrote_pidfile) unlink(path); } while (0) +#else +enum { wrote_pidfile = 0 }; +#define write_pidfile(path) ((void)0) +#define remove_pidfile(path) ((void)0) +#endif + +enum { + LOGMODE_NONE = 0, + LOGMODE_STDIO = (1 << 0), + LOGMODE_SYSLOG = (1 << 1) * ENABLE_FEATURE_SYSLOG, + LOGMODE_BOTH = LOGMODE_SYSLOG + LOGMODE_STDIO, +}; +extern const char *msg_eol; +extern smallint syslog_level; +extern smallint logmode; +extern uint8_t xfunc_error_retval; +extern void (*die_func)(void); +extern void xfunc_die(void) NORETURN FAST_FUNC; +extern void bb_show_usage(void) NORETURN FAST_FUNC; +extern void bb_error_msg(const char *s, ...) __attribute__ ((format (printf, 1, 2))) FAST_FUNC; +extern void bb_error_msg_and_die(const char *s, ...) __attribute__ ((noreturn, format (printf, 1, 2))) FAST_FUNC; +extern void bb_perror_msg(const char *s, ...) __attribute__ ((format (printf, 1, 2))) FAST_FUNC; +extern void bb_simple_perror_msg(const char *s) FAST_FUNC; +extern void bb_perror_msg_and_die(const char *s, ...) __attribute__ ((noreturn, format (printf, 1, 2))) FAST_FUNC; +extern void bb_simple_perror_msg_and_die(const char *s) NORETURN FAST_FUNC; +extern void bb_herror_msg(const char *s, ...) __attribute__ ((format (printf, 1, 2))) FAST_FUNC; +extern void bb_herror_msg_and_die(const char *s, ...) 
__attribute__ ((noreturn, format (printf, 1, 2))) FAST_FUNC;
+extern void bb_perror_nomsg_and_die(void) NORETURN FAST_FUNC;
+extern void bb_perror_nomsg(void) FAST_FUNC;
+extern void bb_verror_msg(const char *s, va_list p, const char *strerr) FAST_FUNC;
+extern void bb_logenv_override(void) FAST_FUNC;
+
+/* We need to export XXX_main from libbusybox
+ * only if we build "individual" binaries
+ */
+#if ENABLE_FEATURE_INDIVIDUAL
+#define MAIN_EXTERNALLY_VISIBLE EXTERNALLY_VISIBLE
+#else
+#define MAIN_EXTERNALLY_VISIBLE
+#endif
+
+
+/* Applets which are useful from other applets */
+int bb_cat(char** argv);
+/* If shell needs them, they exist even if not enabled as applets */
+#ifndef IF_ECHO
+#define IF_ECHO(x)
+#endif
+int echo_main(int argc, char** argv) IF_ECHO(MAIN_EXTERNALLY_VISIBLE);
+#ifndef IF_PRINTF
+#define IF_PRINTF(x)
+#endif
+int printf_main(int argc, char **argv) IF_PRINTF(MAIN_EXTERNALLY_VISIBLE);
+#ifndef ENABLE_TEST
+#define ENABLE_TEST 0
+#endif
+#ifndef ENABLE_TEST1
+#define ENABLE_TEST1 0
+#endif
+#ifndef ENABLE_TEST2
+#define ENABLE_TEST2 0
+#endif
+int test_main(int argc, char **argv)
+#if ENABLE_TEST || ENABLE_TEST1 || ENABLE_TEST2
+	MAIN_EXTERNALLY_VISIBLE
+#endif
+;
+#ifndef ENABLE_KILL
+#define ENABLE_KILL 0
+#endif
+#ifndef ENABLE_KILLALL
+#define ENABLE_KILLALL 0
+#endif
+#ifndef ENABLE_KILLALL5
+#define ENABLE_KILLALL5 0
+#endif
+int kill_main(int argc, char **argv)
+#if ENABLE_KILL || ENABLE_KILLALL || ENABLE_KILLALL5
+	MAIN_EXTERNALLY_VISIBLE
+#endif
+;
+/* Similar, but used by chgrp, not shell */
+#ifndef IF_CHOWN
+#define IF_CHOWN(x)
+#endif
+int chown_main(int argc, char **argv) IF_CHOWN(MAIN_EXTERNALLY_VISIBLE);
+/* Used by ftpd */
+#ifndef IF_LS
+#define IF_LS(x)
+#endif
+int ls_main(int argc, char **argv) IF_LS(MAIN_EXTERNALLY_VISIBLE);
+/* Don't need IF_xxx() guard for these */
+int gunzip_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+int bunzip2_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+
+#ifndef ENABLE_ROUTE
+#define ENABLE_ROUTE 0
+#endif
+#if ENABLE_ROUTE
+void bb_displayroutes(int noresolve, int netstatfmt) FAST_FUNC;
+#endif
+
+
+/* Networking */
+/* This structure defines protocol families and their handlers. */
+struct aftype {
+	const char *name;
+	const char *title;
+	int af;
+	int alen;
+	char* FAST_FUNC (*print)(unsigned char *);
+	const char* FAST_FUNC (*sprint)(struct sockaddr *, int numeric);
+	int FAST_FUNC (*input)(/*int type,*/ const char *bufp, struct sockaddr *);
+	void FAST_FUNC (*herror)(char *text);
+	int FAST_FUNC (*rprint)(int options);
+	int FAST_FUNC (*rinput)(int typ, int ext, char **argv);
+	/* may modify src */
+	int FAST_FUNC (*getmask)(char *src, struct sockaddr *mask, char *name);
+};
+/* This structure defines hardware protocols and their handlers.
*/ +struct hwtype { + const char *name; + const char *title; + int type; + int alen; + char* FAST_FUNC (*print)(unsigned char *); + int FAST_FUNC (*input)(const char *, struct sockaddr *); + int FAST_FUNC (*activate)(int fd); + int suppress_null_addr; +}; +extern smallint interface_opt_a; +int display_interfaces(char *ifname) FAST_FUNC; +int in_ether(const char *bufp, struct sockaddr *sap) FAST_FUNC; +#if ENABLE_FEATURE_HWIB +int in_ib(const char *bufp, struct sockaddr *sap) FAST_FUNC; +#else +#define in_ib(a, b) 1 /* fail */ +#endif +const struct aftype *get_aftype(const char *name) FAST_FUNC; +const struct hwtype *get_hwtype(const char *name) FAST_FUNC; +const struct hwtype *get_hwntype(int type) FAST_FUNC; + + +#ifndef BUILD_INDIVIDUAL +extern int find_applet_by_name(const char *name) FAST_FUNC; +extern void run_applet_no_and_exit(int a, char **argv) NORETURN FAST_FUNC; +#endif + +#ifdef HAVE_MNTENT_H +extern int match_fstype(const struct mntent *mt, const char *fstypes) FAST_FUNC; +extern struct mntent *find_mount_point(const char *name, int subdir_too) FAST_FUNC; +#endif +extern void erase_mtab(const char * name) FAST_FUNC; +extern unsigned int tty_baud_to_value(speed_t speed) FAST_FUNC; +extern speed_t tty_value_to_baud(unsigned int value) FAST_FUNC; +#if ENABLE_DESKTOP +extern void bb_warn_ignoring_args(char *arg) FAST_FUNC; +#else +# define bb_warn_ignoring_args(arg) ((void)0) +#endif + +extern int get_linux_version_code(void) FAST_FUNC; + +extern char *query_loop(const char *device) FAST_FUNC; +extern int del_loop(const char *device) FAST_FUNC; +/* If *devname is not NULL, use that name, otherwise try to find free one, + * malloc and return it in *devname. + * return value: 1: read-only loopdev was setup, 0: rw, < 0: error */ +extern int set_loop(char **devname, const char *file, unsigned long long offset, int ro) FAST_FUNC; + +/* Like bb_ask below, but asks on stdin with no timeout. */ +char *bb_ask_stdin(const char * prompt) FAST_FUNC; +//TODO: pass buf pointer or return allocated buf (avoid statics)? +char *bb_ask(const int fd, int timeout, const char * prompt) FAST_FUNC; +int bb_ask_confirmation(void) FAST_FUNC; + +/* Returns -1 if input is invalid. current_mode is a base for e.g. 
"u+rw" */ +int bb_parse_mode(const char* s, unsigned cur_mode) FAST_FUNC; + +/* + * Config file parser + */ +#ifndef ENABLE_FEATURE_CROND_D +#define ENABLE_FEATURE_CROND_D 0 +#endif +enum { + PARSE_COLLAPSE = 0x00010000, // treat consecutive delimiters as one + PARSE_TRIM = 0x00020000, // trim leading and trailing delimiters +// TODO: COLLAPSE and TRIM seem to always go in pair + PARSE_GREEDY = 0x00040000, // last token takes entire remainder of the line + PARSE_MIN_DIE = 0x00100000, // die if < min tokens found + // keep a copy of current line + PARSE_KEEP_COPY = 0x00200000 * ENABLE_FEATURE_CROND_D, + PARSE_EOL_COMMENTS = 0x00400000, // comments are recognized even if they aren't the first char + // NORMAL is: + // * remove leading and trailing delimiters and collapse + // multiple delimiters into one + // * warn and continue if less than mintokens delimiters found + // * grab everything into last token + // * comments are recognized even if they aren't the first char + PARSE_NORMAL = PARSE_COLLAPSE | PARSE_TRIM | PARSE_GREEDY | PARSE_EOL_COMMENTS, +}; +typedef struct parser_t { + FILE *fp; + char *data; + char *line, *nline; + size_t line_alloc, nline_alloc; + int lineno; +} parser_t; +parser_t* config_open(const char *filename) FAST_FUNC; +parser_t* config_open2(const char *filename, FILE* FAST_FUNC (*fopen_func)(const char *path)) FAST_FUNC; +/* delims[0] is a comment char (use '\0' to disable), the rest are token delimiters */ +int config_read(parser_t *parser, char **tokens, unsigned flags, const char *delims) FAST_FUNC; +#define config_read(parser, tokens, max, min, str, flags) \ + config_read(parser, tokens, ((flags) | (((min) & 0xFF) << 8) | ((max) & 0xFF)), str) +void config_close(parser_t *parser) FAST_FUNC; + +/* Concatenate path and filename to new allocated buffer. + * Add "/" only as needed (no duplicate "//" are produced). + * If path is NULL, it is assumed to be "/". + * filename should not be NULL. */ +char *concat_path_file(const char *path, const char *filename) FAST_FUNC; +/* Returns NULL on . and .. */ +char *concat_subpath_file(const char *path, const char *filename) FAST_FUNC; + + +int bb_make_directory(char *path, long mode, int flags) FAST_FUNC; + +int get_signum(const char *name) FAST_FUNC; +const char *get_signame(int number) FAST_FUNC; +void print_signames(void) FAST_FUNC; + +char *bb_simplify_path(const char *path) FAST_FUNC; +/* Returns ptr to NUL */ +char *bb_simplify_abs_path_inplace(char *path) FAST_FUNC; + +#ifndef LOGIN_FAIL_DELAY +#define LOGIN_FAIL_DELAY 3 +#endif +extern void bb_do_delay(int seconds) FAST_FUNC; +extern void change_identity(const struct passwd *pw) FAST_FUNC; +extern void run_shell(const char *shell, int loginshell, const char **args) NORETURN FAST_FUNC; + +/* Returns $SHELL, getpwuid(getuid())->pw_shell, or DEFAULT_SHELL. + * Note that getpwuid result might need xstrdup'ing + * if there is a possibility of intervening getpwxxx() calls. 
+ */ +const char *get_shell_name(void) FAST_FUNC; + +#if ENABLE_SELINUX +extern void renew_current_security_context(void) FAST_FUNC; +extern void set_current_security_context(security_context_t sid) FAST_FUNC; +extern context_t set_security_context_component(security_context_t cur_context, + char *user, char *role, char *type, char *range) FAST_FUNC; +extern void setfscreatecon_or_die(security_context_t scontext) FAST_FUNC; +extern void selinux_preserve_fcontext(int fdesc) FAST_FUNC; +#else +#define selinux_preserve_fcontext(fdesc) ((void)0) +#endif +extern void selinux_or_die(void) FAST_FUNC; + + +/* setup_environment: + * if chdir pw->pw_dir: ok: else if to_tmp == 1: goto /tmp else: goto / or die + * if clear_env = 1: cd(pw->pw_dir), clear environment, then set + * TERM=(old value) + * USER=pw->pw_name, LOGNAME=pw->pw_name + * PATH=bb_default_[root_]path + * HOME=pw->pw_dir + * SHELL=shell + * else if change_env = 1: + * if not root (if pw->pw_uid != 0): + * USER=pw->pw_name, LOGNAME=pw->pw_name + * HOME=pw->pw_dir + * SHELL=shell + * else does nothing + */ +#define SETUP_ENV_CHANGEENV (1 << 0) +#define SETUP_ENV_CLEARENV (1 << 1) +#define SETUP_ENV_TO_TMP (1 << 2) +#define SETUP_ENV_NO_CHDIR (1 << 4) +void setup_environment(const char *shell, int flags, const struct passwd *pw) FAST_FUNC; +void nuke_str(char *str) FAST_FUNC; +int check_password(const struct passwd *pw, const char *plaintext) FAST_FUNC; +int ask_and_check_password_extended(const struct passwd *pw, int timeout, const char *prompt) FAST_FUNC; +int ask_and_check_password(const struct passwd *pw) FAST_FUNC; +/* Returns a malloced string */ +#ifndef ENABLE_USE_BB_CRYPT +#define ENABLE_USE_BB_CRYPT 0 +#endif +#if !ENABLE_USE_BB_CRYPT +#define pw_encrypt(clear, salt, cleanup) pw_encrypt(clear, salt) +#endif +extern char *pw_encrypt(const char *clear, const char *salt, int cleanup) FAST_FUNC; +extern int obscure(const char *old, const char *newval, const struct passwd *pwdp) FAST_FUNC; +/* + * rnd is additional random input. New one is returned. 
+ * Useful if you call crypt_make_salt many times in a row: + * rnd = crypt_make_salt(buf1, 4, 0); + * rnd = crypt_make_salt(buf2, 4, rnd); + * rnd = crypt_make_salt(buf3, 4, rnd); + * (otherwise we risk having same salt generated) + */ +extern int crypt_make_salt(char *p, int cnt /*, int rnd*/) FAST_FUNC; +/* "$N$" + sha_salt_16_bytes + NUL */ +#define MAX_PW_SALT_LEN (3 + 16 + 1) +extern char* crypt_make_pw_salt(char p[MAX_PW_SALT_LEN], const char *algo) FAST_FUNC; + +#ifndef ENABLE_FEATURE_ADDUSER_TO_GROUP +#define ENABLE_FEATURE_ADDUSER_TO_GROUP 0 +#endif +#ifndef ENABLE_FEATURE_DEL_USER_FROM_GROUP +#define ENABLE_FEATURE_DEL_USER_FROM_GROUP 0 +#endif + +/* Returns number of lines changed, or -1 on error */ +#if !(ENABLE_FEATURE_ADDUSER_TO_GROUP || ENABLE_FEATURE_DEL_USER_FROM_GROUP) +#define update_passwd(filename, username, data, member) \ + update_passwd(filename, username, data) +#endif +extern int update_passwd(const char *filename, + const char *username, + const char *data, + const char *member) FAST_FUNC; + +int index_in_str_array(const char *const string_array[], const char *key) FAST_FUNC; +int index_in_strings(const char *strings, const char *key) FAST_FUNC; +int index_in_substr_array(const char *const string_array[], const char *key) FAST_FUNC; +int index_in_substrings(const char *strings, const char *key) FAST_FUNC; +const char *nth_string(const char *strings, int n) FAST_FUNC; + +extern void print_login_issue(const char *issue_file, const char *tty) FAST_FUNC; +extern void print_login_prompt(void) FAST_FUNC; + +char *xmalloc_ttyname(int fd) FAST_FUNC RETURNS_MALLOC; +/* NB: typically you want to pass fd 0, not 1. Think 'applet | grep something' */ +int get_terminal_width_height(int fd, unsigned *width, unsigned *height) FAST_FUNC; +int get_terminal_width(int fd) FAST_FUNC; + +int tcsetattr_stdin_TCSANOW(const struct termios *tp) FAST_FUNC; + +/* NB: "unsigned request" is crucial! "int request" will break some arches! */ +int ioctl_or_perror(int fd, unsigned request, void *argp, const char *fmt,...) __attribute__ ((format (printf, 4, 5))) FAST_FUNC; +int ioctl_or_perror_and_die(int fd, unsigned request, void *argp, const char *fmt,...) __attribute__ ((format (printf, 4, 5))) FAST_FUNC; +#if ENABLE_IOCTL_HEX2STR_ERROR +int bb_ioctl_or_warn(int fd, unsigned request, void *argp, const char *ioctl_name) FAST_FUNC; +int bb_xioctl(int fd, unsigned request, void *argp, const char *ioctl_name) FAST_FUNC; +#define ioctl_or_warn(fd,request,argp) bb_ioctl_or_warn(fd,request,argp,#request) +#define xioctl(fd,request,argp) bb_xioctl(fd,request,argp,#request) +#else +int bb_ioctl_or_warn(int fd, unsigned request, void *argp) FAST_FUNC; +int bb_xioctl(int fd, unsigned request, void *argp) FAST_FUNC; +#define ioctl_or_warn(fd,request,argp) bb_ioctl_or_warn(fd,request,argp) +#define xioctl(fd,request,argp) bb_xioctl(fd,request,argp) +#endif + +char *is_in_ino_dev_hashtable(const struct stat *statbuf) FAST_FUNC; +void add_to_ino_dev_hashtable(const struct stat *statbuf, const char *name) FAST_FUNC; +void reset_ino_dev_hashtable(void) FAST_FUNC; +#ifdef __GLIBC__ +/* At least glibc has horrendously large inline for this, so wrap it */ +unsigned long long bb_makedev(unsigned major, unsigned minor) FAST_FUNC; +#undef makedev +#define makedev(a,b) bb_makedev(a,b) +#endif + + +/* "Keycodes" that report an escape sequence. + * We use something which fits into signed char, + * yet doesn't represent any valid Unicode character. + * Also, -1 is reserved for error indication and we don't use it. 
*/ +enum { + KEYCODE_UP = -2, + KEYCODE_DOWN = -3, + KEYCODE_RIGHT = -4, + KEYCODE_LEFT = -5, + KEYCODE_HOME = -6, + KEYCODE_END = -7, + KEYCODE_INSERT = -8, + KEYCODE_DELETE = -9, + KEYCODE_PAGEUP = -10, + KEYCODE_PAGEDOWN = -11, + KEYCODE_BACKSPACE = -12, /* Used only if Alt/Ctrl/Shifted */ + KEYCODE_D = -13, /* Used only if Alted */ +#if 0 + KEYCODE_FUN1 = , + KEYCODE_FUN2 = , + KEYCODE_FUN3 = , + KEYCODE_FUN4 = , + KEYCODE_FUN5 = , + KEYCODE_FUN6 = , + KEYCODE_FUN7 = , + KEYCODE_FUN8 = , + KEYCODE_FUN9 = , + KEYCODE_FUN10 = , + KEYCODE_FUN11 = , + KEYCODE_FUN12 = , +#endif + /* ^^^^^ Be sure that last defined value is small enough. + * Current read_key() code allows going up to -32 (0xfff..fffe0). + * This gives three upper bits in LSB to play with: + * KEYCODE_foo values are 0xfff..fffXX, lowest XX bits are: scavvvvv, + * s=0 if SHIFT, c=0 if CTRL, a=0 if ALT, + * vvvvv bits are the same for same key regardless of "shift bits". + */ + //KEYCODE_SHIFT_... = KEYCODE_... & ~0x80, + KEYCODE_CTRL_RIGHT = KEYCODE_RIGHT & ~0x40, + KEYCODE_CTRL_LEFT = KEYCODE_LEFT & ~0x40, + KEYCODE_ALT_RIGHT = KEYCODE_RIGHT & ~0x20, + KEYCODE_ALT_LEFT = KEYCODE_LEFT & ~0x20, + KEYCODE_ALT_BACKSPACE = KEYCODE_BACKSPACE & ~0x20, + KEYCODE_ALT_D = KEYCODE_D & ~0x20, + + KEYCODE_CURSOR_POS = -0x100, /* 0xfff..fff00 */ + /* How long is the longest ESC sequence we know? + * We want it big enough to be able to contain + * cursor position sequence "ESC [ 9999 ; 9999 R" + */ + KEYCODE_BUFFER_SIZE = 16 +}; +/* Note: fd may be in blocking or non-blocking mode, both make sense. + * For one, less uses non-blocking mode. + * Only the first read syscall inside read_key may block indefinitely + * (unless fd is in non-blocking mode), + * subsequent reads will time out after a few milliseconds. + * Return of -1 means EOF or error (errno == 0 on EOF). + * buffer[0] is used as a counter of buffered chars and must be 0 + * on first call. + * timeout: + * -2: do not poll for input; + * -1: poll(-1) (i.e. block); + * >=0: poll for TIMEOUT milliseconds, return -1/EAGAIN on timeout + */ +int64_t read_key(int fd, char *buffer, int timeout) FAST_FUNC; +void read_key_ungets(char *buffer, const char *str, unsigned len) FAST_FUNC; + + +#if ENABLE_FEATURE_EDITING +/* It's NOT just ENABLEd or disabled. 
It's a number: */ +# if defined CONFIG_FEATURE_EDITING_HISTORY && CONFIG_FEATURE_EDITING_HISTORY > 0 +# define MAX_HISTORY (CONFIG_FEATURE_EDITING_HISTORY + 0) +unsigned size_from_HISTFILESIZE(const char *hp) FAST_FUNC; +# else +# define MAX_HISTORY 0 +# endif +typedef struct line_input_t { + int flags; + const char *path_lookup; +# if MAX_HISTORY + int cnt_history; + int cur_history; + int max_history; /* must never be <= 0 */ +# if ENABLE_FEATURE_EDITING_SAVEHISTORY + /* meaning of this field depends on FEATURE_EDITING_SAVE_ON_EXIT: + * if !FEATURE_EDITING_SAVE_ON_EXIT: "how many lines are + * in on-disk history" + * if FEATURE_EDITING_SAVE_ON_EXIT: "how many in-memory lines are + * also in on-disk history (and thus need to be skipped on save)" + */ + unsigned cnt_history_in_file; + const char *hist_file; +# endif + char *history[MAX_HISTORY + 1]; +# endif +} line_input_t; +enum { + DO_HISTORY = 1 * (MAX_HISTORY > 0), + TAB_COMPLETION = 2 * ENABLE_FEATURE_TAB_COMPLETION, + USERNAME_COMPLETION = 4 * ENABLE_FEATURE_USERNAME_COMPLETION, + VI_MODE = 8 * ENABLE_FEATURE_EDITING_VI, + WITH_PATH_LOOKUP = 0x10, + FOR_SHELL = DO_HISTORY | TAB_COMPLETION | USERNAME_COMPLETION, +}; +line_input_t *new_line_input_t(int flags) FAST_FUNC; +/* So far static: void free_line_input_t(line_input_t *n) FAST_FUNC; */ +/* + * maxsize must be >= 2. + * Returns: + * -1 on read errors or EOF, or on bare Ctrl-D, + * 0 on ctrl-C (the line entered is still returned in 'command'), + * >0 length of input string, including terminating '\n' + */ +int read_line_input(line_input_t *st, const char *prompt, char *command, int maxsize, int timeout) FAST_FUNC; +void show_history(const line_input_t *st) FAST_FUNC; +# if ENABLE_FEATURE_EDITING_SAVE_ON_EXIT +void save_history(line_input_t *st); +# endif +#else +#define MAX_HISTORY 0 +int read_line_input(const char* prompt, char* command, int maxsize) FAST_FUNC; +#define read_line_input(state, prompt, command, maxsize, timeout) \ + read_line_input(prompt, command, maxsize) +#endif + + +#ifndef COMM_LEN +# ifdef TASK_COMM_LEN +enum { COMM_LEN = TASK_COMM_LEN }; +# else +/* synchronize with sizeof(task_struct.comm) in /usr/include/linux/sched.h */ +enum { COMM_LEN = 16 }; +# endif +#endif + +struct smaprec { + unsigned long mapped_rw; + unsigned long mapped_ro; + unsigned long shared_clean; + unsigned long shared_dirty; + unsigned long private_clean; + unsigned long private_dirty; + unsigned long stack; + unsigned long smap_pss, smap_swap; + unsigned long smap_size; + unsigned long smap_start; + char smap_mode[5]; + char *smap_name; +}; + +#ifndef ENABLE_PMAP +#define ENABLE_PMAP 0 +#endif +#if !ENABLE_PMAP +#define procps_read_smaps(pid, total, cb, data) \ + procps_read_smaps(pid, total) +#endif +int FAST_FUNC procps_read_smaps(pid_t pid, struct smaprec *total, + void (*cb)(struct smaprec *, void *), void *data); + +#ifndef IF_FEATURE_SHOW_THREADS +#define IF_FEATURE_SHOW_THREADS(x) +#endif +#ifndef ENABLE_FEATURE_TOPMEM +#define ENABLE_FEATURE_TOPMEM 0 +#endif +#ifndef ENABLE_FEATURE_TOP_SMP_PROCESS +#define ENABLE_FEATURE_TOP_SMP_PROCESS 0 +#endif +#ifndef ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS +#define ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS 0 +#endif +typedef struct procps_status_t { + DIR *dir; + IF_FEATURE_SHOW_THREADS(DIR *task_dir;) + uint8_t shift_pages_to_bytes; + uint8_t shift_pages_to_kb; +/* Fields are set to 0/NULL if failed to determine (or not requested) */ + uint16_t argv_len; + char *argv0; + char *exe; + IF_SELINUX(char *context;) + IF_FEATURE_SHOW_THREADS(unsigned 
main_thread_pid;) + /* Everything below must contain no ptrs to malloc'ed data: + * it is memset(0) for each process in procps_scan() */ + unsigned long vsz, rss; /* we round it to kbytes */ + unsigned long stime, utime; + unsigned long start_time; + unsigned pid; + unsigned ppid; + unsigned pgid; + unsigned sid; + unsigned uid; + unsigned gid; +#if ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS + unsigned ruid; + unsigned rgid; + int niceness; +#endif + unsigned tty_major,tty_minor; +#if ENABLE_FEATURE_TOPMEM + struct smaprec smaps; +#endif + char state[4]; + /* basename of executable in exec(2), read from /proc/N/stat + * (if executable is symlink or script, it is NOT replaced + * by link target or interpreter name) */ + char comm[COMM_LEN]; + /* user/group? - use passwd/group parsing functions */ +#if ENABLE_FEATURE_TOP_SMP_PROCESS + int last_seen_on_cpu; +#endif +} procps_status_t; +/* flag bits for procps_scan(xx, flags) calls */ +#ifndef ENABLE_KILLALL +#define ENABLE_KILLALL 0 +#endif +#ifndef ENABLE_PGREP +#define ENABLE_PGREP 0 +#endif +#ifndef ENABLE_PKILL +#define ENABLE_PKILL 0 +#endif +#ifndef ENABLE_PIDOF +#define ENABLE_PIDOF 0 +#endif +#ifndef ENABLE_SESTATUS +#define ENABLE_SESTATUS 0 +#endif +#ifndef ENABLE_FEATURE_TOP_SMP_PROCESS +#define ENABLE_FEATURE_TOP_SMP_PROCESS 0 +#endif +#ifndef ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS +#define ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS 0 +#endif +#ifndef ENABLE_FEATURE_SHADOWPASSWDS +#define ENABLE_FEATURE_SHADOWPASSWDS 0 +#endif +#ifndef ENABLE_FEATURE_SHOW_THREADS +#define ENABLE_FEATURE_SHOW_THREADS 0 +#endif +enum { + PSSCAN_PID = 1 << 0, + PSSCAN_PPID = 1 << 1, + PSSCAN_PGID = 1 << 2, + PSSCAN_SID = 1 << 3, + PSSCAN_UIDGID = 1 << 4, + PSSCAN_COMM = 1 << 5, + /* PSSCAN_CMD = 1 << 6, - use read_cmdline instead */ + PSSCAN_ARGV0 = 1 << 7, + PSSCAN_EXE = 1 << 8, + PSSCAN_STATE = 1 << 9, + PSSCAN_VSZ = 1 << 10, + PSSCAN_RSS = 1 << 11, + PSSCAN_STIME = 1 << 12, + PSSCAN_UTIME = 1 << 13, + PSSCAN_TTY = 1 << 14, + PSSCAN_SMAPS = (1 << 15) * ENABLE_FEATURE_TOPMEM, + /* NB: used by find_pid_by_name(). Any applet using it + * needs to be mentioned here. */ + PSSCAN_ARGVN = (1 << 16) * (ENABLE_KILLALL + || ENABLE_PGREP || ENABLE_PKILL + || ENABLE_PIDOF + || ENABLE_SESTATUS + ), + PSSCAN_CONTEXT = (1 << 17) * ENABLE_SELINUX, + PSSCAN_START_TIME = 1 << 18, + PSSCAN_CPU = (1 << 19) * ENABLE_FEATURE_TOP_SMP_PROCESS, + PSSCAN_NICE = (1 << 20) * ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS, + PSSCAN_RUIDGID = (1 << 21) * ENABLE_FEATURE_PS_ADDITIONAL_COLUMNS, + PSSCAN_TASKS = (1 << 22) * ENABLE_FEATURE_SHOW_THREADS, +}; +//procps_status_t* alloc_procps_scan(void) FAST_FUNC; +void free_procps_scan(procps_status_t* sp) FAST_FUNC; +procps_status_t* procps_scan(procps_status_t* sp, int flags) FAST_FUNC; +/* Format cmdline (up to col chars) into char buf[size] */ +/* Puts [comm] if cmdline is empty (-> process is a kernel thread) */ +void read_cmdline(char *buf, int size, unsigned pid, const char *comm) FAST_FUNC; +pid_t *find_pid_by_name(const char* procName) FAST_FUNC; +int comm_match(procps_status_t *p, const char *procName); +pid_t *pidlist_reverse(pid_t *pidList) FAST_FUNC; +int starts_with_cpu(const char *str) FAST_FUNC; +unsigned get_cpu_count(void) FAST_FUNC; + + +/* Use strict=1 if you process input from untrusted source: + * it will return NULL on invalid %xx (bad hex chars) + * and str + 1 if decoded char is / or NUL. + * In non-strict mode, it always succeeds (returns str), + * and also it additionally decoded '+' to space. 
+ */ +char *percent_decode_in_place(char *str, int strict) FAST_FUNC; + + +extern const char bb_uuenc_tbl_base64[] ALIGN1; +extern const char bb_uuenc_tbl_std[] ALIGN1; +void bb_uuencode(char *store, const void *s, int length, const char *tbl) FAST_FUNC; +enum { + BASE64_FLAG_UU_STOP = 0x100, + /* Sign-extends to a value which never matches fgetc result: */ + BASE64_FLAG_NO_STOP_CHAR = 0x80, +}; +const char *decode_base64(char **pp_dst, const char *src) FAST_FUNC; +void read_base64(FILE *src_stream, FILE *dst_stream, int flags) FAST_FUNC; + +typedef struct md5_ctx_t { + uint8_t wbuffer[64]; /* always correctly aligned for uint64_t */ + void (*process_block)(struct md5_ctx_t*) FAST_FUNC; + uint64_t total64; /* must be directly before hash[] */ + uint32_t hash[8]; /* 4 elements for md5, 5 for sha1, 8 for sha256 */ +} md5_ctx_t; +typedef struct md5_ctx_t sha1_ctx_t; +typedef struct md5_ctx_t sha256_ctx_t; +typedef struct sha512_ctx_t { + uint64_t total64[2]; /* must be directly before hash[] */ + uint64_t hash[8]; + uint8_t wbuffer[128]; /* always correctly aligned for uint64_t */ +} sha512_ctx_t; +typedef struct sha3_ctx_t { + uint64_t state[25]; + unsigned bytes_queued; + unsigned input_block_bytes; +} sha3_ctx_t; +void md5_begin(md5_ctx_t *ctx) FAST_FUNC; +void md5_hash(md5_ctx_t *ctx, const void *buffer, size_t len) FAST_FUNC; +void md5_end(md5_ctx_t *ctx, void *resbuf) FAST_FUNC; +void sha1_begin(sha1_ctx_t *ctx) FAST_FUNC; +#define sha1_hash md5_hash +void sha1_end(sha1_ctx_t *ctx, void *resbuf) FAST_FUNC; +void sha256_begin(sha256_ctx_t *ctx) FAST_FUNC; +#define sha256_hash md5_hash +#define sha256_end sha1_end +void sha512_begin(sha512_ctx_t *ctx) FAST_FUNC; +void sha512_hash(sha512_ctx_t *ctx, const void *buffer, size_t len) FAST_FUNC; +void sha512_end(sha512_ctx_t *ctx, void *resbuf) FAST_FUNC; +void sha3_begin(sha3_ctx_t *ctx) FAST_FUNC; +void sha3_hash(sha3_ctx_t *ctx, const void *buffer, size_t len) FAST_FUNC; +void sha3_end(sha3_ctx_t *ctx, void *resbuf) FAST_FUNC; + +extern uint32_t *global_crc32_table; +uint32_t *crc32_filltable(uint32_t *tbl256, int endian) FAST_FUNC; +uint32_t crc32_block_endian1(uint32_t val, const void *buf, unsigned len, uint32_t *crc_table) FAST_FUNC; +uint32_t crc32_block_endian0(uint32_t val, const void *buf, unsigned len, uint32_t *crc_table) FAST_FUNC; + +typedef struct masks_labels_t { + const char *labels; + const int masks[]; +} masks_labels_t; +int print_flags_separated(const int *masks, const char *labels, + int flags, const char *separator) FAST_FUNC; +int print_flags(const masks_labels_t *ml, int flags) FAST_FUNC; + +typedef struct bb_progress_t { + unsigned last_size; + unsigned last_update_sec; + unsigned last_change_sec; + unsigned start_sec; + const char *curfile; +} bb_progress_t; + +#define is_bb_progress_inited(p) ((p)->curfile != NULL) +#define bb_progress_free(p) do { \ + if (ENABLE_UNICODE_SUPPORT) free((char*)((p)->curfile)); \ + (p)->curfile = NULL; \ +} while (0) +void bb_progress_init(bb_progress_t *p, const char *curfile) FAST_FUNC; +void bb_progress_update(bb_progress_t *p, + uoff_t beg_range, + uoff_t transferred, + uoff_t totalsize) FAST_FUNC; + +unsigned ubi_devnum_from_devname(const char *str) FAST_FUNC; +int ubi_get_volid_by_name(unsigned ubi_devnum, const char *vol_name) FAST_FUNC; + + +extern const char *applet_name; + +/* Some older linkers don't perform string merging, we used to have common strings + * as global arrays to do it by hand. 
But: + * (1) newer linkers do it themselves, + * (2) however, they DONT merge string constants with global arrays, + * even if the value is the same (!). Thus global arrays actually + * increased size a bit: for example, "/etc/passwd" string from libc + * wasn't merged with bb_path_passwd_file[] array! + * Therefore now we use #defines. + */ +/* "BusyBox vN.N.N (timestamp or extra_version)" */ +extern const char bb_banner[] ALIGN1; +extern const char bb_msg_memory_exhausted[] ALIGN1; +extern const char bb_msg_invalid_date[] ALIGN1; +#define bb_msg_read_error "read error" +#define bb_msg_write_error "write error" +extern const char bb_msg_unknown[] ALIGN1; +extern const char bb_msg_can_not_create_raw_socket[] ALIGN1; +extern const char bb_msg_perm_denied_are_you_root[] ALIGN1; +extern const char bb_msg_you_must_be_root[] ALIGN1; +extern const char bb_msg_requires_arg[] ALIGN1; +extern const char bb_msg_invalid_arg_to[] ALIGN1; +extern const char bb_msg_standard_input[] ALIGN1; +extern const char bb_msg_standard_output[] ALIGN1; + +/* NB: (bb_hexdigits_upcase[i] | 0x20) -> lowercase hex digit */ +extern const char bb_hexdigits_upcase[] ALIGN1; + +extern const char bb_path_wtmp_file[] ALIGN1; + +/* Busybox mount uses either /proc/mounts or /etc/mtab to + * get the list of currently mounted filesystems */ +#define bb_path_mtab_file IF_FEATURE_MTAB_SUPPORT("/etc/mtab")IF_NOT_FEATURE_MTAB_SUPPORT("/proc/mounts") + +#define bb_path_passwd_file _PATH_PASSWD +#define bb_path_group_file _PATH_GROUP +#define bb_path_shadow_file _PATH_SHADOW +#define bb_path_gshadow_file _PATH_GSHADOW + +#define bb_path_motd_file "/etc/motd" + +#define bb_dev_null "/dev/null" +extern const char bb_busybox_exec_path[] ALIGN1; +/* util-linux manpage says /sbin:/bin:/usr/sbin:/usr/bin, + * but I want to save a few bytes here */ +extern const char bb_PATH_root_path[] ALIGN1; /* "PATH=/sbin:/usr/sbin:/bin:/usr/bin" */ +#define bb_default_root_path (bb_PATH_root_path + sizeof("PATH")) +#define bb_default_path (bb_PATH_root_path + sizeof("PATH=/sbin:/usr/sbin")) + +extern const int const_int_0; +//extern const int const_int_1; + +/* This struct is deliberately not defined. */ +/* See docs/keep_data_small.txt */ +struct globals; +/* '*const' ptr makes gcc optimize code much better. + * Magic prevents ptr_to_globals from going into rodata. + * If you want to assign a value, use SET_PTR_TO_GLOBALS(x) */ +extern struct globals *const ptr_to_globals; +/* At least gcc 3.4.6 on mipsel system needs optimization barrier */ +#define barrier() __asm__ __volatile__("":::"memory") +#define SET_PTR_TO_GLOBALS(x) do { \ + (*(struct globals**)&ptr_to_globals) = (void*)(x); \ + barrier(); \ +} while (0) +#define FREE_PTR_TO_GLOBALS() do { \ + if (ENABLE_FEATURE_CLEAN_UP) { \ + free(ptr_to_globals); \ + } \ +} while (0) + +/* You can change LIBBB_DEFAULT_LOGIN_SHELL, but don't use it, + * use bb_default_login_shell and following defines. + * If you change LIBBB_DEFAULT_LOGIN_SHELL, + * don't forget to change increment constant. */ +#define LIBBB_DEFAULT_LOGIN_SHELL "-/bin/sh" +extern const char bb_default_login_shell[] ALIGN1; +/* "/bin/sh" */ +#define DEFAULT_SHELL (bb_default_login_shell+1) +/* "sh" */ +#define DEFAULT_SHELL_SHORT_NAME (bb_default_login_shell+6) + +/* The following devices are the same on all systems. 
*/ +#define CURRENT_TTY "/dev/tty" +#define DEV_CONSOLE "/dev/console" + +#ifndef ENABLE_FEATURE_DEVFS +#define ENABLE_FEATURE_DEVFS 0 +#endif + +#if defined(__FreeBSD_kernel__) +# define CURRENT_VC CURRENT_TTY +# define VC_1 "/dev/ttyv0" +# define VC_2 "/dev/ttyv1" +# define VC_3 "/dev/ttyv2" +# define VC_4 "/dev/ttyv3" +# define VC_5 "/dev/ttyv4" +# define VC_FORMAT "/dev/ttyv%d" +#elif defined(__GNU__) +# define CURRENT_VC CURRENT_TTY +# define VC_1 "/dev/tty1" +# define VC_2 "/dev/tty2" +# define VC_3 "/dev/tty3" +# define VC_4 "/dev/tty4" +# define VC_5 "/dev/tty5" +# define VC_FORMAT "/dev/tty%d" +#elif ENABLE_FEATURE_DEVFS +/*Linux, obsolete devfs names */ +# define CURRENT_VC "/dev/vc/0" +# define VC_1 "/dev/vc/1" +# define VC_2 "/dev/vc/2" +# define VC_3 "/dev/vc/3" +# define VC_4 "/dev/vc/4" +# define VC_5 "/dev/vc/5" +# define VC_FORMAT "/dev/vc/%d" +# define LOOP_FORMAT "/dev/loop/%u" +# define LOOP_NAMESIZE (sizeof("/dev/loop/") + sizeof(int)*3 + 1) +# define LOOP_NAME "/dev/loop/" +# define FB_0 "/dev/fb/0" +#else +/*Linux, normal names */ +# define CURRENT_VC "/dev/tty0" +# define VC_1 "/dev/tty1" +# define VC_2 "/dev/tty2" +# define VC_3 "/dev/tty3" +# define VC_4 "/dev/tty4" +# define VC_5 "/dev/tty5" +# define VC_FORMAT "/dev/tty%d" +# define LOOP_FORMAT "/dev/loop%u" +# define LOOP_NAMESIZE (sizeof("/dev/loop") + sizeof(int)*3 + 1) +# define LOOP_NAME "/dev/loop" +# define FB_0 "/dev/fb0" +#endif + + +#define ARRAY_SIZE(x) ((unsigned)(sizeof(x) / sizeof((x)[0]))) +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + + +/* We redefine ctype macros. Unicode-correct handling of char types + * can't be done with such byte-oriented operations anyway, + * we don't lose anything. + */ +#undef isalnum +#undef isalpha +#undef isascii +#undef isblank +#undef iscntrl +#undef isdigit +#undef isgraph +#undef islower +#undef isprint +#undef ispunct +#undef isspace +#undef isupper +#undef isxdigit +#undef toupper +#undef tolower + +/* We save ~500 bytes on isdigit alone. + * BTW, x86 likes (unsigned char) cast more than (unsigned). */ + +/* These work the same for ASCII and Unicode, + * assuming no one asks "is this a *Unicode* letter?" using isalpha(letter) */ +#define isascii(a) ((unsigned char)(a) <= 0x7f) +#define isdigit(a) ((unsigned char)((a) - '0') <= 9) +#define isupper(a) ((unsigned char)((a) - 'A') <= ('Z' - 'A')) +#define islower(a) ((unsigned char)((a) - 'a') <= ('z' - 'a')) +#define isalpha(a) ((unsigned char)(((a)|0x20) - 'a') <= ('z' - 'a')) +#define isblank(a) ({ unsigned char bb__isblank = (a); bb__isblank == ' ' || bb__isblank == '\t'; }) +#define iscntrl(a) ({ unsigned char bb__iscntrl = (a); bb__iscntrl < ' ' || bb__iscntrl == 0x7f; }) +/* In POSIX/C locale isspace is only these chars: "\t\n\v\f\r" and space. + * "\t\n\v\f\r" happen to have ASCII codes 9,10,11,12,13. 
+ */ +#define isspace(a) ({ unsigned char bb__isspace = (a) - 9; bb__isspace == (' ' - 9) || bb__isspace <= (13 - 9); }) +// Unsafe wrt NUL: #define ispunct(a) (strchr("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", (a)) != NULL) +#define ispunct(a) (strchrnul("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", (a))[0]) +// Bigger code: #define isalnum(a) ({ unsigned char bb__isalnum = (a) - '0'; bb__isalnum <= 9 || ((bb__isalnum - ('A' - '0')) & 0xdf) <= 25; }) +#define isalnum(a) bb_ascii_isalnum(a) +static ALWAYS_INLINE int bb_ascii_isalnum(unsigned char a) +{ + unsigned char b = a - '0'; + if (b <= 9) + return (b <= 9); + b = (a|0x20) - 'a'; + return b <= 'z' - 'a'; +} +#define isxdigit(a) bb_ascii_isxdigit(a) +static ALWAYS_INLINE int bb_ascii_isxdigit(unsigned char a) +{ + unsigned char b = a - '0'; + if (b <= 9) + return (b <= 9); + b = (a|0x20) - 'a'; + return b <= 'f' - 'a'; +} +#define toupper(a) bb_ascii_toupper(a) +static ALWAYS_INLINE unsigned char bb_ascii_toupper(unsigned char a) +{ + unsigned char b = a - 'a'; + if (b <= ('z' - 'a')) + a -= 'a' - 'A'; + return a; +} +#define tolower(a) bb_ascii_tolower(a) +static ALWAYS_INLINE unsigned char bb_ascii_tolower(unsigned char a) +{ + unsigned char b = a - 'A'; + if (b <= ('Z' - 'A')) + a += 'a' - 'A'; + return a; +} + +/* In ASCII and Unicode, these are likely to be very different. + * Let's prevent ambiguous usage from the start */ +#define isgraph(a) isgraph_is_ambiguous_dont_use(a) +#define isprint(a) isprint_is_ambiguous_dont_use(a) +/* NB: must not treat EOF as isgraph or isprint */ +#define isgraph_asciionly(a) ((unsigned)((a) - 0x21) <= 0x7e - 0x21) +#define isprint_asciionly(a) ((unsigned)((a) - 0x20) <= 0x7e - 0x20) + + +/* Simple unit-testing framework */ + +typedef void (*bbunit_testfunc)(void); + +struct bbunit_listelem { + const char* name; + bbunit_testfunc testfunc; +}; + +void bbunit_registertest(struct bbunit_listelem* test); +void bbunit_settestfailed(void); + +#define BBUNIT_DEFINE_TEST(NAME) \ + static void bbunit_##NAME##_test(void); \ + static struct bbunit_listelem bbunit_##NAME##_elem = { \ + .name = #NAME, \ + .testfunc = bbunit_##NAME##_test, \ + }; \ + static void INIT_FUNC bbunit_##NAME##_register(void) \ + { \ + bbunit_registertest(&bbunit_##NAME##_elem); \ + } \ + static void bbunit_##NAME##_test(void) + +/* + * Both 'goto bbunit_end' and 'break' are here only to get rid + * of compiler warnings. + */ +#define BBUNIT_ENDTEST \ + do { \ + goto bbunit_end; \ + bbunit_end: \ + break; \ + } while (0) + +#define BBUNIT_PRINTASSERTFAIL \ + do { \ + bb_error_msg( \ + "[ERROR] Assertion failed in file %s, line %d", \ + __FILE__, __LINE__); \ + } while (0) + +#define BBUNIT_ASSERTION_FAILED \ + do { \ + bbunit_settestfailed(); \ + goto bbunit_end; \ + } while (0) + +/* + * Assertions. + * For now we only offer assertions which cause tests to fail + * immediately. In the future 'expects' might be added too - + * similar to those offered by the gtest framework. 
+ */ +#define BBUNIT_ASSERT_EQ(EXPECTED, ACTUAL) \ + do { \ + if ((EXPECTED) != (ACTUAL)) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] '%s' isn't equal to '%s'", \ + #EXPECTED, #ACTUAL); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_NOTEQ(EXPECTED, ACTUAL) \ + do { \ + if ((EXPECTED) == (ACTUAL)) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] '%s' is equal to '%s'", \ + #EXPECTED, #ACTUAL); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_NOTNULL(PTR) \ + do { \ + if ((PTR) == NULL) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] '%s' is NULL!", #PTR); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_NULL(PTR) \ + do { \ + if ((PTR) != NULL) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] '%s' is not NULL!", #PTR); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_FALSE(STATEMENT) \ + do { \ + if ((STATEMENT)) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] Statement '%s' evaluated to true!", \ + #STATEMENT); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_TRUE(STATEMENT) \ + do { \ + if (!(STATEMENT)) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] Statement '%s' evaluated to false!", \ + #STATEMENT); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_STREQ(STR1, STR2) \ + do { \ + if (strcmp(STR1, STR2) != 0) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] Strings '%s' and '%s' " \ + "are not the same", STR1, STR2); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + +#define BBUNIT_ASSERT_STRNOTEQ(STR1, STR2) \ + do { \ + if (strcmp(STR1, STR2) == 0) { \ + BBUNIT_PRINTASSERTFAIL; \ + bb_error_msg("[ERROR] Strings '%s' and '%s' " \ + "are the same, but were " \ + "expected to differ", STR1, STR2); \ + BBUNIT_ASSERTION_FAILED; \ + } \ + } while (0) + + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/liblzo_interface.h b/probe-busybox/include/liblzo_interface.h new file mode 100644 index 00000000..b7f1b639 --- /dev/null +++ b/probe-busybox/include/liblzo_interface.h @@ -0,0 +1,71 @@ +/* + This file is part of the LZO real-time data compression library. + + Copyright (C) 1996..2008 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + Markus F.X.J. Oberhumer + http://www.oberhumer.com/opensource/lzo/ + + The LZO library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The LZO library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the LZO library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#define LZO1X +#undef LZO1Y + +#undef assert +/* +static void die_at(int line) +{ + bb_error_msg_and_die("internal error at %d", line); +} +#define assert(v) if (!(v)) die_at(__LINE__) +*/ +#define assert(v) ((void)0) + +int lzo1x_1_compress(const uint8_t* src, unsigned src_len, + uint8_t* dst, unsigned* dst_len, + void* wrkmem); +int lzo1x_1_15_compress(const uint8_t* src, unsigned src_len, + uint8_t* dst, unsigned* dst_len, + void* wrkmem); +int lzo1x_999_compress_level(const uint8_t* in, unsigned in_len, + uint8_t* out, unsigned* out_len, + void* wrkmem, + int compression_level); + +/* decompression */ +//int lzo1x_decompress(const uint8_t* src, unsigned src_len, +// uint8_t* dst, unsigned* dst_len, +// void* wrkmem /* NOT USED */); +/* safe decompression with overrun testing */ +int lzo1x_decompress_safe(const uint8_t* src, unsigned src_len, + uint8_t* dst, unsigned* dst_len, + void* wrkmem /* NOT USED */); + +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) /* [not used right now] */ +#define LZO_E_NOT_COMPRESSIBLE (-3) /* [not used right now] */ +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) +#define LZO_E_NOT_YET_IMPLEMENTED (-9) /* [not used right now] */ + +/* lzo-2.03/include/lzo/lzoconf.h */ +#define LZO_VERSION 0x2030 diff --git a/probe-busybox/include/platform.h b/probe-busybox/include/platform.h new file mode 100644 index 00000000..c987d418 --- /dev/null +++ b/probe-busybox/include/platform.h @@ -0,0 +1,582 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright 2006, Bernhard Reutner-Fischer + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#ifndef BB_PLATFORM_H +#define BB_PLATFORM_H 1 + + +/* Convenience macros to test the version of gcc. */ +#undef __GNUC_PREREQ +#if defined __GNUC__ && defined __GNUC_MINOR__ +# define __GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +# define __GNUC_PREREQ(maj, min) 0 +#endif + +/* __restrict is known in EGCS 1.2 and above. */ +#if !__GNUC_PREREQ(2,92) +# ifndef __restrict +# define __restrict +# endif +#endif + +#if !__GNUC_PREREQ(2,7) +# ifndef __attribute__ +# define __attribute__(x) +# endif +#endif + +#undef inline +#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 199901L +/* it's a keyword */ +#elif __GNUC_PREREQ(2,7) +# define inline __inline__ +#else +# define inline +#endif + +#ifndef __const +# define __const const +#endif + +#define UNUSED_PARAM __attribute__ ((__unused__)) +#define NORETURN __attribute__ ((__noreturn__)) +/* "The malloc attribute is used to tell the compiler that a function + * may be treated as if any non-NULL pointer it returns cannot alias + * any other pointer valid when the function returns. This will often + * improve optimization. Standard functions with this property include + * malloc and calloc. realloc-like functions have this property as long + * as the old pointer is never referred to (including comparing it + * to the new pointer) after the function returns a non-NULL value." + */ +#define RETURNS_MALLOC __attribute__ ((malloc)) +#define PACKED __attribute__ ((__packed__)) +#define ALIGNED(m) __attribute__ ((__aligned__(m))) + +/* __NO_INLINE__: some gcc's do not honor inlining! 
:( */ +#if __GNUC_PREREQ(3,0) && !defined(__NO_INLINE__) +# define ALWAYS_INLINE __attribute__ ((always_inline)) inline +/* I've seen a toolchain where I needed __noinline__ instead of noinline */ +# define NOINLINE __attribute__((__noinline__)) +# if !ENABLE_WERROR +# define DEPRECATED __attribute__ ((__deprecated__)) +# define UNUSED_PARAM_RESULT __attribute__ ((warn_unused_result)) +# else +# define DEPRECATED +# define UNUSED_PARAM_RESULT +# endif +#else +# define ALWAYS_INLINE inline +# define NOINLINE +# define DEPRECATED +# define UNUSED_PARAM_RESULT +#endif + +/* used by unit test machinery to run registration functions before calling main() */ +#define INIT_FUNC __attribute__ ((constructor)) + +/* -fwhole-program makes all symbols local. The attribute externally_visible + * forces a symbol global. */ +#if __GNUC_PREREQ(4,1) +# define EXTERNALLY_VISIBLE __attribute__(( visibility("default") )) +//__attribute__ ((__externally_visible__)) +#else +# define EXTERNALLY_VISIBLE +#endif + +/* At 4.4 gcc become much more anal about this, need to use "aliased" types */ +#if __GNUC_PREREQ(4,4) +# define FIX_ALIASING __attribute__((__may_alias__)) +#else +# define FIX_ALIASING +#endif + +/* We use __extension__ in some places to suppress -pedantic warnings + * about GCC extensions. This feature didn't work properly before + * gcc 2.8. */ +#if !__GNUC_PREREQ(2,8) +# ifndef __extension__ +# define __extension__ +# endif +#endif + +/* FAST_FUNC is a qualifier which (possibly) makes function call faster + * and/or smaller by using modified ABI. It is usually only needed + * on non-static, busybox internal functions. Recent versions of gcc + * optimize statics automatically. FAST_FUNC on static is required + * only if you need to match a function pointer's type */ +#if __GNUC_PREREQ(3,0) && defined(i386) /* || defined(__x86_64__)? */ +/* stdcall makes callee to pop arguments from stack, not caller */ +# define FAST_FUNC __attribute__((regparm(3),stdcall)) +/* #elif ... - add your favorite arch today! */ +#else +# define FAST_FUNC +#endif + +/* Make all declarations hidden (-fvisibility flag only affects definitions) */ +/* (don't include system headers after this until corresponding pop!) */ +#if __GNUC_PREREQ(4,1) && !defined(__CYGWIN__) +# define PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN _Pragma("GCC visibility push(hidden)") +# define POP_SAVED_FUNCTION_VISIBILITY _Pragma("GCC visibility pop") +#else +# define PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN +# define POP_SAVED_FUNCTION_VISIBILITY +#endif + +/* gcc-2.95 had no va_copy but only __va_copy. 
*/ +#if !__GNUC_PREREQ(3,0) +# include +# if !defined va_copy && defined __va_copy +# define va_copy(d,s) __va_copy((d),(s)) +# endif +#endif + + +/* ---- Endian Detection ------------------------------------ */ + +#include +#if defined(__digital__) && defined(__unix__) +# include +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) \ + || defined(__APPLE__) +# include /* rlimit */ +# include +# define bswap_64 __bswap64 +# define bswap_32 __bswap32 +# define bswap_16 __bswap16 +#else +# include +# include +#endif + +#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN +# define BB_BIG_ENDIAN 1 +# define BB_LITTLE_ENDIAN 0 +#elif defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN +# define BB_BIG_ENDIAN 0 +# define BB_LITTLE_ENDIAN 1 +#elif defined(_BYTE_ORDER) && _BYTE_ORDER == _BIG_ENDIAN +# define BB_BIG_ENDIAN 1 +# define BB_LITTLE_ENDIAN 0 +#elif defined(_BYTE_ORDER) && _BYTE_ORDER == _LITTLE_ENDIAN +# define BB_BIG_ENDIAN 0 +# define BB_LITTLE_ENDIAN 1 +#elif defined(BYTE_ORDER) && BYTE_ORDER == BIG_ENDIAN +# define BB_BIG_ENDIAN 1 +# define BB_LITTLE_ENDIAN 0 +#elif defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN +# define BB_BIG_ENDIAN 0 +# define BB_LITTLE_ENDIAN 1 +#elif defined(__386__) +# define BB_BIG_ENDIAN 0 +# define BB_LITTLE_ENDIAN 1 +#else +# error "Can't determine endianness" +#endif + +#if ULONG_MAX > 0xffffffff +# define bb_bswap_64(x) bswap_64(x) +#endif + +/* SWAP_LEnn means "convert CPU<->little_endian by swapping bytes" */ +#if BB_BIG_ENDIAN +# define SWAP_BE16(x) (x) +# define SWAP_BE32(x) (x) +# define SWAP_BE64(x) (x) +# define SWAP_LE16(x) bswap_16(x) +# define SWAP_LE32(x) bswap_32(x) +# define SWAP_LE64(x) bb_bswap_64(x) +# define IF_BIG_ENDIAN(...) __VA_ARGS__ +# define IF_LITTLE_ENDIAN(...) +#else +# define SWAP_BE16(x) bswap_16(x) +# define SWAP_BE32(x) bswap_32(x) +# define SWAP_BE64(x) bb_bswap_64(x) +# define SWAP_LE16(x) (x) +# define SWAP_LE32(x) (x) +# define SWAP_LE64(x) (x) +# define IF_BIG_ENDIAN(...) +# define IF_LITTLE_ENDIAN(...) __VA_ARGS__ +#endif + + +/* ---- Unaligned access ------------------------------------ */ + +#include +typedef int bb__aliased_int FIX_ALIASING; +typedef long bb__aliased_long FIX_ALIASING; +typedef uint16_t bb__aliased_uint16_t FIX_ALIASING; +typedef uint32_t bb__aliased_uint32_t FIX_ALIASING; +typedef uint64_t bb__aliased_uint64_t FIX_ALIASING; + +/* NB: unaligned parameter should be a pointer, aligned one - + * a lvalue. This makes it more likely to not swap them by mistake + */ +#if defined(i386) || defined(__x86_64__) || defined(__powerpc__) +# define BB_UNALIGNED_MEMACCESS_OK 1 +# define move_from_unaligned_int(v, intp) ((v) = *(bb__aliased_int*)(intp)) +# define move_from_unaligned_long(v, longp) ((v) = *(bb__aliased_long*)(longp)) +# define move_from_unaligned16(v, u16p) ((v) = *(bb__aliased_uint16_t*)(u16p)) +# define move_from_unaligned32(v, u32p) ((v) = *(bb__aliased_uint32_t*)(u32p)) +# define move_to_unaligned16(u16p, v) (*(bb__aliased_uint16_t*)(u16p) = (v)) +# define move_to_unaligned32(u32p, v) (*(bb__aliased_uint32_t*)(u32p) = (v)) +/* #elif ... - add your favorite arch today! 
*/ +#else +# define BB_UNALIGNED_MEMACCESS_OK 0 +/* performs reasonably well (gcc usually inlines memcpy here) */ +# define move_from_unaligned_int(v, intp) (memcpy(&(v), (intp), sizeof(int))) +# define move_from_unaligned_long(v, longp) (memcpy(&(v), (longp), sizeof(long))) +# define move_from_unaligned16(v, u16p) (memcpy(&(v), (u16p), 2)) +# define move_from_unaligned32(v, u32p) (memcpy(&(v), (u32p), 4)) +# define move_to_unaligned16(u16p, v) do { \ + uint16_t __t = (v); \ + memcpy((u16p), &__t, 2); \ +} while (0) +# define move_to_unaligned32(u32p, v) do { \ + uint32_t __t = (v); \ + memcpy((u32p), &__t, 4); \ +} while (0) +#endif + + +/* ---- Size-saving "small" ints (arch-dependent) ----------- */ + +#if defined(i386) || defined(__x86_64__) || defined(__mips__) || defined(__cris__) +/* add other arches which benefit from this... */ +typedef signed char smallint; +typedef unsigned char smalluint; +#else +/* for arches where byte accesses generate larger code: */ +typedef int smallint; +typedef unsigned smalluint; +#endif + +/* ISO C Standard: 7.16 Boolean type and values */ +#if (defined __digital__ && defined __unix__) +/* old system without (proper) C99 support */ +# define bool smalluint +#else +/* modern system, so use it */ +# include +#endif + + +/*----- Kernel versioning ------------------------------------*/ + +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +#ifdef __UCLIBC__ +# define UCLIBC_VERSION KERNEL_VERSION(__UCLIBC_MAJOR__, __UCLIBC_MINOR__, __UCLIBC_SUBLEVEL__) +#else +# define UCLIBC_VERSION 0 +#endif + + +/* ---- Miscellaneous --------------------------------------- */ + +#if defined __GLIBC__ \ + || defined __UCLIBC__ \ + || defined __dietlibc__ \ + || defined __BIONIC__ \ + || defined _NEWLIB_VERSION +# include +#endif + +/* Define bb_setpgrp */ +#if defined(__digital__) && defined(__unix__) +/* use legacy setpgrp(pid_t, pid_t) for now. move to platform.c */ +# define bb_setpgrp() do { pid_t __me = getpid(); setpgrp(__me, __me); } while (0) +#else +# define bb_setpgrp() setpgrp() +#endif + +/* fdprintf is more readable, we used it before dprintf was standardized */ +#include +#define fdprintf dprintf + +/* Useful for defeating gcc's alignment of "char message[]"-like data */ +#if !defined(__s390__) + /* on s390[x], non-word-aligned data accesses require larger code */ +# define ALIGN1 __attribute__((aligned(1))) +# define ALIGN2 __attribute__((aligned(2))) +# define ALIGN4 __attribute__((aligned(4))) +#else +/* Arches which MUST have 2 or 4 byte alignment for everything are here */ +# define ALIGN1 +# define ALIGN2 +# define ALIGN4 +#endif + +/* + * For 0.9.29 and svn, __ARCH_USE_MMU__ indicates no-mmu reliably. + * For earlier versions there is no reliable way to check if we are building + * for a mmu-less system. + */ +#if ENABLE_NOMMU || \ + (defined __UCLIBC__ && \ + UCLIBC_VERSION > KERNEL_VERSION(0, 9, 28) && \ + !defined __ARCH_USE_MMU__) +# define BB_MMU 0 +# define USE_FOR_NOMMU(...) __VA_ARGS__ +# define USE_FOR_MMU(...) +#else +# define BB_MMU 1 +# define USE_FOR_NOMMU(...) +# define USE_FOR_MMU(...) 
__VA_ARGS__ +#endif + +#if defined(__digital__) && defined(__unix__) +# include +# include +# define PRIu32 "u" +# if !defined ADJ_OFFSET_SINGLESHOT && defined MOD_CLKA && defined MOD_OFFSET +# define ADJ_OFFSET_SINGLESHOT (MOD_CLKA | MOD_OFFSET) +# endif +# if !defined ADJ_FREQUENCY && defined MOD_FREQUENCY +# define ADJ_FREQUENCY MOD_FREQUENCY +# endif +# if !defined ADJ_TIMECONST && defined MOD_TIMECONST +# define ADJ_TIMECONST MOD_TIMECONST +# endif +# if !defined ADJ_TICK && defined MOD_CLKB +# define ADJ_TICK MOD_CLKB +# endif +#endif + +#if defined(__CYGWIN__) +# define MAXSYMLINKS SYMLOOP_MAX +#endif + +#if defined(ANDROID) || defined(__ANDROID__) +# define BB_ADDITIONAL_PATH ":/system/sbin:/system/bin:/system/xbin" +# define SYS_ioprio_set __NR_ioprio_set +# define SYS_ioprio_get __NR_ioprio_get +#endif + + +/* ---- Who misses what? ------------------------------------ */ + +/* Assume all these functions and header files exist by default. + * Platforms where it is not true will #undef them below. + */ +#define HAVE_CLEARENV 1 +#define HAVE_FDATASYNC 1 +#define HAVE_DPRINTF 1 +#define HAVE_MEMRCHR 1 +#define HAVE_MKDTEMP 1 +#define HAVE_TTYNAME_R 1 +#define HAVE_PTSNAME_R 1 +#define HAVE_SETBIT 1 +#define HAVE_SIGHANDLER_T 1 +#define HAVE_STPCPY 1 +#define HAVE_MEMPCPY 1 +#define HAVE_STRCASESTR 1 +#define HAVE_STRCHRNUL 1 +#define HAVE_STRSEP 1 +#define HAVE_STRSIGNAL 1 +#define HAVE_STRVERSCMP 1 +#define HAVE_VASPRINTF 1 +#define HAVE_USLEEP 1 +#define HAVE_UNLOCKED_STDIO 1 +#define HAVE_UNLOCKED_LINE_OPS 1 +#define HAVE_GETLINE 1 +#define HAVE_XTABS 1 +#define HAVE_MNTENT_H 1 +#define HAVE_NET_ETHERNET_H 1 +#define HAVE_SYS_STATFS_H 1 + +#if defined(__UCLIBC__) +# if UCLIBC_VERSION < KERNEL_VERSION(0, 9, 32) +# undef HAVE_STRVERSCMP +# endif +# if UCLIBC_VERSION >= KERNEL_VERSION(0, 9, 30) +# ifndef __UCLIBC_SUSV3_LEGACY__ +# undef HAVE_USLEEP +# endif +# endif +#endif + +#if defined(__WATCOMC__) +# undef HAVE_DPRINTF +# undef HAVE_GETLINE +# undef HAVE_MEMRCHR +# undef HAVE_MKDTEMP +# undef HAVE_SETBIT +# undef HAVE_STPCPY +# undef HAVE_STRCASESTR +# undef HAVE_STRCHRNUL +# undef HAVE_STRSEP +# undef HAVE_STRSIGNAL +# undef HAVE_STRVERSCMP +# undef HAVE_VASPRINTF +# undef HAVE_UNLOCKED_STDIO +# undef HAVE_UNLOCKED_LINE_OPS +# undef HAVE_NET_ETHERNET_H +#endif + +#if defined(__CYGWIN__) +# undef HAVE_CLEARENV +# undef HAVE_FDPRINTF +# undef HAVE_MEMRCHR +# undef HAVE_PTSNAME_R +# undef HAVE_STRVERSCMP +# undef HAVE_UNLOCKED_LINE_OPS +#endif + +/* These BSD-derived OSes share many similarities */ +#if (defined __digital__ && defined __unix__) \ + || defined __APPLE__ \ + || defined __OpenBSD__ || defined __NetBSD__ +# undef HAVE_CLEARENV +# undef HAVE_FDATASYNC +# undef HAVE_GETLINE +# undef HAVE_MNTENT_H +# undef HAVE_PTSNAME_R +# undef HAVE_SYS_STATFS_H +# undef HAVE_SIGHANDLER_T +# undef HAVE_STRVERSCMP +# undef HAVE_XTABS +# undef HAVE_DPRINTF +# undef HAVE_UNLOCKED_STDIO +# undef HAVE_UNLOCKED_LINE_OPS +#endif + +#if defined(__dietlibc__) +# undef HAVE_STRCHRNUL +#endif + +#if defined(__APPLE__) +# undef HAVE_STRCHRNUL +#endif + +#if defined(__FreeBSD__) +/* users say mempcpy is not present in FreeBSD 9.x */ +# undef HAVE_MEMPCPY +# undef HAVE_CLEARENV +# undef HAVE_FDATASYNC +# undef HAVE_MNTENT_H +# undef HAVE_PTSNAME_R +# undef HAVE_SYS_STATFS_H +# undef HAVE_SIGHANDLER_T +# undef HAVE_STRVERSCMP +# undef HAVE_XTABS +# undef HAVE_UNLOCKED_LINE_OPS +# include +# if __FreeBSD_version < 1000029 +# undef HAVE_STRCHRNUL /* FreeBSD added strchrnul() between 1000028 and 1000029 
*/ +# endif +#endif + +#if defined(__NetBSD__) +# define HAVE_GETLINE 1 /* Recent NetBSD versions have getline() */ +#endif + +#if defined(__digital__) && defined(__unix__) +# undef HAVE_STPCPY +#endif + +#if defined(ANDROID) || defined(__ANDROID__) +# if __ANDROID_API__ < 8 + /* ANDROID < 8 has no [f]dprintf at all */ +# undef HAVE_DPRINTF +# elif __ANDROID_API__ < 21 + /* ANDROID < 21 has fdprintf */ +# define dprintf fdprintf +# else + /* ANDROID >= 21 has standard dprintf */ +# endif +# if __ANDROID_API__ < 21 +# undef HAVE_TTYNAME_R +# undef HAVE_GETLINE +# undef HAVE_STPCPY +# endif +# undef HAVE_MEMPCPY +# undef HAVE_STRCHRNUL +# undef HAVE_STRVERSCMP +# undef HAVE_UNLOCKED_LINE_OPS +# undef HAVE_NET_ETHERNET_H +#endif + +/* + * Now, define prototypes for all the functions defined in platform.c + * These must come after all the HAVE_* macros are defined (or not) + */ + +#ifndef HAVE_DPRINTF +extern int dprintf(int fd, const char *format, ...); +#endif + +#ifndef HAVE_MEMRCHR +extern void *memrchr(const void *s, int c, size_t n) FAST_FUNC; +#endif + +#ifndef HAVE_MKDTEMP +extern char *mkdtemp(char *template) FAST_FUNC; +#endif + +#ifndef HAVE_TTYNAME_R +#define ttyname_r bb_ttyname_r +extern int ttyname_r(int fd, char *buf, size_t buflen); +#endif + +#ifndef HAVE_SETBIT +# define setbit(a, b) ((a)[(b) >> 3] |= 1 << ((b) & 7)) +# define clrbit(a, b) ((a)[(b) >> 3] &= ~(1 << ((b) & 7))) +#endif + +#ifndef HAVE_SIGHANDLER_T +typedef void (*sighandler_t)(int); +#endif + +#ifndef HAVE_STPCPY +extern char *stpcpy(char *p, const char *to_add) FAST_FUNC; +#endif + +#ifndef HAVE_MEMPCPY +#include +/* In case we are wrong about !HAVE_MEMPCPY, and toolchain _does_ have + * mempcpy(), avoid colliding with it: + */ +#define mempcpy bb__mempcpy +static ALWAYS_INLINE void *mempcpy(void *dest, const void *src, size_t len) +{ + return memcpy(dest, src, len) + len; +} +#endif + +#ifndef HAVE_STRCASESTR +extern char *strcasestr(const char *s, const char *pattern) FAST_FUNC; +#endif + +#ifndef HAVE_STRCHRNUL +extern char *strchrnul(const char *s, int c) FAST_FUNC; +#endif + +#ifndef HAVE_STRSEP +extern char *strsep(char **stringp, const char *delim) FAST_FUNC; +#endif + +#ifndef HAVE_STRSIGNAL +/* Not exactly the same: instead of "Stopped" it shows "STOP" etc */ +# define strsignal(sig) get_signame(sig) +#endif + +#ifndef HAVE_USLEEP +extern int usleep(unsigned) FAST_FUNC; +#endif + +#ifndef HAVE_VASPRINTF +extern int vasprintf(char **string_ptr, const char *format, va_list p) FAST_FUNC; +#endif + +#ifndef HAVE_GETLINE +# include /* for FILE */ +# include /* size_t */ +extern ssize_t getline(char **lineptr, size_t *n, FILE *stream) FAST_FUNC; +#endif + +#endif diff --git a/probe-busybox/include/pwd_.h b/probe-busybox/include/pwd_.h new file mode 100644 index 00000000..17348298 --- /dev/null +++ b/probe-busybox/include/pwd_.h @@ -0,0 +1,67 @@ +/* vi: set sw=4 ts=4: */ +/* Copyright (C) 1991,92,95,96,97,98,99,2001 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* + * POSIX Standard: 9.2.2 User Database Access + */ + +#ifndef BB_PWD_H +#define BB_PWD_H 1 + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +/* This file is #included after #include + * We will use libc-defined structures, but will #define function names + * so that function calls are directed to bb_internal_XXX replacements + */ +#undef endpwent +#define setpwent bb_internal_setpwent +#define endpwent bb_internal_endpwent +#define getpwent bb_internal_getpwent +#define getpwuid bb_internal_getpwuid +#define getpwnam bb_internal_getpwnam +#define getpwnam_r bb_internal_getpwnam_r + +/* All function names below should be remapped by #defines above + * in order to not collide with libc names. */ + +/* Rewind the password-file stream. */ +void FAST_FUNC setpwent(void); + +/* Close the password-file stream. */ +void FAST_FUNC endpwent(void); + +/* Read an entry from the password-file stream, opening it if necessary. */ +struct passwd* FAST_FUNC getpwent(void); + +/* Search for an entry with a matching user ID. */ +struct passwd* FAST_FUNC getpwuid(uid_t __uid); + +/* Search for an entry with a matching username. */ +struct passwd* FAST_FUNC getpwnam(const char *__name); + +/* Reentrant versions of some of the functions above. */ +int FAST_FUNC getpwnam_r(const char *__restrict __name, + struct passwd *__restrict __resultbuf, + char *__restrict __buffer, size_t __buflen, + struct passwd **__restrict __result); + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/rtc_.h b/probe-busybox/include/rtc_.h new file mode 100644 index 00000000..750fc20e --- /dev/null +++ b/probe-busybox/include/rtc_.h @@ -0,0 +1,76 @@ +/* + * Common defines/structures/etc... for applets that need to work with the RTC. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#ifndef BB_RTC_H +#define BB_RTC_H 1 + +#include "libbb.h" + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +int rtc_adjtime_is_utc(void) FAST_FUNC; +int rtc_xopen(const char **default_rtc, int flags) FAST_FUNC; +void rtc_read_tm(struct tm *ptm, int fd) FAST_FUNC; +time_t rtc_tm2time(struct tm *ptm, int utc) FAST_FUNC; + + +/* + * Everything below this point has been copied from linux/rtc.h + * to eliminate the kernel header dependency + */ + +struct linux_rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct linux_rtc_wkalrm { + unsigned char enabled; /* 0 = alarm disabled, 1 = alarm enabled */ + unsigned char pending; /* 0 = alarm not pending, 1 = alarm pending */ + struct linux_rtc_time time; /* time the alarm is set to */ +}; + +/* + * ioctl calls that are permitted to the /dev/rtc interface, if + * any of the RTC drivers are enabled. + */ +#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */ +#define RTC_AIE_OFF _IO('p', 0x02) /* ... off */ +#define RTC_UIE_ON _IO('p', 0x03) /* Update int. enable on */ +#define RTC_UIE_OFF _IO('p', 0x04) /* ... off */ +#define RTC_PIE_ON _IO('p', 0x05) /* Periodic int. enable on */ +#define RTC_PIE_OFF _IO('p', 0x06) /* ... off */ +#define RTC_WIE_ON _IO('p', 0x0f) /* Watchdog int. enable on */ +#define RTC_WIE_OFF _IO('p', 0x10) /* ... 
off */ + +#define RTC_ALM_SET _IOW('p', 0x07, struct linux_rtc_time) /* Set alarm time */ +#define RTC_ALM_READ _IOR('p', 0x08, struct linux_rtc_time) /* Read alarm time */ +#define RTC_RD_TIME _IOR('p', 0x09, struct linux_rtc_time) /* Read RTC time */ +#define RTC_SET_TIME _IOW('p', 0x0a, struct linux_rtc_time) /* Set RTC time */ +#define RTC_IRQP_READ _IOR('p', 0x0b, unsigned long) /* Read IRQ rate */ +#define RTC_IRQP_SET _IOW('p', 0x0c, unsigned long) /* Set IRQ rate */ +#define RTC_EPOCH_READ _IOR('p', 0x0d, unsigned long) /* Read epoch */ +#define RTC_EPOCH_SET _IOW('p', 0x0e, unsigned long) /* Set epoch */ + +#define RTC_WKALM_SET _IOW('p', 0x0f, struct linux_rtc_wkalrm)/* Set wakeup alarm*/ +#define RTC_WKALM_RD _IOR('p', 0x10, struct linux_rtc_wkalrm)/* Get wakeup alarm*/ + +/* interrupt flags */ +#define RTC_IRQF 0x80 /* any of the following is active */ +#define RTC_PF 0x40 +#define RTC_AF 0x20 +#define RTC_UF 0x10 + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/shadow_.h b/probe-busybox/include/shadow_.h new file mode 100644 index 00000000..8e2581e7 --- /dev/null +++ b/probe-busybox/include/shadow_.h @@ -0,0 +1,106 @@ +/* vi: set sw=4 ts=4: */ +/* Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* Declaration of types and functions for shadow password suite */ + +#ifndef BB_SHADOW_H +#define BB_SHADOW_H 1 + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +/* Structure of the password file */ +struct spwd { + char *sp_namp; /* Login name */ + char *sp_pwdp; /* Encrypted password */ + long sp_lstchg; /* Date of last change */ + long sp_min; /* Minimum number of days between changes */ + long sp_max; /* Maximum number of days between changes */ + long sp_warn; /* Number of days to warn user to change the password */ + long sp_inact; /* Number of days the account may be inactive */ + long sp_expire; /* Number of days since 1970-01-01 until account expires */ + unsigned long sp_flag; /* Reserved */ +}; + +#define setspent bb_internal_setspent +#define endspent bb_internal_endspent +#define getspent bb_internal_getspent +#define getspnam bb_internal_getspnam +#define sgetspent bb_internal_sgetspent +#define fgetspent bb_internal_fgetspent +#define putspent bb_internal_putspent +#define getspent_r bb_internal_getspent_r +#define getspnam_r bb_internal_getspnam_r +#define sgetspent_r bb_internal_sgetspent_r +#define fgetspent_r bb_internal_fgetspent_r +#define lckpwdf bb_internal_lckpwdf +#define ulckpwdf bb_internal_ulckpwdf + + +/* All function names below should be remapped by #defines above + * in order to not collide with libc names. 
*/ + +#ifdef UNUSED_FOR_NOW +/* Open database for reading */ +void FAST_FUNC setspent(void); + +/* Close database */ +void FAST_FUNC endspent(void); + +/* Get next entry from database, perhaps after opening the file */ +struct spwd* FAST_FUNC getspent(void); + +/* Get shadow entry matching NAME */ +struct spwd* FAST_FUNC getspnam(const char *__name); + +/* Read shadow entry from STRING */ +struct spwd* FAST_FUNC sgetspent(const char *__string); + +/* Read next shadow entry from STREAM */ +struct spwd* FAST_FUNC fgetspent(FILE *__stream); + +/* Write line containing shadow password entry to stream */ +int FAST_FUNC putspent(const struct spwd *__p, FILE *__stream); + +/* Reentrant versions of some of the functions above */ +int FAST_FUNC getspent_r(struct spwd *__result_buf, char *__buffer, + size_t __buflen, struct spwd **__result); +#endif + +int FAST_FUNC getspnam_r(const char *__name, struct spwd *__result_buf, + char *__buffer, size_t __buflen, + struct spwd **__result); + +#ifdef UNUSED_FOR_NOW +int FAST_FUNC sgetspent_r(const char *__string, struct spwd *__result_buf, + char *__buffer, size_t __buflen, + struct spwd **__result); + +int FAST_FUNC fgetspent_r(FILE *__stream, struct spwd *__result_buf, + char *__buffer, size_t __buflen, + struct spwd **__result); +/* Protect password file against multi writers */ +int FAST_FUNC lckpwdf(void); + +/* Unlock password file */ +int FAST_FUNC ulckpwdf(void); +#endif + +POP_SAVED_FUNCTION_VISIBILITY + +#endif /* shadow.h */ diff --git a/probe-busybox/include/unicode.h b/probe-busybox/include/unicode.h new file mode 100644 index 00000000..0317a215 --- /dev/null +++ b/probe-busybox/include/unicode.h @@ -0,0 +1,129 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#ifndef UNICODE_H +#define UNICODE_H 1 + +#if ENABLE_UNICODE_USING_LOCALE +# include +# include +#endif + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +enum { + UNICODE_UNKNOWN = 0, + UNICODE_OFF = 1, + UNICODE_ON = 2, +}; + +#define unicode_bidi_isrtl(wc) 0 +#define unicode_bidi_is_neutral_wchar(wc) (wc <= 126 && !isalpha(wc)) + +#if !ENABLE_UNICODE_SUPPORT + +# define unicode_strlen(string) strlen(string) +# define unicode_strwidth(string) strlen(string) +# define unicode_status UNICODE_OFF +# define init_unicode() ((void)0) +# define reinit_unicode(LANG) ((void)0) + +#else + +# if CONFIG_LAST_SUPPORTED_WCHAR < 126 || CONFIG_LAST_SUPPORTED_WCHAR >= 0x30000 +# undef CONFIG_LAST_SUPPORTED_WCHAR +# define CONFIG_LAST_SUPPORTED_WCHAR 0x2ffff +# endif + +# if CONFIG_LAST_SUPPORTED_WCHAR < 0x300 +# undef ENABLE_UNICODE_COMBINING_WCHARS +# define ENABLE_UNICODE_COMBINING_WCHARS 0 +# endif + +# if CONFIG_LAST_SUPPORTED_WCHAR < 0x1100 +# undef ENABLE_UNICODE_WIDE_WCHARS +# define ENABLE_UNICODE_WIDE_WCHARS 0 +# endif + +# if CONFIG_LAST_SUPPORTED_WCHAR < 0x590 +# undef ENABLE_UNICODE_BIDI_SUPPORT +# define ENABLE_UNICODE_BIDI_SUPPORT 0 +# endif + +/* Number of unicode chars. 
Falls back to strlen() on invalid unicode */ +size_t FAST_FUNC unicode_strlen(const char *string); +/* Width on terminal */ +size_t FAST_FUNC unicode_strwidth(const char *string); +enum { + UNI_FLAG_PAD = (1 << 0), +}; +//UNUSED: unsigned FAST_FUNC unicode_padding_to_width(unsigned width, const char *src); +//UNUSED: char* FAST_FUNC unicode_conv_to_printable2(uni_stat_t *stats, const char *src, unsigned width, int flags); +char* FAST_FUNC unicode_conv_to_printable(uni_stat_t *stats, const char *src); +//UNUSED: char* FAST_FUNC unicode_conv_to_printable_maxwidth(uni_stat_t *stats, const char *src, unsigned maxwidth); +char* FAST_FUNC unicode_conv_to_printable_fixedwidth(/*uni_stat_t *stats,*/ const char *src, unsigned width); + +# if ENABLE_UNICODE_USING_LOCALE + +extern uint8_t unicode_status; +void init_unicode(void) FAST_FUNC; +void reinit_unicode(const char *LANG) FAST_FUNC; + +# else + +/* Homegrown Unicode support. It knows only C and Unicode locales. */ + +# if !ENABLE_FEATURE_CHECK_UNICODE_IN_ENV +# define unicode_status UNICODE_ON +# define init_unicode() ((void)0) +# define reinit_unicode(LANG) ((void)0) +# else +extern uint8_t unicode_status; +void init_unicode(void) FAST_FUNC; +void reinit_unicode(const char *LANG) FAST_FUNC; +# endif + +# undef MB_CUR_MAX +# define MB_CUR_MAX 6 + +/* Prevent name collisions */ +# define wint_t bb_wint_t +# define mbstate_t bb_mbstate_t +# define mbstowcs bb_mbstowcs +# define wcstombs bb_wcstombs +# define wcrtomb bb_wcrtomb +# define iswspace bb_iswspace +# define iswalnum bb_iswalnum +# define iswpunct bb_iswpunct +# define wcwidth bb_wcwidth + +typedef int32_t wint_t; +typedef struct { + char bogus; +} mbstate_t; + +size_t mbstowcs(wchar_t *dest, const char *src, size_t n) FAST_FUNC; +size_t wcstombs(char *dest, const wchar_t *src, size_t n) FAST_FUNC; +size_t wcrtomb(char *s, wchar_t wc, mbstate_t *ps) FAST_FUNC; +int iswspace(wint_t wc) FAST_FUNC; +int iswalnum(wint_t wc) FAST_FUNC; +int iswpunct(wint_t wc) FAST_FUNC; +int wcwidth(unsigned ucs) FAST_FUNC; +# if ENABLE_UNICODE_BIDI_SUPPORT +# undef unicode_bidi_isrtl +int unicode_bidi_isrtl(wint_t wc) FAST_FUNC; +# if ENABLE_UNICODE_NEUTRAL_TABLE +# undef unicode_bidi_is_neutral_wchar +int unicode_bidi_is_neutral_wchar(wint_t wc) FAST_FUNC; +# endif +# endif + + +# endif /* !UNICODE_USING_LOCALE */ + +#endif /* UNICODE_SUPPORT */ + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/include/usage.src.h b/probe-busybox/include/usage.src.h new file mode 100644 index 00000000..78beccf4 --- /dev/null +++ b/probe-busybox/include/usage.src.h @@ -0,0 +1,22 @@ +/* vi: set sw=8 ts=8: */ +/* + * This file suffers from chronically incorrect tabification + * of messages. Before editing this file: + * 1. Switch you editor to 8-space tab mode. + * 2. Do not use \t in messages, use real tab character. + * 3. Start each source line with message as follows: + * |<7 spaces>"text with tabs".... + * or + * |<5 spaces>"\ntext with tabs".... 
+ */ +#ifndef BB_USAGE_H +#define BB_USAGE_H 1 + +#define NOUSAGE_STR "\b" + +INSERT + +#define busybox_notes_usage \ + "Hello world!\n" + +#endif diff --git a/probe-busybox/include/volume_id.h b/probe-busybox/include/volume_id.h new file mode 100644 index 00000000..a83da899 --- /dev/null +++ b/probe-busybox/include/volume_id.h @@ -0,0 +1,31 @@ +/* + * volume_id - reads filesystem label and uuid + * + * Copyright (C) 2005 Kay Sievers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +char *get_devname_from_label(const char *spec); +char *get_devname_from_uuid(const char *spec); +void display_uuid_cache(int scan_devices); + +/* Returns: + * 0: no UUID= or LABEL= prefix found + * 1: UUID= or LABEL= prefix found. In this case, + * *fsname is replaced if device with such UUID or LABEL is found + */ +int resolve_mount_spec(char **fsname); +int add_to_uuid_cache(const char *device); diff --git a/probe-busybox/include/xatonum.h b/probe-busybox/include/xatonum.h new file mode 100644 index 00000000..45ebbfc0 --- /dev/null +++ b/probe-busybox/include/xatonum.h @@ -0,0 +1,185 @@ +/* vi: set sw=4 ts=4: */ +/* + * ascii-to-numbers implementations for busybox + * + * Copyright (C) 2003 Manuel Novoa III + * + * Licensed under GPLv2, see file LICENSE in this source tree. 
+ */ + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +/* Provides extern declarations of functions */ +#define DECLARE_STR_CONV(type, T, UT) \ +\ +unsigned type xstrto##UT##_range_sfx(const char *str, int b, unsigned type l, unsigned type u, const struct suffix_mult *sfx) FAST_FUNC; \ +unsigned type xstrto##UT##_range(const char *str, int b, unsigned type l, unsigned type u) FAST_FUNC; \ +unsigned type xstrto##UT##_sfx(const char *str, int b, const struct suffix_mult *sfx) FAST_FUNC; \ +unsigned type xstrto##UT(const char *str, int b) FAST_FUNC; \ +unsigned type xato##UT##_range_sfx(const char *str, unsigned type l, unsigned type u, const struct suffix_mult *sfx) FAST_FUNC; \ +unsigned type xato##UT##_range(const char *str, unsigned type l, unsigned type u) FAST_FUNC; \ +unsigned type xato##UT##_sfx(const char *str, const struct suffix_mult *sfx) FAST_FUNC; \ +unsigned type xato##UT(const char *str) FAST_FUNC; \ +type xstrto##T##_range_sfx(const char *str, int b, type l, type u, const struct suffix_mult *sfx) FAST_FUNC; \ +type xstrto##T##_range(const char *str, int b, type l, type u) FAST_FUNC; \ +type xstrto##T(const char *str, int b) FAST_FUNC; \ +type xato##T##_range_sfx(const char *str, type l, type u, const struct suffix_mult *sfx) FAST_FUNC; \ +type xato##T##_range(const char *str, type l, type u) FAST_FUNC; \ +type xato##T##_sfx(const char *str, const struct suffix_mult *sfx) FAST_FUNC; \ +type xato##T(const char *str) FAST_FUNC; \ + +/* Unsigned long long functions always exist */ +DECLARE_STR_CONV(long long, ll, ull) + + +/* Provides inline definitions of functions */ +/* (useful for mapping them to the type of the same width) */ +#define DEFINE_EQUIV_STR_CONV(narrow, N, W, UN, UW) \ +\ +static ALWAYS_INLINE \ +unsigned narrow xstrto##UN##_range_sfx(const char *str, int b, unsigned narrow l, unsigned narrow u, const struct suffix_mult *sfx) \ +{ return xstrto##UW##_range_sfx(str, b, l, u, sfx); } \ +static ALWAYS_INLINE \ +unsigned narrow xstrto##UN##_range(const char *str, int b, unsigned narrow l, unsigned narrow u) \ +{ return xstrto##UW##_range(str, b, l, u); } \ +static ALWAYS_INLINE \ +unsigned narrow xstrto##UN##_sfx(const char *str, int b, const struct suffix_mult *sfx) \ +{ return xstrto##UW##_sfx(str, b, sfx); } \ +static ALWAYS_INLINE \ +unsigned narrow xstrto##UN(const char *str, int b) \ +{ return xstrto##UW(str, b); } \ +static ALWAYS_INLINE \ +unsigned narrow xato##UN##_range_sfx(const char *str, unsigned narrow l, unsigned narrow u, const struct suffix_mult *sfx) \ +{ return xato##UW##_range_sfx(str, l, u, sfx); } \ +static ALWAYS_INLINE \ +unsigned narrow xato##UN##_range(const char *str, unsigned narrow l, unsigned narrow u) \ +{ return xato##UW##_range(str, l, u); } \ +static ALWAYS_INLINE \ +unsigned narrow xato##UN##_sfx(const char *str, const struct suffix_mult *sfx) \ +{ return xato##UW##_sfx(str, sfx); } \ +static ALWAYS_INLINE \ +unsigned narrow xato##UN(const char *str) \ +{ return xato##UW(str); } \ +static ALWAYS_INLINE \ +narrow xstrto##N##_range_sfx(const char *str, int b, narrow l, narrow u, const struct suffix_mult *sfx) \ +{ return xstrto##W##_range_sfx(str, b, l, u, sfx); } \ +static ALWAYS_INLINE \ +narrow xstrto##N##_range(const char *str, int b, narrow l, narrow u) \ +{ return xstrto##W##_range(str, b, l, u); } \ +static ALWAYS_INLINE \ +narrow xstrto##N(const char *str, int b) \ +{ return xstrto##W(str, b); } \ +static ALWAYS_INLINE \ +narrow xato##N##_range_sfx(const char *str, narrow l, narrow u, const struct suffix_mult *sfx) \ +{ return 
xato##W##_range_sfx(str, l, u, sfx); } \ +static ALWAYS_INLINE \ +narrow xato##N##_range(const char *str, narrow l, narrow u) \ +{ return xato##W##_range(str, l, u); } \ +static ALWAYS_INLINE \ +narrow xato##N##_sfx(const char *str, const struct suffix_mult *sfx) \ +{ return xato##W##_sfx(str, sfx); } \ +static ALWAYS_INLINE \ +narrow xato##N(const char *str) \ +{ return xato##W(str); } \ + +/* If long == long long, then just map them one-to-one */ +#if ULONG_MAX == ULLONG_MAX +DEFINE_EQUIV_STR_CONV(long, l, ll, ul, ull) +#else +/* Else provide extern defs */ +DECLARE_STR_CONV(long, l, ul) +#endif + +/* Same for int -> [long] long */ +#if UINT_MAX == ULLONG_MAX +DEFINE_EQUIV_STR_CONV(int, i, ll, u, ull) +#elif UINT_MAX == ULONG_MAX +DEFINE_EQUIV_STR_CONV(int, i, l, u, ul) +#else +DECLARE_STR_CONV(int, i, u) +#endif + +/* Specialized */ + +uint32_t BUG_xatou32_unimplemented(void); +static ALWAYS_INLINE uint32_t xatou32(const char *numstr) +{ + if (UINT_MAX == 0xffffffff) + return xatou(numstr); + if (ULONG_MAX == 0xffffffff) + return xatoul(numstr); + return BUG_xatou32_unimplemented(); +} + +/* Non-aborting kind of convertors: bb_strto[u][l]l */ + +/* On exit: errno = 0 only if there was non-empty, '\0' terminated value + * errno = EINVAL if value was not '\0' terminated, but otherwise ok + * Return value is still valid, caller should just check whether end[0] + * is a valid terminating char for particular case. OTOH, if caller + * requires '\0' terminated input, [s]he can just check errno == 0. + * errno = ERANGE if value had alphanumeric terminating char ("1234abcg"). + * errno = ERANGE if value is out of range, missing, etc. + * errno = ERANGE if value had minus sign for strtouXX (even "-0" is not ok ) + * return value is all-ones in this case. + */ + +unsigned long long bb_strtoull(const char *arg, char **endp, int base) FAST_FUNC; +long long bb_strtoll(const char *arg, char **endp, int base) FAST_FUNC; + +#if ULONG_MAX == ULLONG_MAX +static ALWAYS_INLINE +unsigned long bb_strtoul(const char *arg, char **endp, int base) +{ return bb_strtoull(arg, endp, base); } +static ALWAYS_INLINE +long bb_strtol(const char *arg, char **endp, int base) +{ return bb_strtoll(arg, endp, base); } +#else +unsigned long bb_strtoul(const char *arg, char **endp, int base) FAST_FUNC; +long bb_strtol(const char *arg, char **endp, int base) FAST_FUNC; +#endif + +#if UINT_MAX == ULLONG_MAX +static ALWAYS_INLINE +unsigned bb_strtou(const char *arg, char **endp, int base) +{ return bb_strtoull(arg, endp, base); } +static ALWAYS_INLINE +int bb_strtoi(const char *arg, char **endp, int base) +{ return bb_strtoll(arg, endp, base); } +#elif UINT_MAX == ULONG_MAX +static ALWAYS_INLINE +unsigned bb_strtou(const char *arg, char **endp, int base) +{ return bb_strtoul(arg, endp, base); } +static ALWAYS_INLINE +int bb_strtoi(const char *arg, char **endp, int base) +{ return bb_strtol(arg, endp, base); } +#else +unsigned bb_strtou(const char *arg, char **endp, int base) FAST_FUNC; +int bb_strtoi(const char *arg, char **endp, int base) FAST_FUNC; +#endif + +uint32_t BUG_bb_strtou32_unimplemented(void); +static ALWAYS_INLINE +uint32_t bb_strtou32(const char *arg, char **endp, int base) +{ + if (sizeof(uint32_t) == sizeof(unsigned)) + return bb_strtou(arg, endp, base); + if (sizeof(uint32_t) == sizeof(unsigned long)) + return bb_strtoul(arg, endp, base); + return BUG_bb_strtou32_unimplemented(); +} +static ALWAYS_INLINE +int32_t bb_strtoi32(const char *arg, char **endp, int base) +{ + if (sizeof(int32_t) == sizeof(int)) + return 
bb_strtoi(arg, endp, base); + if (sizeof(int32_t) == sizeof(long)) + return bb_strtol(arg, endp, base); + return BUG_bb_strtou32_unimplemented(); +} + +/* Floating point */ + +double bb_strtod(const char *arg, char **endp) FAST_FUNC; + +POP_SAVED_FUNCTION_VISIBILITY diff --git a/probe-busybox/include/xregex.h b/probe-busybox/include/xregex.h new file mode 100644 index 00000000..5e5e6a25 --- /dev/null +++ b/probe-busybox/include/xregex.h @@ -0,0 +1,23 @@ +/* vi: set sw=4 ts=4: */ +/* + * Busybox xregcomp utility routine. This isn't in libbb.h because the + * C library we're linking against may not support regex.h. + * + * Based in part on code from sash, Copyright (c) 1999 by David I. Bell + * Permission has been granted to redistribute this code under GPL. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#ifndef BB_REGEX_H +#define BB_REGEX_H 1 + +#include + +PUSH_AND_SET_FUNCTION_VISIBILITY_TO_HIDDEN + +char* regcomp_or_errmsg(regex_t *preg, const char *regex, int cflags) FAST_FUNC; +void xregcomp(regex_t *preg, const char *regex, int cflags) FAST_FUNC; + +POP_SAVED_FUNCTION_VISIBILITY + +#endif diff --git a/probe-busybox/libbb/Config.src b/probe-busybox/libbb/Config.src new file mode 100644 index 00000000..172fbcc0 --- /dev/null +++ b/probe-busybox/libbb/Config.src @@ -0,0 +1,397 @@ +# +# For a description of the syntax of this configuration file, +# see scripts/kbuild/config-language.txt. +# + +menu "Busybox Library Tuning" + +INSERT + +choice + prompt "Buffer allocation policy" + default FEATURE_BUFFERS_USE_MALLOC + help + There are 3 ways BusyBox can handle buffer allocations: + - Use malloc. This costs code size for the call to xmalloc. + - Put them on stack. For some very small machines with limited stack + space, this can be deadly. For most folks, this works just fine. + - Put them in BSS. This works beautifully for computers with a real + MMU (and OS support), but wastes runtime RAM for uCLinux. This + behavior was the only one available for BusyBox versions 0.48 and + earlier. + +config FEATURE_BUFFERS_USE_MALLOC + bool "Allocate with Malloc" + +config FEATURE_BUFFERS_GO_ON_STACK + bool "Allocate on the Stack" + +config FEATURE_BUFFERS_GO_IN_BSS + bool "Allocate in the .bss section" + +endchoice + +config PASSWORD_MINLEN + int "Minimum password length" + default 6 + range 5 32 + help + Minimum allowable password length. + +config MD5_SMALL + int "MD5: Trade bytes for speed (0:fast, 3:slow)" + default 1 # all "fast or small" options default to small + range 0 3 + help + Trade binary size versus speed for the md5sum algorithm. + Approximate values running uClibc and hashing + linux-2.4.4.tar.bz2 were: + user times (sec) text size (386) + 0 (fastest) 1.1 6144 + 1 1.4 5392 + 2 3.0 5088 + 3 (smallest) 5.1 4912 + +config SHA3_SMALL + int "SHA3: Trade bytes for speed (0:fast, 1:slow)" + default 1 # all "fast or small" options default to small + range 0 1 + help + Trade binary size versus speed for the sha3sum algorithm. + SHA3_SMALL=0 compared to SHA3_SMALL=1 (approximate): + 64-bit x86: +270 bytes of code, 45% faster + 32-bit x86: +450 bytes of code, 75% faster + +config FEATURE_FAST_TOP + bool "Faster /proc scanning code (+100 bytes)" + default n # all "fast or small" options default to small + help + This option makes top (and ps) ~20% faster (or 20% less CPU hungry), + but code size is slightly bigger. + +config FEATURE_ETC_NETWORKS + bool "Support for /etc/networks" + default n + help + Enable support for network names in /etc/networks. 
This is + a rarely used feature which allows you to use names + instead of IP/mask pairs in route command. + +config FEATURE_USE_TERMIOS + bool "Use termios to manipulate the screen" + default y + depends on MORE || TOP || POWERTOP + help + This option allows utilities such as 'more' and 'top' to determine + the size of the screen. If you leave this disabled, your utilities + that display things on the screen will be especially primitive and + will be unable to determine the current screen size, and will be + unable to move the cursor. + +config FEATURE_EDITING + bool "Command line editing" + default y + help + Enable line editing (mainly for shell command line). + +config FEATURE_EDITING_MAX_LEN + int "Maximum length of input" + range 128 8192 + default 1024 + depends on FEATURE_EDITING + help + Line editing code uses on-stack buffers for storage. + You may want to decrease this parameter if your target machine + benefits from smaller stack usage. + +config FEATURE_EDITING_VI + bool "vi-style line editing commands" + default n + depends on FEATURE_EDITING + help + Enable vi-style line editing. In shells, this mode can be + turned on and off with "set -o vi" and "set +o vi". + +config FEATURE_EDITING_HISTORY + int "History size" + # Don't allow way too big values here, code uses fixed "char *history[N]" struct member + range 0 9999 + default 255 + depends on FEATURE_EDITING + help + Specify command history size (0 - disable). + +config FEATURE_EDITING_SAVEHISTORY + bool "History saving" + default y + depends on FEATURE_EDITING + help + Enable history saving in shells. + +config FEATURE_EDITING_SAVE_ON_EXIT + bool "Save history on shell exit, not after every command" + default n + depends on FEATURE_EDITING_SAVEHISTORY + help + Save history on shell exit, not after every command. + +config FEATURE_REVERSE_SEARCH + bool "Reverse history search" + default y + depends on FEATURE_EDITING + help + Enable readline-like Ctrl-R combination for reverse history search. + Increases code by about 0.5k. + +config FEATURE_TAB_COMPLETION + bool "Tab completion" + default y + depends on FEATURE_EDITING + help + Enable tab completion. + +config FEATURE_USERNAME_COMPLETION + bool "Username completion" + default y + depends on FEATURE_TAB_COMPLETION + help + Enable username completion. + +config FEATURE_EDITING_FANCY_PROMPT + bool "Fancy shell prompts" + default y + depends on FEATURE_EDITING + help + Setting this option allows for prompts to use things like \w and + \$ and escape codes. + +config FEATURE_EDITING_ASK_TERMINAL + bool "Query cursor position from terminal" + default n + depends on FEATURE_EDITING + help + Allow usage of "ESC [ 6 n" sequence. Terminal answers back with + current cursor position. This information is used to make line + editing more robust in some cases. + If you are not sure whether your terminals respond to this code + correctly, or want to save on code size (about 400 bytes), + then do not turn this option on. + +config LOCALE_SUPPORT + bool "Enable locale support (system needs locale for this to work)" + default n + help + Enable this if your system has locale support and you would like + busybox to support locale settings. + +config UNICODE_SUPPORT + bool "Support Unicode" + default y + help + This makes various applets aware that one byte is not + one character on screen. + + Busybox aims to eventually work correctly with Unicode displays. + Any older encodings are not guaranteed to work. 
+ Probably by the time when busybox will be fully Unicode-clean, + other encodings will be mainly of historic interest. + +config UNICODE_USING_LOCALE + bool "Use libc routines for Unicode (else uses internal ones)" + default n + depends on UNICODE_SUPPORT && LOCALE_SUPPORT + help + With this option on, Unicode support is implemented using libc + routines. Otherwise, internal implementation is used. + Internal implementation is smaller. + +config FEATURE_CHECK_UNICODE_IN_ENV + bool "Check $LC_ALL, $LC_CTYPE and $LANG environment variables" + default n + depends on UNICODE_SUPPORT && !UNICODE_USING_LOCALE + help + With this option on, Unicode support is activated + only if locale-related variables have the value of the form + "xxxx.utf8" + + Otherwise, Unicode support will be always enabled and active. + +config SUBST_WCHAR + int "Character code to substitute unprintable characters with" + depends on UNICODE_SUPPORT + default 63 + help + Typical values are 63 for '?' (works with any output device), + 30 for ASCII substitute control code, + 65533 (0xfffd) for Unicode replacement character. + +config LAST_SUPPORTED_WCHAR + int "Range of supported Unicode characters" + depends on UNICODE_SUPPORT + default 767 + help + Any character with Unicode value bigger than this is assumed + to be non-printable on output device. Many applets replace + such characters with substitution character. + + The idea is that many valid printable Unicode chars + nevertheless are not displayed correctly. Think about + combining charachers, double-wide hieroglyphs, obscure + characters in dozens of ancient scripts... + Many terminals, terminal emulators, xterms etc will fail + to handle them correctly. Choose the smallest value + which suits your needs. + + Typical values are: + 126 - ASCII only + 767 (0x2ff) - there are no combining chars in [0..767] range + (the range includes Latin 1, Latin Ext. A and B), + code is ~700 bytes smaller for this case. + 4351 (0x10ff) - there are no double-wide chars in [0..4351] range, + code is ~300 bytes smaller for this case. + 12799 (0x31ff) - nearly all non-ideographic characters are + available in [0..12799] range, including + East Asian scripts like katakana, hiragana, hangul, + bopomofo... + 0 - off, any valid printable Unicode character will be printed. + +config UNICODE_COMBINING_WCHARS + bool "Allow zero-width Unicode characters on output" + default n + depends on UNICODE_SUPPORT + help + With this option off, any Unicode char with width of 0 + is substituted on output. + +config UNICODE_WIDE_WCHARS + bool "Allow wide Unicode characters on output" + default n + depends on UNICODE_SUPPORT + help + With this option off, any Unicode char with width > 1 + is substituted on output. + +config UNICODE_BIDI_SUPPORT + bool "Bidirectional character-aware line input" + default n + depends on UNICODE_SUPPORT && !UNICODE_USING_LOCALE + help + With this option on, right-to-left Unicode characters + are treated differently on input (e.g. cursor movement). + +config UNICODE_NEUTRAL_TABLE + bool "In bidi input, support non-ASCII neutral chars too" + default n + depends on UNICODE_BIDI_SUPPORT + help + In most cases it's enough to treat only ASCII non-letters + (i.e. punctuation, numbers and space) as characters + with neutral directionality. + With this option on, more extensive (and bigger) table + of neutral chars will be used. 
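#
# Illustrative sketch (not part of the upstream Config.src): the Unicode
# output options above combine into a single filter applied before a
# character is printed. In C, assuming the usual CONFIG_*/ENABLE_* macros
# generated from this file and a hypothetical helper that reports display
# width, the policy is roughly:
#
#	wchar_t subst_if_unsupported(wchar_t wc, int width)
#	{
#		if (CONFIG_LAST_SUPPORTED_WCHAR != 0
#		 && wc > CONFIG_LAST_SUPPORTED_WCHAR)
#			return CONFIG_SUBST_WCHAR;	/* outside supported range */
#		if (width == 0 && !ENABLE_UNICODE_COMBINING_WCHARS)
#			return CONFIG_SUBST_WCHAR;	/* zero-width / combining */
#		if (width > 1 && !ENABLE_UNICODE_WIDE_WCHARS)
#			return CONFIG_SUBST_WCHAR;	/* double-wide */
#		return wc;
#	}
#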
+ +config UNICODE_PRESERVE_BROKEN + bool "Make it possible to enter sequences of chars which are not Unicode" + default n + depends on UNICODE_SUPPORT + help + With this option on, on line-editing input (such as used by shells) + invalid UTF-8 bytes are not substituted with the selected + substitution character. + For example, this means that entering 'l', 's', ' ', 0xff, [Enter] + at shell prompt will list file named 0xff (single char name + with char value 255), not file named '?'. + +config FEATURE_NON_POSIX_CP + bool "Non-POSIX, but safer, copying to special nodes" + default y + help + With this option, "cp file symlink" will delete symlink + and create a regular file. This does not conform to POSIX, + but prevents a symlink attack. + Similarly, "cp file device" will not send file's data + to the device. (To do that, use "cat file >device") + +config FEATURE_VERBOSE_CP_MESSAGE + bool "Give more precise messages when copy fails (cp, mv etc)" + default n + help + Error messages with this feature enabled: + $ cp file /does_not_exist/file + cp: cannot create '/does_not_exist/file': Path does not exist + $ cp file /vmlinuz/file + cp: cannot stat '/vmlinuz/file': Path has non-directory component + If this feature is not enabled, they will be, respectively: + cp: cannot create '/does_not_exist/file': No such file or directory + cp: cannot stat '/vmlinuz/file': Not a directory + This will cost you ~60 bytes. + +config FEATURE_USE_SENDFILE + bool "Use sendfile system call" + default y + select PLATFORM_LINUX + help + When enabled, busybox will use the kernel sendfile() function + instead of read/write loops to copy data between file descriptors + (for example, cp command does this a lot). + If sendfile() doesn't work, copying code falls back to read/write + loop. sendfile() was originally implemented for faster I/O + from files to sockets, but since Linux 2.6.33 it was extended + to work for many more file types. + +config FEATURE_COPYBUF_KB + int "Copy buffer size, in kilobytes" + range 1 1024 + default 4 + help + Size of buffer used by cp, mv, install, wget etc. + Buffers which are 4 kb or less will be allocated on stack. + Bigger buffers will be allocated with mmap, with fallback to 4 kb + stack buffer if mmap fails. + +config FEATURE_SKIP_ROOTFS + bool "Skip rootfs in mount table" + default y + help + Ignore rootfs entry in mount table. + + In Linux, kernel has a special filesystem, rootfs, which is initially + mounted on /. It contains initramfs data, if kernel is configured + to have one. Usually, another file system is mounted over / early + in boot process, and therefore most tools which manipulate + mount table, such as df, will skip rootfs entry. + + However, some systems do not mount anything on /. + If you need to configure busybox for one of these systems, + you may find it useful to turn this option off to make df show + initramfs statistics. + + Otherwise, choose Y. + +config MONOTONIC_SYSCALL + bool "Use clock_gettime(CLOCK_MONOTONIC) syscall" + default y + select PLATFORM_LINUX + help + Use clock_gettime(CLOCK_MONOTONIC) syscall for measuring + time intervals (time, ping, traceroute etc need this). + Probably requires Linux 2.6+. If not selected, gettimeofday + will be used instead (which gives wrong results if date/time + is reset). + +config IOCTL_HEX2STR_ERROR + bool "Use ioctl names rather than hex values in error messages" + default y + help + Use ioctl names rather than hex values in error messages + (e.g. VT_DISALLOCATE rather than 0x5608). 
If disabled this + saves about 1400 bytes. + +config FEATURE_HWIB + bool "Support infiniband HW" + default y + help + Support for printing infiniband addresses in + network applets. + +endmenu diff --git a/probe-busybox/libbb/Kbuild.src b/probe-busybox/libbb/Kbuild.src new file mode 100644 index 00000000..10bd039a --- /dev/null +++ b/probe-busybox/libbb/Kbuild.src @@ -0,0 +1,218 @@ +# Makefile for busybox +# +# Copyright (C) 1999-2005 by Erik Andersen +# +# Licensed under GPLv2, see file LICENSE in this source tree. + +libbb/appletlib.o: include/usage_compressed.h + +lib-y:= + +INSERT + +lib-y += appletlib.o +# lib-y += ask_confirmation.o +lib-y += atlas_bb64.o +lib-y += atlas_check_addr.o +lib-y += atlas_gettime_mono.o +lib-y += atlas_ipv6_option.o +lib-y += atlas_name_macro.o +lib-y += atlas_probe.o +lib-y += atlas_read_response.o +lib-y += atlas_tests.o +lib-y += atlas_time.o +lib-y += atlas_timesync.o +lib-y += atlas_unsafe.o +lib-y += atlas_version.o +lib-y += atlas_write_response.o +# lib-y += bb_askpass.o + lib-y += bb_bswap_64.o +# lib-y += bb_do_delay.o +# lib-y += bb_pwd.o +# lib-y += bb_qsort.o +# #lib-y += bb_strtod.o +lib-y += bb_strtonum.o +lib-y += bind_interface.o +# lib-y += change_identity.o +# lib-y += chomp.o +lib-y += compare_string_array.o +lib-y += concat_path_file.o +# lib-y += concat_subpath_file.o +# lib-y += copy_file.o +lib-y += copyfd.o +lib-y += crc32.o +lib-y += default_error_retval.o +# lib-y += device_open.o +# lib-y += dump.o +# lib-y += executable.o +# lib-y += fclose_nonstdin.o +# lib-y += fflush_stdout_and_exit.o +# lib-y += fgets_str.o +# lib-y += find_pid_by_name.o +# lib-y += find_root_device.o +lib-y += full_write.o +# lib-y += get_console.o +lib-y += get_last_path_component.o +# lib-y += get_line_from_file.o +lib-y += getopt32.o +lib-y += getpty.o +# lib-y += get_volsize.o +# lib-y += herror_msg.o +# lib-y += human_readable.o +lib-y += inet_common.o +# lib-y += inode_hash.o +# lib-y += isdirectory.o +# lib-y += kernel_version.o +lib-y += last_char_is.o +# lib-y += lineedit.o lineedit_ptr_hack.o +lib-y += llist.o +lib-y += login.o +# lib-y += make_directory.o +# lib-y += makedev.o +lib-y += hash_md5_sha.o +# # Alternative (disabled) MD5 implementation +# #lib-y += hash_md5prime.o +lib-y += messages.o +# lib-y += mode_string.o +# lib-y += parse_mode.o +lib-y += perror_msg.o +# lib-y += perror_nomsg.o +# lib-y += perror_nomsg_and_die.o +lib-y += pidfile.o +# lib-y += platform.o +# lib-y += printable.o +# lib-y += printable_string.o +# lib-y += print_flags.o +# lib-y += process_escape_sequence.o +# lib-y += procps.o +# lib-y += progress.o +lib-y += ptr_to_globals.o +lib-y += read.o +lib-y += read_printf.o +# lib-y += read_key.o +# lib-y += recursive_action.o +# lib-y += remove_file.o +lib-y += route_set_flags.o +# lib-y += run_shell.o +lib-y += safe_gethostname.o +# lib-y += safe_poll.o +lib-y += safe_strncpy.o +lib-y += safe_write.o +# lib-y += setup_environment.o +lib-y += signals.o +# lib-y += simplify_path.o +# lib-y += single_argv.o +lib-y += skip_whitespace.o +# lib-y += speed_table.o +# lib-y += str_tolower.o +lib-y += strlcat.o +lib-y += strlcpy.o +# lib-y += strrstr.o +# lib-y += sysconf.o +lib-y += time.o +# lib-y += trim.o +# lib-y += u_signal_names.o +# lib-y += uuencode.o +lib-y += validate_atlas_id.o +lib-y += validate_filename.o +# lib-y += vdprintf.o +lib-y += verror_msg.o +lib-y += vfork_daemon_rexec.o +# lib-y += warn_ignoring_args.o +lib-y += wfopen.o +lib-y += wfopen_input.o +# lib-y += write.o +lib-y += xatonum.o +lib-y += 
xconnect.o +lib-y += xfuncs.o +lib-y += xfuncs_printf.o +lib-y += xfunc_die.o +# lib-y += xgetcwd.o +# lib-y += xgethostbyname.o +lib-y += xreadlink.o +# lib-y += xrealloc_vector.o + +lib-$(CONFIG_PLATFORM_LINUX) += match_fstype.o + +lib-$(CONFIG_FEATURE_UTMP) += utmp.o + +# A mix of optimizations (why build stuff we know won't be used) +# and objects which may fail to build (SELinux on selinux-less system) +lib-$(CONFIG_SELINUX) += selinux_common.o +lib-$(CONFIG_FEATURE_MTAB_SUPPORT) += mtab.o +lib-$(CONFIG_UNICODE_SUPPORT) += unicode.o +lib-$(CONFIG_FEATURE_CHECK_NAMES) += die_if_bad_username.o + +lib-$(CONFIG_NC) += udp_io.o +lib-$(CONFIG_DNSD) += udp_io.o +lib-$(CONFIG_NTPD) += udp_io.o +lib-$(CONFIG_TFTP) += udp_io.o +lib-$(CONFIG_TFTPD) += udp_io.o +lib-$(CONFIG_TCPSVD) += udp_io.o +lib-$(CONFIG_UDPSVD) += udp_io.o +lib-$(CONFIG_TRACEROUTE) += udp_io.o +lib-$(CONFIG_TRACEROUTE6) += udp_io.o + +lib-$(CONFIG_LOSETUP) += loop.o +lib-$(CONFIG_FEATURE_MOUNT_LOOP) += loop.o + +lib-$(CONFIG_ADDGROUP) += update_passwd.o +lib-$(CONFIG_ADDUSER) += update_passwd.o +lib-$(CONFIG_DELGROUP) += update_passwd.o +lib-$(CONFIG_DELUSER) += update_passwd.o + +lib-$(CONFIG_FTPD) += pw_encrypt.o correct_password.o +lib-$(CONFIG_PASSWD) += pw_encrypt.o update_passwd.o obscure.o +lib-$(CONFIG_CHPASSWD) += pw_encrypt.o update_passwd.o +lib-$(CONFIG_CRYPTPW) += pw_encrypt.o +lib-$(CONFIG_MKPASSWD) += pw_encrypt.o +lib-$(CONFIG_SULOGIN) += pw_encrypt.o correct_password.o +lib-$(CONFIG_VLOCK) += pw_encrypt.o correct_password.o +lib-$(CONFIG_SU) += pw_encrypt.o correct_password.o +lib-$(CONFIG_LOGIN) += pw_encrypt.o correct_password.o +lib-$(CONFIG_FEATURE_HTTPD_AUTH_MD5) += pw_encrypt.o +lib-$(CONFIG_FEATURE_FTP_AUTHENTICATION) += pw_encrypt.o + +lib-$(CONFIG_DF) += find_mount_point.o +lib-$(CONFIG_MKFS_MINIX) += find_mount_point.o +lib-$(CONFIG_MKFS_EXT2) += find_mount_point.o +lib-$(CONFIG_MKE2FS) += find_mount_point.o +lib-$(CONFIG_MKFS_REISER) += find_mount_point.o +lib-$(CONFIG_FSCK_MINIX) += find_mount_point.o +lib-$(CONFIG_MOUNT) += find_mount_point.o + +lib-$(CONFIG_HWCLOCK) += rtc.o +lib-$(CONFIG_RTCWAKE) += rtc.o + +lib-$(CONFIG_IOSTAT) += get_cpu_count.o +lib-$(CONFIG_MPSTAT) += get_cpu_count.o +lib-$(CONFIG_POWERTOP) += get_cpu_count.o + +lib-$(CONFIG_PING) += inet_cksum.o +lib-$(CONFIG_PING6) += inet_cksum.o +lib-$(CONFIG_TRACEROUTE) += inet_cksum.o +lib-$(CONFIG_TRACEROUTE6) += inet_cksum.o +lib-$(CONFIG_UDHCPC) += inet_cksum.o +lib-$(CONFIG_UDHCPC6) += inet_cksum.o +lib-$(CONFIG_UDHCPD) += inet_cksum.o +lib-$(CONFIG_DHCPRELAY) += inet_cksum.o + +# We shouldn't build xregcomp.c if we don't need it - this ensures we don't +# require regex.h to be in the include dir even if we don't need it thereby +# allowing us to build busybox even if uclibc regex support is disabled. 
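#
# Illustrative note (not part of the upstream Kbuild file): the
# "lib-$(CONFIG_FOO) += bar.o" idiom used below and throughout this file
# relies on a set option expanding to "y" and an unset one expanding to
# nothing. For example, if only grep needs regex support:
#
#   lib-$(CONFIG_GREP) += xregcomp.o    # CONFIG_GREP=y       -> lib-y += xregcomp.o
#   lib-$(CONFIG_GREP) += xregcomp.o    # CONFIG_GREP not set -> lib-  += xregcomp.o
#
# Only objects collected in lib-y are compiled and linked; "lib-" (or
# "lib-n") is never referenced, so objects for disabled features are skipped.
#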
+ +lib-$(CONFIG_AWK) += xregcomp.o +lib-$(CONFIG_SED) += xregcomp.o +lib-$(CONFIG_GREP) += xregcomp.o +lib-$(CONFIG_EGREP) += xregcomp.o +lib-$(CONFIG_FGREP) += xregcomp.o +lib-$(CONFIG_EXPR) += xregcomp.o +lib-$(CONFIG_MDEV) += xregcomp.o +lib-$(CONFIG_LESS) += xregcomp.o +lib-$(CONFIG_PGREP) += xregcomp.o +lib-$(CONFIG_PKILL) += xregcomp.o +lib-$(CONFIG_DEVFSD) += xregcomp.o +lib-$(CONFIG_FEATURE_FIND_REGEX) += xregcomp.o + +# Add the experimental logging functionality, only used by zcip +lib-$(CONFIG_ZCIP) += logenv.o diff --git a/probe-busybox/libbb/appletlib.c b/probe-busybox/libbb/appletlib.c new file mode 100644 index 00000000..f04cc5cf --- /dev/null +++ b/probe-busybox/libbb/appletlib.c @@ -0,0 +1,1009 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) tons of folks. Tracking down who wrote what + * isn't something I'm going to worry about... If you wrote something + * here, please feel free to acknowledge your work. + * + * Based in part on code from sash, Copyright (c) 1999 by David I. Bell + * Permission has been granted to redistribute this code under GPL. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* We are trying to not use printf, this benefits the case when selected + * applets are really simple. Example: + * + * $ ./busybox + * ... + * Currently defined functions: + * basename, false, true + * + * $ size busybox + * text data bss dec hex filename + * 4473 52 72 4597 11f5 busybox + * + * FEATURE_INSTALLER or FEATURE_SUID will still link printf routines in. :( + */ +#include "busybox.h" + +#if !(defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) \ + || defined(__APPLE__) \ + ) +# include /* for mallopt */ +#endif + +#include "atlas_path.h" + +/* Declare _main() */ +#define PROTOTYPES +#include "applets.h" +#undef PROTOTYPES + +/* Include generated applet names, pointers to _main, etc */ +#include "applet_tables.h" +/* ...and if applet_tables generator says we have only one applet... */ +#ifdef SINGLE_APPLET_MAIN +# undef ENABLE_FEATURE_INDIVIDUAL +# define ENABLE_FEATURE_INDIVIDUAL 1 +# undef IF_FEATURE_INDIVIDUAL +# define IF_FEATURE_INDIVIDUAL(...) __VA_ARGS__ +#endif + +#include "usage_compressed.h" + +#if ENABLE_SHOW_USAGE && !ENABLE_FEATURE_COMPRESS_USAGE +static const char usage_messages[] ALIGN1 = UNPACKED_USAGE; +#else +# define usage_messages 0 +#endif + +#if ENABLE_FEATURE_COMPRESS_USAGE + +static const char packed_usage[] ALIGN1 = { PACKED_USAGE }; +# include "bb_archive.h" +static const char *unpack_usage_messages(void) +{ + char *outbuf = NULL; + bunzip_data *bd; + int i; + + i = start_bunzip(&bd, + /* src_fd: */ -1, + /* inbuf: */ packed_usage, + /* len: */ sizeof(packed_usage)); + /* read_bunzip can longjmp to start_bunzip, and ultimately + * end up here with i != 0 on read data errors! Not trivial */ + if (!i) { + /* Cannot use xmalloc: will leak bd in NOFORK case! */ + outbuf = malloc_or_warn(sizeof(UNPACKED_USAGE)); + if (outbuf) + read_bunzip(bd, outbuf, sizeof(UNPACKED_USAGE)); + } + dealloc_bunzip(bd); + return outbuf; +} +# define dealloc_usage_messages(s) free(s) + +#else + +# define unpack_usage_messages() usage_messages +# define dealloc_usage_messages(s) ((void)(s)) + +#endif /* FEATURE_COMPRESS_USAGE */ + + +void FAST_FUNC bb_show_usage(void) +{ + if (ENABLE_SHOW_USAGE) { +#ifdef SINGLE_APPLET_STR + /* Imagine that this applet is "true". Dont suck in printf! 
*/ + const char *usage_string = unpack_usage_messages(); + + if (*usage_string == '\b') { + full_write2_str("No help available.\n\n"); + } else { + full_write2_str("Usage: "SINGLE_APPLET_STR" "); + full_write2_str(usage_string); + full_write2_str("\n\n"); + } + if (ENABLE_FEATURE_CLEAN_UP) + dealloc_usage_messages((char*)usage_string); +#else + const char *p; + const char *usage_string = p = unpack_usage_messages(); + int ap = find_applet_by_name(applet_name); + + if (ap < 0) /* never happens, paranoia */ + xfunc_die(); + while (ap) { + while (*p++) continue; + ap--; + } + full_write2_str(bb_banner); + full_write2_str(" multi-call binary.\n"); + if (*p == '\b') + full_write2_str("\nNo help available.\n\n"); + else { + full_write2_str("\nUsage: "); + full_write2_str(applet_name); + full_write2_str(" "); + full_write2_str(p); + full_write2_str("\n"); + } + if (ENABLE_FEATURE_CLEAN_UP) + dealloc_usage_messages((char*)usage_string); +#endif + } + xfunc_die(); +} + +int FAST_FUNC find_applet_by_name(const char *name) +{ + unsigned i, max; + int j; + const char *p; + +/* The commented-out word-at-a-time code is ~40% faster, but +160 bytes. + * "Faster" here saves ~0.5 microsecond of real time - not worth it. + */ +#if 0 /*BB_UNALIGNED_MEMACCESS_OK && BB_LITTLE_ENDIAN*/ + uint32_t n32; + + /* Handle all names < 2 chars long early */ + if (name[0] == '\0') + return -1; /* "" is not a valid applet name */ + if (name[1] == '\0') { + if (!ENABLE_TEST) + return -1; /* 1-char name is not valid */ + if (name[0] != ']') + return -1; /* 1-char name which isn't "[" is not valid */ + /* applet "[" is always applet #0: */ + return 0; + } +#endif + + p = applet_names; + i = 0; +#if KNOWN_APPNAME_OFFSETS <= 0 + max = NUM_APPLETS; +#else + max = NUM_APPLETS * KNOWN_APPNAME_OFFSETS; + for (j = ARRAY_SIZE(applet_nameofs)-1; j >= 0; j--) { + const char *pp = applet_names + applet_nameofs[j]; + if (strcmp(name, pp) >= 0) { + //bb_error_msg("name:'%s' >= pp:'%s'", name, pp); + p = pp; + i = max - NUM_APPLETS; + break; + } + max -= NUM_APPLETS; + } + max /= (unsigned)KNOWN_APPNAME_OFFSETS; + i /= (unsigned)KNOWN_APPNAME_OFFSETS; + //bb_error_msg("name:'%s' starting from:'%s' i:%u max:%u", name, p, i, max); +#endif + + /* Open-coded linear search without strcmp/strlen calls for speed */ + +#if 0 /*BB_UNALIGNED_MEMACCESS_OK && BB_LITTLE_ENDIAN*/ + /* skip "[\0" name, it's surely not it */ + if (ENABLE_TEST && LONE_CHAR(p, '[')) + i++, p += 2; + /* All remaining applet names in p[] are at least 2 chars long */ + /* name[] is also at least 2 chars long */ + + n32 = (name[0] << 0) | (name[1] << 8) | (name[2] << 16); + while (i < max) { + uint32_t p32; + char ch; + + /* Quickly check match of the first 3 bytes */ + move_from_unaligned32(p32, p); + p += 3; + if ((p32 & 0x00ffffff) != n32) { + /* Most likely case: 3 first bytes do not match */ + i++; + if ((p32 & 0x00ff0000) == '\0') + continue; // p[2] was NUL + p++; + if ((p32 & 0xff000000) == '\0') + continue; // p[3] was NUL + /* p[0..3] aren't matching and none is NUL, check the rest */ + while (*p++ != '\0') + continue; + continue; + } + + /* Unlikely branch: first 3 bytes ([0..2]) match */ + if ((p32 & 0x00ff0000) == '\0') { + /* name is 2-byte long, it is full match */ + //bb_error_msg("found:'%s' i:%u", name, i); + return i; + } + /* Check remaining bytes [3..NUL] */ + ch = (p32 >> 24); + j = 3; + while (ch == name[j]) { + if (ch == '\0') { + //bb_error_msg("found:'%s' i:%u", name, i); + return i; + } + ch = *++p; + j++; + } + /* Not a match. 
Skip it, including NUL */ + while (ch != '\0') + ch = *++p; + p++; + i++; + } + return -1; +#else + while (i < max) { + char ch; + j = 0; + /* Do we see "name\0" in applet_names[p] position? */ + while ((ch = *p) == name[j]) { + if (ch == '\0') { + //bb_error_msg("found:'%s' i:%u", name, i); + return i; /* yes */ + } + p++; + j++; + } + /* No. + * p => 1st non-matching char in applet_names[], + * skip to and including NUL. + */ + while (ch != '\0') + ch = *++p; + p++; + i++; + } + return -1; +#endif +} + + +void lbb_prepare(const char *applet + IF_FEATURE_INDIVIDUAL(, char **argv)) + MAIN_EXTERNALLY_VISIBLE; +void lbb_prepare(const char *applet + IF_FEATURE_INDIVIDUAL(, char **argv)) +{ +#ifdef __GLIBC__ + (*(int **)&bb_errno) = __errno_location(); + barrier(); +#endif + applet_name = applet; + + if (ENABLE_LOCALE_SUPPORT) + setlocale(LC_ALL, ""); + +#ifndef ENABLE_TEST +#define ENABLE_TEST 0 +#endif +#if ENABLE_FEATURE_INDIVIDUAL + /* Redundant for busybox (run_applet_and_exit covers that case) + * but needed for "individual applet" mode */ + if (argv[1] + && !argv[2] + && strcmp(argv[1], "--help") == 0 + && !is_prefixed_with(applet, "busybox") + ) { + /* Special case. POSIX says "test --help" + * should be no different from e.g. "test --foo". */ + if (!ENABLE_TEST || strcmp(applet_name, "test") != 0) + bb_show_usage(); + } +#endif +} + +/* The code below can well be in applets/applets.c, as it is used only + * for busybox binary, not "individual" binaries. + * However, keeping it here and linking it into libbusybox.so + * (together with remaining tiny applets/applets.o) + * makes it possible to avoid --whole-archive at link time. + * This makes (shared busybox) + libbusybox smaller. + * (--gc-sections would be even better....) + */ + +const char *applet_name; +#if !BB_MMU +bool re_execed; +#endif + + +/* If not built as a single-applet executable... */ +#if !defined(SINGLE_APPLET_MAIN) + +IF_FEATURE_SUID(static uid_t ruid;) /* real uid */ + +# if ENABLE_FEATURE_SUID_CONFIG + +static struct suid_config_t { + /* next ptr must be first: this struct needs to be llist-compatible */ + struct suid_config_t *m_next; + struct bb_uidgid_t m_ugid; + int m_applet; + mode_t m_mode; +} *suid_config; + +static bool suid_cfg_readable; + +/* libbb candidate */ +static char *get_trimmed_slice(char *s, char *e) +{ + /* First, consider the value at e to be nul and back up until we + * reach a non-space char. Set the char after that (possibly at + * the original e) to nul. */ + while (e-- > s) { + if (!isspace(*e)) { + break; + } + } + e[1] = '\0'; + + /* Next, advance past all leading space and return a ptr to the + * first non-space char; possibly the terminating nul. */ + return skip_whitespace(s); +} + +static void parse_config_file(void) +{ + /* Don't depend on the tools to combine strings. */ + static const char config_file[] ALIGN1 = ATLAS_DATADIR "/measurement.conf"; + + struct suid_config_t *sct_head; + int applet_no; + FILE *f; + const char *errmsg; + unsigned lc; + smallint section; + struct stat st; + + ruid = getuid(); + if (ruid == 0) /* run by root - don't need to even read config file */ + return; + + if ((stat(config_file, &st) != 0) /* No config file? */ + || !S_ISREG(st.st_mode) /* Not a regular file? */ + || (st.st_uid != 0) /* Not owned by root? */ + || (st.st_mode & (S_IWGRP | S_IWOTH)) /* Writable by non-root? */ + || !(f = fopen_for_read(config_file)) /* Cannot open? 
*/ + ) { + return; + } + + suid_cfg_readable = 1; + sct_head = NULL; + section = lc = 0; + + while (1) { + char buffer[256]; + char *s; + + if (!fgets(buffer, sizeof(buffer), f)) { /* Are we done? */ + // Looks like bloat + //if (ferror(f)) { /* Make sure it wasn't a read error. */ + // errmsg = "reading"; + // goto pe_label; + //} + fclose(f); + suid_config = sct_head; /* Success, so set the pointer. */ + return; + } + + s = buffer; + lc++; /* Got a (partial) line. */ + + /* If a line is too long for our buffer, we consider it an error. + * The following test does mistreat one corner case though. + * If the final line of the file does not end with a newline and + * yet exactly fills the buffer, it will be treated as too long + * even though there isn't really a problem. But it isn't really + * worth adding code to deal with such an unlikely situation, and + * we do err on the side of caution. Besides, the line would be + * too long if it did end with a newline. */ + if (!strchr(s, '\n') && !feof(f)) { + errmsg = "line too long"; + goto pe_label; + } + + /* Trim leading and trailing whitespace, ignoring comments, and + * check if the resulting string is empty. */ + s = get_trimmed_slice(s, strchrnul(s, '#')); + if (!*s) { + continue; + } + + /* Check for a section header. */ + + if (*s == '[') { + /* Unlike the old code, we ignore leading and trailing + * whitespace for the section name. We also require that + * there are no stray characters after the closing bracket. */ + char *e = strchr(s, ']'); + if (!e /* Missing right bracket? */ + || e[1] /* Trailing characters? */ + || !*(s = get_trimmed_slice(s+1, e)) /* Missing name? */ + ) { + errmsg = "section header"; + goto pe_label; + } + /* Right now we only have one section so just check it. + * If more sections are added in the future, please don't + * resort to cascading ifs with multiple strcasecmp calls. + * That kind of bloated code is all too common. A loop + * and a string table would be a better choice unless the + * number of sections is very small. */ + if (strcasecmp(s, "SUID") == 0) { + section = 1; + continue; + } + section = -1; /* Unknown section so set to skip. */ + continue; + } + + /* Process sections. */ + + if (section == 1) { /* SUID */ + /* Since we trimmed leading and trailing space above, we're + * now looking for strings of the form + * [::space::]*=[::space::]* + * where both key and value could contain inner whitespace. */ + + /* First get the key (an applet name in our case). */ + char *e = strchr(s, '='); + if (e) { + s = get_trimmed_slice(s, e); + } + if (!e || !*s) { /* Missing '=' or empty key. */ + errmsg = "keyword"; + goto pe_label; + } + + /* Ok, we have an applet name. Process the rhs if this + * applet is currently built in and ignore it otherwise. + * Note: this can hide config file bugs which only pop + * up when the busybox configuration is changed. */ + applet_no = find_applet_by_name(s); + if (applet_no >= 0) { + unsigned i; + struct suid_config_t *sct; + + /* Note: We currently don't check for duplicates! + * The last config line for each applet will be the + * one used since we insert at the head of the list. + * I suppose this could be considered a feature. */ + sct = xzalloc(sizeof(*sct)); + sct->m_applet = applet_no; + /*sct->m_mode = 0;*/ + sct->m_next = sct_head; + sct_head = sct; + + /* Get the specified mode. */ + + e = skip_whitespace(e+1); + + for (i = 0; i < 3; i++) { + /* There are 4 chars for each of user/group/other. 
+ * "x-xx" instead of "x-" are to make + * "idx > 3" check catch invalid chars. + */ + static const char mode_chars[] ALIGN1 = "Ssx-" "Ssx-" "x-xx"; + static const unsigned short mode_mask[] ALIGN2 = { + S_ISUID, S_ISUID|S_IXUSR, S_IXUSR, 0, /* Ssx- */ + S_ISGID, S_ISGID|S_IXGRP, S_IXGRP, 0, /* Ssx- */ + S_IXOTH, 0 /* x- */ + }; + const char *q = strchrnul(mode_chars + 4*i, *e); + unsigned idx = q - (mode_chars + 4*i); + if (idx > 3) { + errmsg = "mode"; + goto pe_label; + } + sct->m_mode |= mode_mask[q - mode_chars]; + e++; + } + + /* Now get the user/group info. */ + + s = skip_whitespace(e); + /* Default is 0.0, else parse USER.GROUP: */ + if (*s) { + /* We require whitespace between mode and USER.GROUP */ + if ((s == e) || !(e = strchr(s, '.'))) { + errmsg = "uid.gid"; + goto pe_label; + } + *e = ':'; /* get_uidgid needs USER:GROUP syntax */ + if (get_uidgid(&sct->m_ugid, s) == 0) { + errmsg = "unknown user/group"; + goto pe_label; + } + } + } + continue; + } + + /* Unknown sections are ignored. */ + + /* Encountering configuration lines prior to seeing a + * section header is treated as an error. This is how + * the old code worked, but it may not be desirable. + * We may want to simply ignore such lines in case they + * are used in some future version of busybox. */ + if (!section) { + errmsg = "keyword outside section"; + goto pe_label; + } + } /* while (1) */ + + pe_label: + fclose(f); + bb_error_msg("parse error in %s, line %u: %s", config_file, lc, errmsg); + + /* Release any allocated memory before returning. */ + llist_free((llist_t*)sct_head, NULL); +} +# else +static inline void parse_config_file(void) +{ + IF_FEATURE_SUID(ruid = getuid();) +} +# endif /* FEATURE_SUID_CONFIG */ + + +# if ENABLE_FEATURE_SUID && NUM_APPLETS > 0 +/* check if u is member of group g */ +static int ingroup(uid_t u, gid_t g) +{ + struct group *grp = getgrgid(g); + if (grp) { + char **mem; + for (mem = grp->gr_mem; *mem; mem++) { + struct passwd *pwd = getpwnam(*mem); + if (pwd && (pwd->pw_uid == u)) + return 1; + } + } + return 0; +} + +static void check_suid(int applet_no) +{ + gid_t rgid; /* real gid */ + + if (ruid == 0) /* set by parse_config_file() */ + return; /* run by root - no need to check more */ + rgid = getgid(); + +# if ENABLE_FEATURE_SUID_CONFIG + if (suid_cfg_readable) { + uid_t uid; + struct suid_config_t *sct; + mode_t m; + + for (sct = suid_config; sct; sct = sct->m_next) { + if (sct->m_applet == applet_no) + goto found; + } + goto check_need_suid; + found: + /* Is this user allowed to run this applet? */ + m = sct->m_mode; + if (sct->m_ugid.uid == ruid) + /* same uid */ + m >>= 6; + else if ((sct->m_ugid.gid == rgid) || ingroup(ruid, sct->m_ugid.gid)) + /* same group / in group */ + m >>= 3; + if (!(m & S_IXOTH)) /* is x bit not set? */ + bb_error_msg_and_die("you have no permission to run this applet"); + + /* We set effective AND saved ids. If saved-id is not set + * like we do below, seteuid(0) can still later succeed! */ + + /* Are we directed to change gid + * (APPLET = *s* USER.GROUP or APPLET = *S* USER.GROUP)? + */ + if (sct->m_mode & S_ISGID) + rgid = sct->m_ugid.gid; + /* else: we will set egid = rgid, thus dropping sgid effect */ + if (setresgid(-1, rgid, rgid)) + bb_perror_msg_and_die("setresgid"); + + /* Are we directed to change uid + * (APPLET = s** USER.GROUP or APPLET = S** USER.GROUP)? 
+ */ + uid = ruid; + if (sct->m_mode & S_ISUID) + uid = sct->m_ugid.uid; + /* else: we will set euid = ruid, thus dropping suid effect */ + if (setresuid(-1, uid, uid)) + bb_perror_msg_and_die("setresuid"); + + goto ret; + } +# if !ENABLE_FEATURE_SUID_CONFIG_QUIET + { + static bool onetime = 0; + + if (!onetime) { + onetime = 1; + bb_error_msg("using fallback suid method"); + } + } +# endif + check_need_suid: +# endif + if (APPLET_SUID(applet_no) == BB_SUID_REQUIRE) { + /* Real uid is not 0. If euid isn't 0 too, suid bit + * is most probably not set on our executable */ + if (geteuid()) + bb_error_msg_and_die("must be suid to work properly"); + } else if (APPLET_SUID(applet_no) == BB_SUID_DROP) { + xsetgid(rgid); /* drop all privileges */ + xsetuid(ruid); + } +# if ENABLE_FEATURE_SUID_CONFIG + ret: ; + llist_free((llist_t*)suid_config, NULL); +# endif +} +# else +# define check_suid(x) ((void)0) +# endif /* FEATURE_SUID */ + + +# if ENABLE_FEATURE_INSTALLER +static const char usr_bin [] ALIGN1 = "/usr/bin/"; +static const char usr_sbin[] ALIGN1 = "/usr/sbin/"; +static const char *const install_dir[] = { + &usr_bin [8], /* "/" */ + &usr_bin [4], /* "/bin/" */ + &usr_sbin[4] /* "/sbin/" */ +# if !ENABLE_INSTALL_NO_USR + ,usr_bin + ,usr_sbin +# endif +}; + +/* create (sym)links for each applet */ +static void install_links(const char *busybox, int use_symbolic_links, + char *custom_install_dir) +{ + /* directory table + * this should be consistent w/ the enum, + * busybox.h::bb_install_loc_t, or else... */ + int (*lf)(const char *, const char *); + char *fpc; + const char *appname = applet_names; + unsigned i; + int rc; + + lf = link; + if (use_symbolic_links) + lf = symlink; + + for (i = 0; i < ARRAY_SIZE(applet_main); i++) { + fpc = concat_path_file( + custom_install_dir ? custom_install_dir : install_dir[APPLET_INSTALL_LOC(i)], + appname); + // debug: bb_error_msg("%slinking %s to busybox", + // use_symbolic_links ? "sym" : "", fpc); + rc = lf(busybox, fpc); + if (rc != 0 && errno != EEXIST) { + bb_simple_perror_msg(fpc); + } + free(fpc); + while (*appname++ != '\0') + continue; + } +} +# elif ENABLE_BUSYBOX +static void install_links(const char *busybox UNUSED_PARAM, + int use_symbolic_links UNUSED_PARAM, + char *custom_install_dir UNUSED_PARAM) +{ +} +# endif + +# if ENABLE_BUSYBOX +static void run_applet_and_exit(const char *name, char **argv) NORETURN; + +#ifndef ENABLE_FEATURE_AUTOWIDTH +#define ENABLE_FEATURE_AUTOWIDTH 0 +#endif +#ifndef IF_NOT_FEATURE_SH_STANDALONE +#define IF_NOT_FEATURE_SH_STANDALONE(x) +#endif +#ifndef IF_FEATURE_SH_STANDALONE +#define IF_FEATURE_SH_STANDALONE(x) +#endif + +/* If we were called as "busybox..." */ +static int busybox_main(char **argv) +{ + if (!argv[1]) { + /* Called without arguments */ + const char *a; + int col; + unsigned output_width; + help: + output_width = 80; + if (ENABLE_FEATURE_AUTOWIDTH) { + /* Obtain the terminal width */ + output_width = get_terminal_width(2); + } + + dup2(1, 2); + full_write2_str(bb_banner); /* reuse const string */ + full_write2_str(" multi-call binary.\n"); /* reuse */ + full_write2_str( + "BusyBox is copyrighted by many authors between 1998-2015.\n" + "Licensed under GPLv2. 
See source distribution for detailed\n" + "copyright notices.\n" + "\n" + "Usage: busybox [function [arguments]...]\n" + " or: busybox --list"IF_FEATURE_INSTALLER("[-full]")"\n" + IF_FEATURE_INSTALLER( + " or: busybox --install [-s] [DIR]\n" + ) + " or: function [arguments]...\n" + "\n" + IF_NOT_FEATURE_SH_STANDALONE( + "\tBusyBox is a multi-call binary that combines many common Unix\n" + "\tutilities into a single executable. Most people will create a\n" + "\tlink to busybox for each function they wish to use and BusyBox\n" + "\twill act like whatever it was invoked as.\n" + ) + IF_FEATURE_SH_STANDALONE( + "\tBusyBox is a multi-call binary that combines many common Unix\n" + "\tutilities into a single executable. The shell in this build\n" + "\tis configured to run built-in utilities without $PATH search.\n" + "\tYou don't need to install a link to busybox for each utility.\n" + "\tTo run external program, use full path (/sbin/ip instead of ip).\n" + ) + "\n" + "Currently defined functions:\n" + ); + col = 0; + a = applet_names; + /* prevent last comma to be in the very last pos */ + output_width--; + while (*a) { + int len2 = strlen(a) + 2; + if (col >= (int)output_width - len2) { + full_write2_str(",\n"); + col = 0; + } + if (col == 0) { + col = 6; + full_write2_str("\t"); + } else { + full_write2_str(", "); + } + full_write2_str(a); + col += len2; + a += len2 - 1; + } + full_write2_str("\n"); + return 0; + } + + if (is_prefixed_with(argv[1], "--list")) { + unsigned i = 0; + const char *a = applet_names; + dup2(1, 2); + while (*a) { +# if ENABLE_FEATURE_INSTALLER + if (argv[1][6]) /* --list-full? */ + full_write2_str(install_dir[APPLET_INSTALL_LOC(i)] + 1); +# endif + full_write2_str(a); + full_write2_str("\n"); + i++; + while (*a++ != '\0') + continue; + } + return 0; + } + + if (ENABLE_FEATURE_INSTALLER && strcmp(argv[1], "--install") == 0) { + int use_symbolic_links; + const char *busybox; + + busybox = xmalloc_readlink(bb_busybox_exec_path); + if (!busybox) { + /* bb_busybox_exec_path is usually "/proc/self/exe". + * In chroot, readlink("/proc/self/exe") usually fails. + * In such case, better use argv[0] as symlink target + * if it is a full path name. + */ + if (argv[0][0] != '/') + bb_error_msg_and_die("'%s' is not an absolute path", argv[0]); + busybox = argv[0]; + } + /* busybox --install [-s] [DIR]: + * -s: make symlinks + * DIR: directory to install links to + */ + use_symbolic_links = (argv[2] && strcmp(argv[2], "-s") == 0 && ++argv); + install_links(busybox, use_symbolic_links, argv[2]); + return 0; + } + + if (strcmp(argv[1], "--help") == 0) { + /* "busybox --help []" */ + if (!argv[2]) + goto help; + /* convert to " --help" */ + argv[0] = argv[2]; + argv[2] = NULL; + } else { + /* "busybox arg1 arg2 ..." */ + argv++; + } + /* We support "busybox /a/path/to/applet args..." too. Allows for + * "#!/bin/busybox"-style wrappers */ + applet_name = bb_get_last_path_component_nostrip(argv[0]); + run_applet_and_exit(applet_name, argv); +} +# endif + +# if NUM_APPLETS > 0 +void FAST_FUNC run_applet_no_and_exit(int applet_no, char **argv) +{ + int argc = 1; + + while (argv[argc]) + argc++; + + /* Reinit some shared global data */ + xfunc_error_retval = EXIT_FAILURE; + applet_name = bb_get_last_path_component_nostrip(argv[0]); + + /* Special case. POSIX says "test --help" + * should be no different from e.g. "test --foo". + * Thus for "test", we skip --help check. + * "true" and "false" are also special. 
+ */ + if (1 +# if defined APPLET_NO_test + && applet_no != APPLET_NO_test +# endif +# if defined APPLET_NO_true + && applet_no != APPLET_NO_true +# endif +# if defined APPLET_NO_false + && applet_no != APPLET_NO_false +# endif + ) { + if (argc == 2 && strcmp(argv[1], "--help") == 0) { + /* Make "foo --help" exit with 0: */ + xfunc_error_retval = 0; + bb_show_usage(); + } + } + if (ENABLE_FEATURE_SUID) + check_suid(applet_no); + xfunc_error_retval = applet_main[applet_no](argc, argv); + /* Note: applet_main() may also not return (die on a xfunc or such) */ + xfunc_die(); +} +# endif /* NUM_APPLETS > 0 */ + +# if ENABLE_BUSYBOX || NUM_APPLETS > 0 +static NORETURN void run_applet_and_exit(const char *name, char **argv) +{ +# if ENABLE_BUSYBOX + if (is_prefixed_with(name, "busybox")) + exit(busybox_main(argv)); +# endif +# if NUM_APPLETS > 0 + /* find_applet_by_name() search is more expensive, so goes second */ + { + int applet = find_applet_by_name(name); + if (applet >= 0) + run_applet_no_and_exit(applet, argv); + } +# endif + + /*bb_error_msg_and_die("applet not found"); - links in printf */ + full_write2_str(applet_name); + full_write2_str(": applet not found\n"); + /* POSIX: "If a command is not found, the exit status shall be 127" */ + exit(127); +} +# endif + +#endif /* !defined(SINGLE_APPLET_MAIN) */ + + +#if ENABLE_BUILD_LIBBUSYBOX +int lbb_main(char **argv) +#else +int main(int argc UNUSED_PARAM, char **argv) +#endif +{ +#if 0 + /* TODO: find a use for a block of memory between end of .bss + * and end of page. For example, I'm getting "_end:0x812e698 2408 bytes" + * - more than 2k of wasted memory (in this particular build) + * *per each running process*! + * (If your linker does not generate "_end" name, weak attribute + * makes &_end == NULL, end_len == 0 here.) + */ + extern char _end[] __attribute__((weak)); + unsigned end_len = (-(int)_end) & 0xfff; + printf("_end:%p %u bytes\n", &_end, end_len); +#endif + + /* Tweak malloc for reduced memory consumption */ +#ifdef M_TRIM_THRESHOLD + /* M_TRIM_THRESHOLD is the maximum amount of freed top-most memory + * to keep before releasing to the OS + * Default is way too big: 256k + */ + mallopt(M_TRIM_THRESHOLD, 8 * 1024); +#endif +#ifdef M_MMAP_THRESHOLD + /* M_MMAP_THRESHOLD is the request size threshold for using mmap() + * Default is too big: 256k + */ + mallopt(M_MMAP_THRESHOLD, 32 * 1024 - 256); +#endif +#if 0 /*def M_TOP_PAD*/ + /* When the program break is increased, then M_TOP_PAD bytes are added + * to the sbrk(2) request. When the heap is trimmed because of free(3), + * this much free space is preserved at the top of the heap. + * glibc default seems to be way too big: 128k, but need to verify. 
+ */ + mallopt(M_TOP_PAD, 8 * 1024); +#endif + +#if !BB_MMU + /* NOMMU re-exec trick sets high-order bit in first byte of name */ + if (argv[0][0] & 0x80) { + re_execed = 1; + argv[0][0] &= 0x7f; + } +#endif + +#if defined(SINGLE_APPLET_MAIN) + + /* Only one applet is selected in .config */ + if (argv[1] && is_prefixed_with(argv[0], "busybox")) { + /* "busybox " should still work as expected */ + argv++; + } + /* applet_names in this case is just "applet\0\0" */ + lbb_prepare(applet_names IF_FEATURE_INDIVIDUAL(, argv)); + return SINGLE_APPLET_MAIN(argc, argv); + +#elif !ENABLE_BUSYBOX && NUM_APPLETS == 0 + + full_write2_str(bb_basename(argv[0])); + full_write2_str(": no applets enabled\n"); + exit(127); + +#else + + lbb_prepare("busybox" IF_FEATURE_INDIVIDUAL(, argv)); +# if !ENABLE_BUSYBOX + if (argv[1] && is_prefixed_with(bb_basename(argv[0]), "busybox")) + argv++; +# endif + applet_name = argv[0]; + if (applet_name[0] == '-') + applet_name++; + applet_name = bb_basename(applet_name); + parse_config_file(); /* ...maybe, if FEATURE_SUID_CONFIG */ + run_applet_and_exit(applet_name, argv); + +#endif +} diff --git a/probe-busybox/libbb/atlas_bb64.c b/probe-busybox/libbb/atlas_bb64.c new file mode 100644 index 00000000..1c344a23 --- /dev/null +++ b/probe-busybox/libbb/atlas_bb64.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" +#define BUF_CHUNK 256 + +struct buf +{ + size_t offset; + size_t size; + size_t maxsize; + unsigned char *buf; + int fd; +}; + +void buf_init(struct buf *buf, int fd) +{ + buf->maxsize= 0; + buf->size= 0; + buf->offset= 0; + buf->buf= NULL; + buf->fd= fd; +} + +int buf_add(struct buf *buf, const void *data, size_t len ) +{ + size_t maxsize; + void *newbuf; + + if (buf->size+len <= buf->maxsize) + { + /* Easy case, just add data */ + memcpy(buf->buf+buf->size, data, len); + buf->size += len; + return 0; + } + + /* Just get a new buffer */ + maxsize= buf->size-buf->offset + len + BUF_CHUNK; + + newbuf= malloc(maxsize); + if (!newbuf) + { + fprintf(stderr, "unable to allocate %ld bytes\n", (long)maxsize); + return (1); + } + + if (buf->offset < buf->size) + { + /* Copy existing data */ + memcpy(newbuf, buf->buf+buf->offset, buf->size-buf->offset); + buf->size -= buf->offset; + buf->offset= 0; + } + else + { + buf->size= buf->offset= 0; + } + buf->maxsize= maxsize; + free(buf->buf); + buf->buf= newbuf; + + memcpy(buf->buf+buf->size, data, len); + buf->size += len; + return 0; +} + +int buf_add_b64(struct buf *buf, void *data, size_t len, int mime_nl) +{ + char b64[]= + "ABCDEFGHIJKLMNOP" + "QRSTUVWXYZabcdef" + "ghijklmnopqrstuv" + "wxyz0123456789+/"; + int i; + uint8_t *p; + uint32_t v; + char str[4]; + + p= data; + + for (i= 0; i+3 <= len; i += 3, p += 3) + { + v= (p[0] << 16) + (p[1] << 8) + p[2]; + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= b64[(v >> 6) & 63]; + str[3]= b64[(v >> 0) & 63]; + buf_add(buf, str, 4); + if(mime_nl) + if (i % 48 == 45) + buf_add(buf, "\n", 1); + } + switch(len-i) + { + case 0: break; /* Nothing to do */ + case 1: + v= (p[0] << 16); + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= '='; + str[3]= '='; + buf_add(buf, str, 4); + break; + case 2: + v= (p[0] << 16) + (p[1] << 8); + str[0]= b64[(v >> 18) & 63]; + str[1]= b64[(v >> 12) & 63]; + str[2]= b64[(v >> 6) & 63]; + str[3]= '='; + buf_add(buf, str, 4); + break; + default: + fprintf(stderr, "bad state in buf_add_b64"); + } +} + 
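/*
 * Illustrative usage sketch (not part of the original file; the names
 * outbuf, payload and len are hypothetical): a caller that wants to emit
 * a base64-encoded blob on stdout could drive the API above roughly like
 * this:
 *
 *	struct buf outbuf;
 *
 *	buf_init(&outbuf, 1);			// remember fd 1 (stdout)
 *	buf_add(&outbuf, "BODY ", 5);		// literal text is appended as-is
 *	buf_add_b64(&outbuf, payload, len, 0);	// payload appended as base64,
 *						// without MIME-style line breaks
 *	write(outbuf.fd, outbuf.buf + outbuf.offset,
 *		outbuf.size - outbuf.offset);	// flush what was collected
 *	buf_cleanup(&outbuf);			// free the internal buffer
 */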
+void buf_cleanup(struct buf *buf) +{ + if(buf->maxsize) + free(buf->buf); + buf->buf = NULL; + buf->offset= buf->size= buf->maxsize= 0; +} diff --git a/probe-busybox/libbb/atlas_bb64.h b/probe-busybox/libbb/atlas_bb64.h new file mode 100644 index 00000000..d745a9b9 --- /dev/null +++ b/probe-busybox/libbb/atlas_bb64.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +struct buf +{ + size_t offset; + size_t size; + size_t maxsize; + char *buf; + int fd; +}; + +void buf_init(struct buf *buf, int fd); +int buf_add(struct buf *buf, const void *data, size_t len ); +int buf_add_b64(struct buf *buf, void *data, size_t len, int mime_nl); +void buf_cleanup(struct buf *buf); diff --git a/probe-busybox/libbb/atlas_check_addr.c b/probe-busybox/libbb/atlas_check_addr.c new file mode 100644 index 00000000..a4c3af94 --- /dev/null +++ b/probe-busybox/libbb/atlas_check_addr.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2015 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +struct ipv4_prefix +{ + uint32_t addr; + unsigned len; +} +static bad_ipv4[] = +{ + { 0x7F000000, 8 }, /* 127.0.0.0/8 localhost */ + { 0x0A000000, 8 }, /* 10.0.0.0/8 (RFC-1918) */ + { 0xAC100000, 12 }, /* 172.16.0.0/12 (RFC-1918) */ + { 0xC0A80000, 16 }, /* 192.168.0.0/16 (RFC-1918) */ + { 0xA9FE0000, 16 }, /* 169.254.0.0/16 (RFC-3927) */ + { 0xE0000000, 4 }, /* 224.0.0.0/4 multicast */ +}; + +struct ipv6_prefix +{ + uint16_t addr[8]; + unsigned len; +} +static bad_ipv6[] = +{ + { { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001 }, + 128 }, /* ::1 loopback */ + { { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000 }, + 96 }, /* ::ffff:0:0/96 IPv4-mapped */ + { { 0xE000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + 3 }, /* e000::/3 ULA, link local, multicast */ +}; + +int atlas_check_addr(const struct sockaddr *sa, socklen_t len) +{ + uint16_t addr2, mask2; + int i, j, prefix_len; + uint32_t addr4, mask4; + uint16_t *addr2p; + const struct sockaddr_in *sin4p; + const struct sockaddr_in6 *sin6p; + char *cp; + + static int allow_all= -1; + + if (allow_all == -1) + { + allow_all= 0; /* Safe default */ + cp= getenv("ATLAS_DISABLE_CHECK_ADDR"); + if (cp != NULL && strcmp(cp, "yes") == 0) + allow_all= 1; + } + + if (allow_all) + return 0; /* All addresses are allowed */ + + switch(sa->sa_family) + { + case AF_INET: + if (len < sizeof(*sin4p)) + return -1; + sin4p= (const struct sockaddr_in *)sa; + addr4= sin4p->sin_addr.s_addr; + addr4= ntohl(addr4); + for (i= 0; isin6_addr; + for (i= 0; is6_addr16[i/16] != + prefix->s6_addr16[i/16]) + { + /* Different prefix */ + break; + } + continue; + } + mask= ~((1ul << (16-(prefix_len % 16)))-1); + mask= htons(mask); + if ((addr->s6_addr16[i/16] & mask) == + prefix->s6_addr16[i/16]) + { + return 1; + } + break; + } + if (i < prefix_len) + { + /* No match */ + return 0; + } + + /* Match */ + return 1; +} +#endif diff --git a/probe-busybox/libbb/atlas_gettime_mono.c b/probe-busybox/libbb/atlas_gettime_mono.c new file mode 100644 index 00000000..41f9b9b4 --- /dev/null +++ b/probe-busybox/libbb/atlas_gettime_mono.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ */ + +#include "libbb.h" + +int gettime_mono(struct timespec *tsp) +{ + static time_t reproducible_time= 0; + + if (atlas_tests()) + { + ++reproducible_time; + tsp->tv_sec= reproducible_time; + tsp->tv_nsec= 1000*reproducible_time; + return 0; + } + + return clock_gettime(CLOCK_MONOTONIC, tsp); +} + diff --git a/probe-busybox/libbb/atlas_ipv6_option.c b/probe-busybox/libbb/atlas_ipv6_option.c new file mode 100644 index 00000000..eb37f686 --- /dev/null +++ b/probe-busybox/libbb/atlas_ipv6_option.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE for details. + */ + +#include "libbb.h" + +#define OPT_PAD1 0 +#define OPT_PADN 1 + +int do_ipv6_option(int sock, int hbh_dest, + unsigned size) +{ + int i, r; + size_t totsize, ehlen, padlen; + + char packet[4096]; /* Assume we can put the on the stack. And + * assume this is big enough. + */ + + if (size == 0) + { + r= setsockopt(sock, IPPROTO_IPV6, + hbh_dest ? IPV6_DSTOPTS : IPV6_HOPOPTS, NULL, 0); + return r; + } + + /* Compute the totsize we need */ + totsize = 2 + size; + if (totsize % 8) + totsize += 8 - (totsize % 8); + + /* Consistency check */ + if (totsize > sizeof(packet)) + { + errno= EINVAL; + return -1; + } + + ehlen= totsize/8 - 1; + if (ehlen > 255) + { + errno= EINVAL; + return -1; + } + + memset(packet, '\0', totsize); + packet[1]= ehlen; + for (i= 2; i 255) + padlen= 255; + packet[i]= OPT_PADN; + packet[i+1]= padlen; + i += 2+padlen; + } + if (hbh_dest) + { + r= setsockopt(sock, IPPROTO_IPV6, IPV6_DSTOPTS, packet, + totsize); + } + else + { + r= setsockopt(sock, IPPROTO_IPV6, IPV6_HOPOPTS, packet, + totsize); + } + + return r; +} + diff --git a/probe-busybox/libbb/atlas_name_macro.c b/probe-busybox/libbb/atlas_name_macro.c new file mode 100644 index 00000000..0674fd07 --- /dev/null +++ b/probe-busybox/libbb/atlas_name_macro.c @@ -0,0 +1,87 @@ +#include "libbb.h" + +#define URANDOM_DEV "/dev/urandom" + +static char hex_chars[]= "0123456789abcdef"; + +char *atlas_name_macro(char *str) +{ + unsigned char c; + int i, fd; + size_t len; + char *p, *in, *out; + char buf[256]; + unsigned char random_buf[8]; + + p= strchr(str, '$'); + if (p == NULL) + return strdup(str); + + in= str; + out= buf; + + while (*in) + { + p= strchr(in, '$'); + if (p == NULL) + { + strlcpy(out, in, buf+sizeof(buf)-out); + break; + } + if (p != in) + { + len= p-in; + + if (len+1 > buf+sizeof(buf)-out) + return NULL; + memcpy(out, in, len); + out[len]= '\0'; + + out += len; + } + + switch(p[1]) + { + case 'p': + snprintf(out, buf+sizeof(buf)-out, "%d", + get_probe_id()); + break; + case 't': + snprintf(out, buf+sizeof(buf)-out, "%ld", + (long)time(NULL)); + break; + case 'r': + /* We need to hex digits per byte in random_buf */ + if (sizeof(random_buf)*2+1 > buf+sizeof(buf)-out) + return NULL; + + fd= open(URANDOM_DEV, O_RDONLY); + + /* Best effort, just ignore errors */ + if (fd != -1) + { + read(fd, random_buf, sizeof(random_buf)); + close(fd); + } + + for (i= 0; i> 4) & 0xf]; + out[1]= hex_chars[c & 0xf]; + out += 2; + } + + out[0]= '\0'; + break; + + default: + return NULL; + } + in= p+2; + out += strlen(out); + } + + return strdup(buf); +} diff --git a/probe-busybox/libbb/atlas_probe.c b/probe-busybox/libbb/atlas_probe.c new file mode 100644 index 00000000..d6b2a557 --- /dev/null +++ b/probe-busybox/libbb/atlas_probe.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ */ + +#define REG_INIT_REPLY_REL "reg_init_reply.txt" + +#include "libbb.h" +#include "atlas_path.h" +#include + +int get_probe_id(void) +{ + static int probe_id= -1; + + size_t len; + char *check, *fn; + const char *key; + FILE *fp; + char buf[80]; + + if (probe_id > 0) + return probe_id; /* Assume probe ID never changes */ + + asprintf(&fn, "%s/%s", ATLAS_STATUS, REG_INIT_REPLY_REL); + fp= fopen(fn, "r"); + free(fn); fn= NULL; + if (!fp) + return -1; + + probe_id= -1; + while (fgets(buf, sizeof(buf), fp) != NULL) + { + if (strchr(buf, '\n') == NULL) + continue; + key= "PROBE_ID "; + len= strlen(key); + + if (strncmp(buf, key, len) != 0 || strlen(buf) <= len) + continue; + probe_id= strtol(buf+len, &check, 10); + break; + } + fclose(fp); + return probe_id; +} diff --git a/probe-busybox/libbb/atlas_probe.h b/probe-busybox/libbb/atlas_probe.h new file mode 100644 index 00000000..27879850 --- /dev/null +++ b/probe-busybox/libbb/atlas_probe.h @@ -0,0 +1,6 @@ +/* + * Copyright (c) 2013 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +int get_probe_id(void); diff --git a/probe-busybox/libbb/atlas_read_response.c b/probe-busybox/libbb/atlas_read_response.c new file mode 100644 index 00000000..bc0e27b0 --- /dev/null +++ b/probe-busybox/libbb/atlas_read_response.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +static int got_type= 0; +static int stored_type; + +void peek_response(int fd, int *typep) +{ + if (!got_type) + { + if (read(fd, &stored_type, sizeof(stored_type)) != + sizeof(stored_type)) + { + fprintf(stderr, "peek_response: error reading\n"); + exit(1); + } + got_type= 1; + } + *typep= stored_type; +} + +void peek_response_file(FILE *file, int *typep) +{ + if (!got_type) + { + if (fread(&stored_type, sizeof(stored_type), 1, file) != 1) + { + fprintf(stderr, "peek_response_file: error reading\n"); + exit(1); + } + got_type= 1; + } + *typep= stored_type; +} + +void read_response(int fd, int type, size_t *sizep, void *data) +{ + int tmp_type; + size_t tmp_size; + + if (got_type) + { + tmp_type= stored_type; + got_type= 0; + } + else + { + if (read(fd, &tmp_type, sizeof(tmp_type)) != sizeof(tmp_type)) + { + fprintf(stderr, "read_response: error reading\n"); + exit(1); + } + } + if (tmp_type != type) + { + fprintf(stderr, + "read_response: wrong type, expected %d, got %d\n", + type, tmp_type); + exit(1); + } + if (read(fd, &tmp_size, sizeof(tmp_size)) != sizeof(tmp_size)) + { + fprintf(stderr, "read_response: error reading\n"); + exit(1); + } + if (tmp_size > *sizep) + { + fprintf(stderr, "read_response: data bigger than buffer\n"); + exit(1); + } + *sizep= tmp_size; + if (read(fd, data, tmp_size) != tmp_size) + { + fprintf(stderr, "read_response: error reading\n"); + exit(1); + } +} + + +void read_response_file(FILE *file, int type, size_t *sizep, void *data) +{ + int r, tmp_type; + size_t tmp_size; + + if (got_type) + { + tmp_type= stored_type; + got_type= 0; + } + else if (fread(&tmp_type, sizeof(tmp_type), 1, file) != 1) + { + fprintf(stderr, "read_response_file: error reading\n"); + exit(1); + } + if (tmp_type != type) + { + fprintf(stderr, + "read_response_file: wrong type, expected %d, got %d\n", + type, tmp_type); + exit(1); + } + if (fread(&tmp_size, sizeof(tmp_size), 1, file) != 1) + { + fprintf(stderr, "read_response_file: error reading\n"); + exit(1); + } + if (tmp_size > *sizep) + { + fprintf(stderr, + 
"read_response_file: data bigger than buffer\n"); + exit(1); + } + *sizep= tmp_size; + if (tmp_size != 0) + { + r= fread(data, tmp_size, 1, file); + if (r != 1) + { + fprintf(stderr, + "read_response_file: error reading %u bytes, got %d: %s\n", + (unsigned)tmp_size, r, strerror(errno)); + exit(1); + } + } +} + diff --git a/probe-busybox/libbb/atlas_tests.c b/probe-busybox/libbb/atlas_tests.c new file mode 100644 index 00000000..30ce6852 --- /dev/null +++ b/probe-busybox/libbb/atlas_tests.c @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +int atlas_tests(void) +{ + static int do_tests= -1; + + if (!do_tests) + return 0; + + if (do_tests == -1) + do_tests= (getenv("ATLAS_TESTS") != NULL); + return do_tests; +} + diff --git a/probe-busybox/libbb/atlas_time.c b/probe-busybox/libbb/atlas_time.c new file mode 100644 index 00000000..9b39007b --- /dev/null +++ b/probe-busybox/libbb/atlas_time.c @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +time_t atlas_time(void) +{ + if (atlas_tests()) + return 999999999; + else + return time(NULL); +} + diff --git a/probe-busybox/libbb/atlas_timesync.c b/probe-busybox/libbb/atlas_timesync.c new file mode 100644 index 00000000..712f086c --- /dev/null +++ b/probe-busybox/libbb/atlas_timesync.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2013-2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" +#include +#include "atlas_path.h" + +int get_timesync(void) +{ + char *fn; + FILE *fh; + int lastsync; + + if (atlas_tests()) + return 123; + + asprintf(&fn, "%s/%s", ATLAS_SPOOLDIR, ATLAS_TIMESYNC_FILE_REL); + fh= fopen(fn, "r"); + free(fn); fn= NULL; + if (!fh) + return -1; + fscanf(fh, "%d", &lastsync); + fclose(fh); + return time(NULL)-lastsync; +} + diff --git a/probe-busybox/libbb/atlas_unsafe.c b/probe-busybox/libbb/atlas_unsafe.c new file mode 100644 index 00000000..78be75aa --- /dev/null +++ b/probe-busybox/libbb/atlas_unsafe.c @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +int atlas_unsafe(void) +{ + static int allow_unsafe= -1; + + if (!allow_unsafe) + return 0; + + if (allow_unsafe == -1) + allow_unsafe= (getenv("ATLAS_UNSAFE") != NULL); + return allow_unsafe; +} + diff --git a/probe-busybox/libbb/atlas_version.c b/probe-busybox/libbb/atlas_version.c new file mode 100644 index 00000000..304a83c6 --- /dev/null +++ b/probe-busybox/libbb/atlas_version.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2019 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. 
+ */ + +#include "libbb.h" +#include +#include "atlas_path.h" + +#define ATLAS_FW_VERSION_REL "FIRMWARE_APPS_VERSION" + +#define DBQ(str) "\"" #str "\"" + +static int get_atlas_fw_version(void) +{ + static int fw_version= -1; + + int r, fw; + char *fn; + FILE *file; + + if (fw_version != -1) + return fw_version; + + asprintf(&fn, "%s/%s", ATLAS_DATADIR, ATLAS_FW_VERSION_REL); + file= fopen(fn, "r"); + if (file == NULL) + { + free(fn); fn= NULL; + return -1; + } + r= fscanf(file, "%d", &fw); + fclose(file); + if (r == -1) + { + free(fn); fn= NULL; + return -1; + } + free(fn); fn= NULL; + + fw_version= fw; + return fw; +} + +char *atlas_get_version_json_str(void) +{ + static char version_buf[80]; /* Enough? */ + static int first= 1; + + if (first) + { + first= 0; + + if (getenv("ATLAS_TESTS")) + { + snprintf(version_buf, sizeof(version_buf), + DBQ(fw) ":%d, " DBQ(mver) ": " DBQ(%s), + 9999, "0.0.0"); + } + else + { + snprintf(version_buf, sizeof(version_buf), + DBQ(fw) ":%d, " DBQ(mver) ": " DBQ(%s), + get_atlas_fw_version(), ATLAS_MSM_VERSION); + } + } + return version_buf; +} + diff --git a/probe-busybox/libbb/atlas_write_response.c b/probe-busybox/libbb/atlas_write_response.c new file mode 100644 index 00000000..7d55a3a8 --- /dev/null +++ b/probe-busybox/libbb/atlas_write_response.c @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2020 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +void write_response(FILE *file, int type, size_t size, void *data) +{ + fwrite(&type, sizeof(type), 1, file); + fwrite(&size, sizeof(size), 1, file); + fwrite(data, size, 1, file); +} + diff --git a/probe-busybox/libbb/bb_bswap_64.c b/probe-busybox/libbb/bb_bswap_64.c new file mode 100644 index 00000000..ce9d53b7 --- /dev/null +++ b/probe-busybox/libbb/bb_bswap_64.c @@ -0,0 +1,16 @@ +/* + * Utility routines. + * + * Copyright (C) 2010 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +#if !(ULONG_MAX > 0xffffffff) +uint64_t FAST_FUNC bb_bswap_64(uint64_t x) +{ + return bswap_64(x); +} +#endif diff --git a/probe-busybox/libbb/bb_strtonum.c b/probe-busybox/libbb/bb_strtonum.c new file mode 100644 index 00000000..949f26be --- /dev/null +++ b/probe-busybox/libbb/bb_strtonum.c @@ -0,0 +1,160 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* On exit: errno = 0 only if there was non-empty, '\0' terminated value + * errno = EINVAL if value was not '\0' terminated, but otherwise ok + * Return value is still valid, caller should just check whether end[0] + * is a valid terminating char for particular case. OTOH, if caller + * requires '\0' terminated input, [s]he can just check errno == 0. + * errno = ERANGE if value had alphanumeric terminating char ("1234abcg"). + * errno = ERANGE if value is out of range, missing, etc. + * errno = ERANGE if value had minus sign for strtouXX (even "-0" is not ok ) + * return value is all-ones in this case. 
+ * + * Test code: + * char *endptr; + * const char *minus = "-"; + * errno = 0; + * bb_strtoi(minus, &endptr, 0); // must set ERANGE + * printf("minus:%p endptr:%p errno:%d EINVAL:%d\n", minus, endptr, errno, EINVAL); + * errno = 0; + * bb_strtoi("-0-", &endptr, 0); // must set EINVAL and point to second '-' + * printf("endptr[0]:%c errno:%d EINVAL:%d\n", endptr[0], errno, EINVAL); + */ + +static unsigned long long ret_ERANGE(void) +{ + errno = ERANGE; /* this ain't as small as it looks (on glibc) */ + return ULLONG_MAX; +} + +static unsigned long long handle_errors(unsigned long long v, char **endp) +{ + char next_ch = **endp; + + /* errno is already set to ERANGE by strtoXXX if value overflowed */ + if (next_ch) { + /* "1234abcg" or out-of-range? */ + if (isalnum(next_ch) || errno) + return ret_ERANGE(); + /* good number, just suspicious terminator */ + errno = EINVAL; + } + return v; +} + + +unsigned long long FAST_FUNC bb_strtoull(const char *arg, char **endp, int base) +{ + unsigned long long v; + char *endptr; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + /* strtoul(" -4200000000") returns 94967296, errno 0 (!) */ + /* I don't think that this is right. Preventing this... */ + if (!isalnum(arg[0])) return ret_ERANGE(); + + /* not 100% correct for lib func, but convenient for the caller */ + errno = 0; + v = strtoull(arg, endp, base); + return handle_errors(v, endp); +} + +long long FAST_FUNC bb_strtoll(const char *arg, char **endp, int base) +{ + unsigned long long v; + char *endptr; + char first; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + /* Check for the weird "feature": + * a "-" string is apparently a valid "number" for strto[u]l[l]! + * It returns zero and errno is 0! :( */ + first = (arg[0] != '-' ? arg[0] : arg[1]); + if (!isalnum(first)) return ret_ERANGE(); + + errno = 0; + v = strtoll(arg, endp, base); + return handle_errors(v, endp); +} + +#if ULONG_MAX != ULLONG_MAX +unsigned long FAST_FUNC bb_strtoul(const char *arg, char **endp, int base) +{ + unsigned long v; + char *endptr; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + if (!isalnum(arg[0])) return ret_ERANGE(); + errno = 0; + v = strtoul(arg, endp, base); + return handle_errors(v, endp); +} + +long FAST_FUNC bb_strtol(const char *arg, char **endp, int base) +{ + long v; + char *endptr; + char first; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + first = (arg[0] != '-' ? arg[0] : arg[1]); + if (!isalnum(first)) return ret_ERANGE(); + + errno = 0; + v = strtol(arg, endp, base); + return handle_errors(v, endp); +} +#endif + +#if UINT_MAX != ULONG_MAX +unsigned FAST_FUNC bb_strtou(const char *arg, char **endp, int base) +{ + unsigned long v; + char *endptr; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + if (!isalnum(arg[0])) return ret_ERANGE(); + errno = 0; + v = strtoul(arg, endp, base); + if (v > UINT_MAX) return ret_ERANGE(); + return handle_errors(v, endp); +} + +int FAST_FUNC bb_strtoi(const char *arg, char **endp, int base) +{ + long v; + char *endptr; + char first; + + if (!endp) endp = &endptr; + *endp = (char*) arg; + + first = (arg[0] != '-' ? 
arg[0] : arg[1]); + if (!isalnum(first)) return ret_ERANGE(); + + errno = 0; + v = strtol(arg, endp, base); + if (v > INT_MAX) return ret_ERANGE(); + if (v < INT_MIN) return ret_ERANGE(); + return handle_errors(v, endp); +} +#endif diff --git a/probe-busybox/libbb/bind_interface.c b/probe-busybox/libbb/bind_interface.c new file mode 100644 index 00000000..1eb44141 --- /dev/null +++ b/probe-busybox/libbb/bind_interface.c @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2014 RIPE NCC + * Licensed under GPLv2 or later, see file LICENSE in this tarball for details. + */ + +#include "libbb.h" + +int bind_interface(int socket, int af, char *name) +{ + struct sockaddr_storage sa; + + memset(&sa, '\0', sizeof(sa)); + + if (af == AF_INET) + { + sa.ss_family= AF_INET; + if (inet_pton(af, name, + &((struct sockaddr_in *)&sa)->sin_addr) == 1) + { + return bind(socket, (struct sockaddr *)&sa, + sizeof(sa)); + } + } + else + { + sa.ss_family= AF_INET6; + if (inet_pton(af, name, + &((struct sockaddr_in6 *)&sa)->sin6_addr) == 1) + { + return bind(socket, (struct sockaddr *)&sa, + sizeof(sa)); + } + } + if (setsockopt(socket, SOL_SOCKET, SO_BINDTODEVICE, name, + strlen(name)+1) == -1) + { + return -1; + } + + return 0; +} + diff --git a/probe-busybox/libbb/common_bufsiz.c b/probe-busybox/libbb/common_bufsiz.c new file mode 100644 index 00000000..2847eb57 --- /dev/null +++ b/probe-busybox/libbb/common_bufsiz.c @@ -0,0 +1,83 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 2016 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +//config:config FEATURE_USE_BSS_TAIL +//config: bool "Use the end of BSS page" +//config: default n +//config: help +//config: Attempt to reclaim a small unused part of BSS. +//config: +//config: Executables have the following parts: +//config: = read-only executable code and constants, also known as "text" +//config: = read-write data +//config: = non-initialized (zeroed on demand) data, also known as "bss" +//config: +//config: At link time, "text" is padded to a full page. At runtime, all "text" +//config: pages are mapped RO and executable. +//config: "Data" starts on the next page boundary, but is not padded +//config: to a full page at the end. "Bss" starts wherever "data" ends. +//config: At runtime, "data" pages are mapped RW and they are file-backed +//config: (this includes a small portion of "bss" which may live in the last +//config: partial page of "data"). +//config: Pages which are fully in "bss" are mapped to anonymous memory. +//config: +//config: "Bss" end is usually not page-aligned. There is an unused space +//config: in the last page. Linker marks its start with the "_end" symbol. +//config: +//config: This option will attempt to use that space for bb_common_bufsiz1[] +//config: array. If it fits after _end, it will be used, and COMMON_BUFSIZE +//config: will be enlarged from its guaranteed minimum size of 1 kbyte. +//config: This may require recompilation a second time, since value of _end +//config: is known only after final link. +//config: +//config: If you are getting a build error like this: +//config: appletlib.c:(.text.main+0xd): undefined reference to '_end' +//config: disable this option. + +//kbuild:lib-y += common_bufsiz.o + +#include "libbb.h" +#include "common_bufsiz.h" + +#if !ENABLE_FEATURE_USE_BSS_TAIL + +/* We use it for "global" data via *(struct global*)bb_common_bufsiz1. 
+ * Since gcc insists on aligning struct global's members, it would be a pity + * (and an alignment fault on some CPUs) to mess it up. */ +char bb_common_bufsiz1[COMMON_BUFSIZE] ALIGNED(sizeof(long long)); + +#else + +# ifndef setup_common_bufsiz +/* For now, this is never used: + * scripts/generate_BUFSIZ.sh never generates "malloced" bufsiz1: + * enum { COMMON_BUFSIZE = 1024 }; + * extern char *const bb_common_bufsiz1; + * void setup_common_bufsiz(void); + * This has proved to be worse than the approach of defining + * larger bb_common_bufsiz1[] array. + */ + +/* + * It is not defined as a dummy macro. + * It means we have to provide this function. + */ +char *const bb_common_bufsiz1 __attribute__ ((section (".data"))); +void setup_common_bufsiz(void) +{ + if (!bb_common_bufsiz1) + *(char**)&bb_common_bufsiz1 = xzalloc(COMMON_BUFSIZE); +} +# else +# ifndef bb_common_bufsiz1 + /* bb_common_bufsiz1[] is not aliased to _end[] */ +char bb_common_bufsiz1[COMMON_BUFSIZE] ALIGNED(sizeof(long long)); +# endif +# endif + +#endif diff --git a/probe-busybox/libbb/compare_string_array.c b/probe-busybox/libbb/compare_string_array.c new file mode 100644 index 00000000..2f51237a --- /dev/null +++ b/probe-busybox/libbb/compare_string_array.c @@ -0,0 +1,171 @@ +/* vi: set sw=4 ts=4: */ +/* + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* + * Return NULL if string is not prefixed with key. Return pointer to the + * first character in string after the prefix key. If key is an empty string, + * return pointer to the beginning of string. + */ +char* FAST_FUNC is_prefixed_with(const char *string, const char *key) +{ +#if 0 /* Two passes over key - probably slower */ + int len = strlen(key); + if (strncmp(string, key, len) == 0) + return string + len; + return NULL; +#else /* Open-coded */ + while (*key != '\0') { + if (*key != *string) + return NULL; + key++; + string++; + } + return (char*)string; +#endif +} + +/* + * Return NULL if string is not suffixed with key. Return pointer to the + * beginning of prefix key in string. If key is an empty string return pointer + * to the end of string. 
+ */ +char* FAST_FUNC is_suffixed_with(const char *string, const char *key) +{ + size_t key_len = strlen(key); + ssize_t len_diff = strlen(string) - key_len; + + if (len_diff >= 0) { + string += len_diff; + if (strcmp(string, key) == 0) { + return (char*)string; + } + } + + return NULL; +} + +/* returns the array index of the string */ +/* (index of first match is returned, or -1) */ +int FAST_FUNC index_in_str_array(const char *const string_array[], const char *key) +{ + int i; + + for (i = 0; string_array[i] != 0; i++) { + if (strcmp(string_array[i], key) == 0) { + return i; + } + } + return -1; +} + +int FAST_FUNC index_in_strings(const char *strings, const char *key) +{ + int idx = 0; + + while (*strings) { + if (strcmp(strings, key) == 0) { + return idx; + } + strings += strlen(strings) + 1; /* skip NUL */ + idx++; + } + return -1; +} + +/* returns the array index of the string, even if it matches only a beginning */ +/* (index of first match is returned, or -1) */ +#ifdef UNUSED +int FAST_FUNC index_in_substr_array(const char *const string_array[], const char *key) +{ + int i; + if (key[0]) { + for (i = 0; string_array[i] != 0; i++) { + if (is_prefixed_with(string_array[i], key)) { + return i; + } + } + } + return -1; +} +#endif + +int FAST_FUNC index_in_substrings(const char *strings, const char *key) +{ + int matched_idx = -1; + const int len = strlen(key); + + if (len) { + int idx = 0; + while (*strings) { + if (strncmp(strings, key, len) == 0) { + if (strings[len] == '\0') + return idx; /* exact match */ + if (matched_idx >= 0) + return -1; /* ambiguous match */ + matched_idx = idx; + } + strings += strlen(strings) + 1; /* skip NUL */ + idx++; + } + } + return matched_idx; +} + +const char* FAST_FUNC nth_string(const char *strings, int n) +{ + while (n) { + n--; + strings += strlen(strings) + 1; + } + return strings; +} + +#ifdef UNUSED_SO_FAR /* only brctl.c needs it yet */ +/* Returns 0 for no, 1 for yes or a negative value on error. 
*/ +smallint FAST_FUNC yesno(const char *str) +{ + static const char no_yes[] ALIGN1 = + "0\0" "off\0" "no\0" + "1\0" "on\0" "yes\0"; + int ret = index_in_substrings(no_yes, str); + return ret / 3; +} +#endif + +#if ENABLE_UNIT_TEST + +BBUNIT_DEFINE_TEST(is_prefixed_with) +{ + BBUNIT_ASSERT_STREQ(" bar", is_prefixed_with("foo bar", "foo")); + BBUNIT_ASSERT_STREQ("bar", is_prefixed_with("foo bar", "foo ")); + BBUNIT_ASSERT_STREQ("", is_prefixed_with("foo", "foo")); + BBUNIT_ASSERT_STREQ("foo", is_prefixed_with("foo", "")); + BBUNIT_ASSERT_STREQ("", is_prefixed_with("", "")); + + BBUNIT_ASSERT_NULL(is_prefixed_with("foo", "bar foo")); + BBUNIT_ASSERT_NULL(is_prefixed_with("foo foo", "bar")); + BBUNIT_ASSERT_NULL(is_prefixed_with("", "foo")); + + BBUNIT_ENDTEST; +} + +BBUNIT_DEFINE_TEST(is_suffixed_with) +{ + BBUNIT_ASSERT_STREQ("bar", is_suffixed_with("foo bar", "bar")); + BBUNIT_ASSERT_STREQ("foo", is_suffixed_with("foo", "foo")); + BBUNIT_ASSERT_STREQ("", is_suffixed_with("foo", "")); + BBUNIT_ASSERT_STREQ("", is_suffixed_with("", "")); + BBUNIT_ASSERT_STREQ("foo", is_suffixed_with("barfoofoo", "foo")); + + BBUNIT_ASSERT_NULL(is_suffixed_with("foo", "bar foo")); + BBUNIT_ASSERT_NULL(is_suffixed_with("foo foo", "bar")); + BBUNIT_ASSERT_NULL(is_suffixed_with("", "foo")); + + BBUNIT_ENDTEST; +} + +#endif /* ENABLE_UNIT_TEST */ diff --git a/probe-busybox/libbb/concat_path_file.c b/probe-busybox/libbb/concat_path_file.c new file mode 100644 index 00000000..9ed29599 --- /dev/null +++ b/probe-busybox/libbb/concat_path_file.c @@ -0,0 +1,29 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) many different people. + * If you wrote this, please acknowledge your work. + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* Concatenate path and filename to new allocated buffer. + * Add '/' only as needed (no duplicate // are produced). + * If path is NULL, it is assumed to be "/". + * filename should not be NULL. + */ + +#include "libbb.h" + +char* FAST_FUNC concat_path_file(const char *path, const char *filename) +{ + char *lc; + + if (!path) + path = ""; + lc = last_char_is(path, '/'); + while (*filename == '/') + filename++; + return xasprintf("%s%s%s", path, (lc==NULL ? "/" : ""), filename); +} diff --git a/probe-busybox/libbb/copyfd.c b/probe-busybox/libbb/copyfd.c new file mode 100644 index 00000000..7e353190 --- /dev/null +++ b/probe-busybox/libbb/copyfd.c @@ -0,0 +1,160 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2005 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#if ENABLE_FEATURE_USE_SENDFILE +# include <sys/sendfile.h> +#else +# define sendfile(a,b,c,d) (-1) +#endif + +/* + * We were using 0x7fff0000 as sendfile chunk size, but it + * was seen to cause largish delays when user tries to ^C a file copy. + * Let's use a saner size. + * Note: needs to be >= max(CONFIG_FEATURE_COPYBUF_KB), + * or else "copy to eof" code will use needlessly short reads. + */ +#define SENDFILE_BIGBUF (16*1024*1024) + +/* Used by NOFORK applets (e.g. cat) - must not use xmalloc.
+ * size < 0 means "ignore write errors", used by tar --to-command + * size = 0 means "copy till EOF" + */ +static off_t bb_full_fd_action(int src_fd, int dst_fd, off_t size) +{ + int status = -1; + off_t total = 0; + bool continue_on_write_error = 0; + ssize_t sendfile_sz; +#if CONFIG_FEATURE_COPYBUF_KB > 4 + char *buffer = buffer; /* for compiler */ + int buffer_size = 0; +#else + char buffer[CONFIG_FEATURE_COPYBUF_KB * 1024]; + enum { buffer_size = sizeof(buffer) }; +#endif + + if (size < 0) { + size = -size; + continue_on_write_error = 1; + } + + if (src_fd < 0) + goto out; + + sendfile_sz = !ENABLE_FEATURE_USE_SENDFILE + ? 0 + : SENDFILE_BIGBUF; + if (!size) { + size = SENDFILE_BIGBUF; + status = 1; /* copy until eof */ + } + + while (1) { + ssize_t rd; + + if (sendfile_sz) { + rd = sendfile(dst_fd, src_fd, NULL, + size > sendfile_sz ? sendfile_sz : size); + if (rd >= 0) + goto read_ok; + sendfile_sz = 0; /* do not try sendfile anymore */ + } +#if CONFIG_FEATURE_COPYBUF_KB > 4 + if (buffer_size == 0) { + if (size > 0 && size <= 4 * 1024) + goto use_small_buf; + /* We want page-aligned buffer, just in case kernel is clever + * and can do page-aligned io more efficiently */ + buffer = mmap(NULL, CONFIG_FEATURE_COPYBUF_KB * 1024, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, + /* ignored: */ -1, 0); + buffer_size = CONFIG_FEATURE_COPYBUF_KB * 1024; + if (buffer == MAP_FAILED) { + use_small_buf: + buffer = alloca(4 * 1024); + buffer_size = 4 * 1024; + } + } +#endif + rd = safe_read(src_fd, buffer, + size > buffer_size ? buffer_size : size); + if (rd < 0) { + bb_perror_msg(bb_msg_read_error); + break; + } + read_ok: + if (!rd) { /* eof - all done */ + status = 0; + break; + } + /* dst_fd == -1 is a fake, else... */ + if (dst_fd >= 0 && !sendfile_sz) { + ssize_t wr = full_write(dst_fd, buffer, rd); + if (wr < rd) { + if (!continue_on_write_error) { + bb_perror_msg(bb_msg_write_error); + break; + } + dst_fd = -1; + } + } + total += rd; + if (status < 0) { /* if we aren't copying till EOF... */ + size -= rd; + if (!size) { + /* 'size' bytes copied - all done */ + status = 0; + break; + } + } + } + out: + + if (buffer_size > 4 * 1024) + munmap(buffer, buffer_size); + return status ? -1 : total; +} + + +#if 0 +void FAST_FUNC complain_copyfd_and_die(off_t sz) +{ + if (sz != -1) + bb_error_msg_and_die("short read"); + /* if sz == -1, bb_copyfd_XX already complained */ + xfunc_die(); +} +#endif + +off_t FAST_FUNC bb_copyfd_size(int fd1, int fd2, off_t size) +{ + if (size) { + return bb_full_fd_action(fd1, fd2, size); + } + return 0; +} + +void FAST_FUNC bb_copyfd_exact_size(int fd1, int fd2, off_t size) +{ + off_t sz = bb_copyfd_size(fd1, fd2, size); + if (sz == (size >= 0 ? size : -size)) + return; + if (sz != -1) + bb_error_msg_and_die("short read"); + /* if sz == -1, bb_copyfd_XX already complained */ + xfunc_die(); +} + +off_t FAST_FUNC bb_copyfd_eof(int fd1, int fd2) +{ + return bb_full_fd_action(fd1, fd2, 0); +} diff --git a/probe-busybox/libbb/crc32.c b/probe-busybox/libbb/crc32.c new file mode 100644 index 00000000..ac9836cc --- /dev/null +++ b/probe-busybox/libbb/crc32.c @@ -0,0 +1,66 @@ +/* vi: set sw=4 ts=4: */ +/* + * CRC32 table fill function + * Copyright (C) 2006 by Rob Sullivan + * (I can't really claim much credit however, as the algorithm is + * very well-known) + * + * The following function creates a CRC32 table depending on whether + * a big-endian (0x04c11db7) or little-endian (0xedb88320) CRC32 is + * required. 
Admittedly, there are other CRC32 polynomials floating + * around, but Busybox doesn't use them. + * + * endian = 1: big-endian + * endian = 0: little-endian + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +uint32_t *global_crc32_table; + +uint32_t* FAST_FUNC crc32_filltable(uint32_t *crc_table, int endian) +{ + uint32_t polynomial = endian ? 0x04c11db7 : 0xedb88320; + uint32_t c; + int i, j; + + if (!crc_table) + crc_table = xmalloc(256 * sizeof(uint32_t)); + + for (i = 0; i < 256; i++) { + c = endian ? (i << 24) : i; + for (j = 8; j; j--) { + if (endian) + c = (c&0x80000000) ? ((c << 1) ^ polynomial) : (c << 1); + else + c = (c&1) ? ((c >> 1) ^ polynomial) : (c >> 1); + } + *crc_table++ = c; + } + + return crc_table - 256; +} + +uint32_t FAST_FUNC crc32_block_endian1(uint32_t val, const void *buf, unsigned len, uint32_t *crc_table) +{ + const void *end = (uint8_t*)buf + len; + + while (buf != end) { + val = (val << 8) ^ crc_table[(val >> 24) ^ *(uint8_t*)buf]; + buf = (uint8_t*)buf + 1; + } + return val; +} + +uint32_t FAST_FUNC crc32_block_endian0(uint32_t val, const void *buf, unsigned len, uint32_t *crc_table) +{ + const void *end = (uint8_t*)buf + len; + + while (buf != end) { + val = crc_table[(uint8_t)val ^ *(uint8_t*)buf] ^ (val >> 8); + buf = (uint8_t*)buf + 1; + } + return val; +} diff --git a/probe-busybox/libbb/default_error_retval.c b/probe-busybox/libbb/default_error_retval.c new file mode 100644 index 00000000..4f6395fa --- /dev/null +++ b/probe-busybox/libbb/default_error_retval.c @@ -0,0 +1,18 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2003 Manuel Novoa III + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* Seems silly to copyright a global variable. ;-) Oh well. + * + * At least one applet (cmp) returns a value different from the typical + * EXIT_FAILURE values (1) when an error occurs. So, make it configurable + * by the applet. I suppose we could use a wrapper function to set it, but + * that too seems silly. + */ + +#include "libbb.h" + +uint8_t xfunc_error_retval = EXIT_FAILURE; diff --git a/probe-busybox/libbb/full_write.c b/probe-busybox/libbb/full_write.c new file mode 100644 index 00000000..777fbd91 --- /dev/null +++ b/probe-busybox/libbb/full_write.c @@ -0,0 +1,42 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* + * Write all of the supplied buffer out to a file. + * This does multiple writes as necessary. + * Returns the amount written, or -1 on an error. + */ +ssize_t FAST_FUNC full_write(int fd, const void *buf, size_t len) +{ + ssize_t cc; + ssize_t total; + + total = 0; + + while (len) { + cc = safe_write(fd, buf, len); + + if (cc < 0) { + if (total) { + /* we already wrote some! */ + /* user can do another write to know the error code */ + return total; + } + return cc; /* write() returns -1 on failure. 
*/ + } + + total += cc; + buf = ((const char *)buf) + cc; + len -= cc; + } + + return total; +} diff --git a/probe-busybox/libbb/get_last_path_component.c b/probe-busybox/libbb/get_last_path_component.c new file mode 100644 index 00000000..04fdf2a3 --- /dev/null +++ b/probe-busybox/libbb/get_last_path_component.c @@ -0,0 +1,50 @@ +/* vi: set sw=4 ts=4: */ +/* + * bb_get_last_path_component implementation for busybox + * + * Copyright (C) 2001 Manuel Novoa III + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" + +const char* FAST_FUNC bb_basename(const char *name) +{ + const char *cp = strrchr(name, '/'); + if (cp) + return cp + 1; + return name; +} + +/* + * "/" -> "/" + * "abc" -> "abc" + * "abc/def" -> "def" + * "abc/def/" -> "" + */ +char* FAST_FUNC bb_get_last_path_component_nostrip(const char *path) +{ + char *slash = strrchr(path, '/'); + + if (!slash || (slash == path && !slash[1])) + return (char*)path; + + return slash + 1; +} + +/* + * "/" -> "/" + * "abc" -> "abc" + * "abc/def" -> "def" + * "abc/def/" -> "def" !! + */ +char* FAST_FUNC bb_get_last_path_component_strip(char *path) +{ + char *slash = last_char_is(path, '/'); + + if (slash) + while (*slash == '/' && slash != path) + *slash-- = '\0'; + + return bb_get_last_path_component_nostrip(path); +} diff --git a/probe-busybox/libbb/getopt32.c b/probe-busybox/libbb/getopt32.c new file mode 100644 index 00000000..15b6efc0 --- /dev/null +++ b/probe-busybox/libbb/getopt32.c @@ -0,0 +1,651 @@ +/* vi: set sw=4 ts=4: */ +/* + * universal getopt32 implementation for busybox + * + * Copyright (C) 2003-2005 Vladimir Oleynik + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG +# include <getopt.h> +#endif +#include "libbb.h" + +/* Documentation + +uint32_t +getopt32(char **argv, const char *applet_opts, ...) + + The command line options are passed as the applet_opts string. + + If one of the given options is found, a flag value is added to + the return value. + + The flag value is determined by the position of the char in + applet_opts string. For example: + + flags = getopt32(argv, "rnug"); + + "r" will set 1 (bit 0) + "n" will set 2 (bit 1) + "u" will set 4 (bit 2) + "g" will set 8 (bit 3) + + and so on. You can also look at the return value as a bit + field and each option sets one bit. + + On exit, global variable optind is set so that if you + will do argc -= optind; argv += optind; then + argc will be equal to number of remaining non-option + arguments, first one would be in argv[0], next in argv[1] and so on + (options and their parameters will be moved into argv[] + positions prior to argv[optind]). + + "o:" If one of the options requires an argument, then add a ":" + after the char in applet_opts and provide a pointer to store + the argument. For example: + + char *pointer_to_arg_for_a; + char *pointer_to_arg_for_b; + char *pointer_to_arg_for_c; + char *pointer_to_arg_for_d; + + flags = getopt32(argv, "a:b:c:d:", + &pointer_to_arg_for_a, &pointer_to_arg_for_b, + &pointer_to_arg_for_c, &pointer_to_arg_for_d); + + The type of the pointer may be controlled by "o::" or "o+" in + the external string opt_complementary (see below for more info). + + "o::" If option can have an *optional* argument, then add a "::" + after its char in applet_opts and provide a pointer to store + the argument. Note that optional arguments _must_ + immediately follow the option: -oparam, not -o param.
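For illustration only, a minimal sketch of the "o::" form just described (this note and snippet are an editorial addition, not part of getopt32.c; the option letter and variable name are invented, and the usual applet context with argv is assumed). The destination pointer is only written when an argument is actually attached to the option, so it should be initialized first:

        char *level = NULL;   /* stays NULL when plain -v is given */
        unsigned opts;

        /* "v::" - the argument is optional and must be attached, e.g. -v3 */
        opts = getopt32(argv, "v::", &level);
        if (opts & 1)         /* bit 0: 'v' is the first option in the string */
                printf("verbose, level %s\n", level ? level : "(default)");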
+ + "o:+" This means that the parameter for this option is a nonnegative integer. + It will be processed with xatoi_positive() - allowed range + is 0..INT_MAX. + + int param; // "unsigned param;" will also work + getopt32(argv, "p:+", ¶m); + + "o:*" This means that the option can occur multiple times. Each occurrence + will be saved as a llist_t element instead of char*. + + For example: + The grep applet can have one or more "-e pattern" arguments. + In this case you should use getopt32() as follows: + + llist_t *patterns = NULL; + + (this pointer must be initializated to NULL if the list is empty + as required by llist_add_to_end(llist_t **old_head, char *new_item).) + + getopt32(argv, "e:*", &patterns); + + $ grep -e user -e root /etc/passwd + root:x:0:0:root:/root:/bin/bash + user:x:500:500::/home/user:/bin/bash + + "+" If the first character in the applet_opts string is a plus, + then option processing will stop as soon as a non-option is + encountered in the argv array. Useful for applets like env + which should not process arguments to subprograms: + env -i ls -d / + Here we want env to process just the '-i', not the '-d'. + + "!" Report bad option, missing required options, + inconsistent options with all-ones return value (instead of abort). + +const char *applet_long_options + + This struct allows you to define long options: + + static const char applet_longopts[] ALIGN1 = + //"name\0" has_arg val + "verbose\0" No_argument "v" + ; + applet_long_options = applet_longopts; + + The last member of struct option (val) typically is set to + matching short option from applet_opts. If there is no matching + char in applet_opts, then: + - return bit has next position after short options + - if has_arg is not "No_argument", use ptr for arg also + - opt_complementary affects it too + + Note: a good applet will make long options configurable via the + config process and not a required feature. The current standard + is to name the config option CONFIG_FEATURE__LONG_OPTIONS. + +const char *opt_complementary + + ":" The colon (":") is used to separate groups of two or more chars + and/or groups of chars and special characters (stating some + conditions to be checked). + + "abc" If groups of two or more chars are specified, the first char + is the main option and the other chars are secondary options. + Their flags will be turned on if the main option is found even + if they are not specifed on the command line. For example: + + opt_complementary = "abc"; + flags = getopt32(argv, "abcd") + + If getopt() finds "-a" on the command line, then + getopt32's return value will be as if "-a -b -c" were + found. + + "ww" Adjacent double options have a counter associated which indicates + the number of occurrences of the option. + For example the ps applet needs: + if w is given once, GNU ps sets the width to 132, + if w is given more than once, it is "unlimited" + + int w_counter = 0; // must be initialized! + opt_complementary = "ww"; + getopt32(argv, "w", &w_counter); + if (w_counter) + width = (w_counter == 1) ? 132 : INT_MAX; + else + get_terminal_width(...&width...); + + w_counter is a pointer to an integer. It has to be passed to + getopt32() after all other option argument sinks. + + For example: accept multiple -v to indicate the level of verbosity + and for each -b optarg, add optarg to my_b. 
Finally, if b is given, + turn off c and vice versa: + + llist_t *my_b = NULL; + int verbose_level = 0; + opt_complementary = "vv:b-c:c-b"; + f = getopt32(argv, "vb:*c", &my_b, &verbose_level); + if (f & 2) // -c after -b unsets -b flag + while (my_b) dosomething_with(llist_pop(&my_b)); + if (my_b) // but llist is stored if -b is specified + free_llist(my_b); + if (verbose_level) printf("verbose level is %d\n", verbose_level); + +Special characters: + + "-" A group consisting of just a dash forces all arguments + to be treated as options, even if they have no leading dashes. + Next char in this case can't be a digit (0-9), use ':' or end of line. + Example: + + opt_complementary = "-:w-x:x-w"; // "-w-x:x-w" would also work, + getopt32(argv, "wx"); // but is less readable + + This makes it possible to use options without a dash (./program w x) + as well as with a dash (./program -x). + + NB: getopt32() will leak a small amount of memory if you use + this option! Do not use it if there is a possibility of recursive + getopt32() calls. + + "--" A double dash at the beginning of opt_complementary means the + argv[1] string should always be treated as options, even if it isn't + prefixed with a "-". This is useful for special syntax in applets + such as "ar" and "tar": + tar xvf foo.tar + + NB: getopt32() will leak a small amount of memory if you use + this option! Do not use it if there is a possibility of recursive + getopt32() calls. + + "-N" A dash as the first char in a opt_complementary group followed + by a single digit (0-9) means that at least N non-option + arguments must be present on the command line + + "=N" An equal sign as the first char in a opt_complementary group followed + by a single digit (0-9) means that exactly N non-option + arguments must be present on the command line + + "?N" A "?" as the first char in a opt_complementary group followed + by a single digit (0-9) means that at most N arguments must be present + on the command line. + + "V-" An option with dash before colon or end-of-line results in + bb_show_usage() being called if this option is encountered. + This is typically used to implement "print verbose usage message + and exit" option. + + "a-b" A dash between two options causes the second of the two + to be unset (and ignored) if it is given on the command line. + + [FIXME: what if they are the same? like "x-x"? Is it ever useful?] + + For example: + The du applet has the options "-s" and "-d depth". If + getopt32 finds -s, then -d is unset or if it finds -d + then -s is unset. (Note: busybox implements the GNU + "--max-depth" option as "-d".) To obtain this behavior, you + set opt_complementary = "s-d:d-s". Only one flag value is + added to getopt32's return value depending on the + position of the options on the command line. If one of the + two options requires an argument pointer (":" in applet_opts + as in "d:") optarg is set accordingly. + + char *smax_print_depth; + + opt_complementary = "s-d:d-s:x-x"; + opt = getopt32(argv, "sd:x", &smax_print_depth); + + if (opt & 2) + max_print_depth = atoi(smax_print_depth); + if (opt & 4) + printf("Detected odd -x usage\n"); + + "a--b" A double dash between two options, or between an option and a group + of options, means that they are mutually exclusive. Unlike + the "-" case above, an error will be forced if the options + are used together. + + For example: + The cut applet must have only one type of list specified, so + -b, -c and -f are mutually exclusive and should raise an error + if specified together. 
In this case you must set + opt_complementary = "b--cf:c--bf:f--bc". If two of the + mutually exclusive options are found, getopt32 will call + bb_show_usage() and die. + + "x--x" Variation of the above, it means that -x option should occur + at most once. + + "o+" A plus after a char in opt_complementary means that the parameter + for this option is a nonnegative integer. It will be processed + with xatoi_positive() - allowed range is 0..INT_MAX. + + int param; // "unsigned param;" will also work + opt_complementary = "p+"; + getopt32(argv, "p:", &param); + + "o::" A double colon after a char in opt_complementary means that the + option can occur multiple times. Each occurrence will be saved as + a llist_t element instead of char*. + + For example: + The grep applet can have one or more "-e pattern" arguments. + In this case you should use getopt32() as follows: + + llist_t *patterns = NULL; + + (this pointer must be initialized to NULL if the list is empty + as required by llist_add_to_end(llist_t **old_head, char *new_item).) + + opt_complementary = "e::"; + getopt32(argv, "e:", &patterns); + + $ grep -e user -e root /etc/passwd + root:x:0:0:root:/root:/bin/bash + user:x:500:500::/home/user:/bin/bash + + "o+" and "o::" can be handled by "o:+" and "o:*" specifiers + in option string (and it is preferred), but this does not work + for "long options only" cases, such as tar --exclude=PATTERN, + wget --header=HDR cases. + + "a?b" A "?" between an option and a group of options means that + at least one of them is required to occur if the first option + occurs in preceding command line arguments. + + For example from "id" applet: + + // Don't allow -n -r -rn -ug -rug -nug -rnug + opt_complementary = "r?ug:n?ug:u--g:g--u"; + flags = getopt32(argv, "rnug"); + + This example allowed only: + $ id; id -u; id -g; id -ru; id -nu; id -rg; id -ng; id -rnu; id -rng + + "X" A opt_complementary group with just a single letter means + that this option is required. If more than one such group exists, + at least one option is required to occur (not all of them). + For example from "start-stop-daemon" applet: + + // Don't allow -KS -SK, but -S or -K is required + opt_complementary = "K:S:K--S:S--K"; + flags = getopt32(argv, "KS...); + + + Don't forget to use ':'. For example, "?322-22-23X-x-a" + is interpreted as "?3:22:-2:2-2:2-3Xa:2--x" - + max 3 args; count uses of '-2'; min 2 args; if there is + a '-2' option then unset '-3', '-X' and '-a'; if there is + a '-2' and after it a '-x' then error out. + But it's far too obfuscated. Use ':' to separate groups. +*/ + +/* Code here assumes that 'unsigned' is at least 32 bits wide */ + +const char *const bb_argv_dash[] = { "-", NULL }; + +const char *opt_complementary; + +enum { + PARAM_STRING, + PARAM_LIST, + PARAM_INT, +}; + +typedef struct { + unsigned char opt_char; + smallint param_type; + unsigned switch_on; + unsigned switch_off; + unsigned incongruously; + unsigned requires; + void **optarg; /* char**, llist_t** or int *. */ + int *counter; +} t_complementary; + +/* You can set applet_long_options for parse called long options */ +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG +static const struct option bb_null_long_options[1] = { + { 0, 0, 0, 0 } +}; +const char *applet_long_options; +#endif + +uint32_t option_mask32; + +uint32_t FAST_FUNC +getopt32(char **argv, const char *applet_opts, ...)
+{ + int argc; + unsigned flags = 0; + unsigned requires = 0; + t_complementary complementary[33]; /* last stays zero-filled */ + char first_char; + int c; + const unsigned char *s; + t_complementary *on_off; + va_list p; +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG + const struct option *l_o; + struct option *long_options = (struct option *) &bb_null_long_options; +#endif + unsigned trigger; + char **pargv; + int min_arg = 0; + int max_arg = -1; + +#define SHOW_USAGE_IF_ERROR 1 +#define ALL_ARGV_IS_OPTS 2 +#define FIRST_ARGV_IS_OPT 4 + + int spec_flgs = 0; + + /* skip 0: some applets cheat: they do not actually HAVE argv[0] */ + argc = 1; + while (argv[argc]) + argc++; + + va_start(p, applet_opts); + + on_off = complementary; + memset(on_off, 0, sizeof(complementary)); + + applet_opts = strcpy(alloca(strlen(applet_opts) + 1), applet_opts); + + /* skip bbox extension */ + first_char = applet_opts[0]; + if (first_char == '!') + applet_opts++; + + /* skip GNU extension */ + s = (const unsigned char *)applet_opts; + if (*s == '+' || *s == '-') + s++; + c = 0; + while (*s) { + if (c >= 32) + break; + on_off->opt_char = *s; + on_off->switch_on = (1 << c); + if (*++s == ':') { + on_off->optarg = va_arg(p, void **); + if (s[1] == '+' || s[1] == '*') { + /* 'o:+' or 'o:*' */ + on_off->param_type = (s[1] == '+') ? + PARAM_INT : PARAM_LIST; + overlapping_strcpy((char*)s + 1, (char*)s + 2); + } + /* skip possible 'o::' (or 'o:+:' !) */ + while (*++s == ':') + continue; + } + on_off++; + c++; + } + +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG + if (applet_long_options) { + const char *optstr; + unsigned i, count; + + count = 1; + optstr = applet_long_options; + while (optstr[0]) { + optstr += strlen(optstr) + 3; /* skip NUL, has_arg, val */ + count++; + } + /* count == no. 
of longopts + 1 */ + long_options = alloca(count * sizeof(*long_options)); + memset(long_options, 0, count * sizeof(*long_options)); + i = 0; + optstr = applet_long_options; + while (--count) { + long_options[i].name = optstr; + optstr += strlen(optstr) + 1; + long_options[i].has_arg = (unsigned char)(*optstr++); + /* long_options[i].flag = NULL; */ + long_options[i].val = (unsigned char)(*optstr++); + i++; + } + for (l_o = long_options; l_o->name; l_o++) { + if (l_o->flag) + continue; + for (on_off = complementary; on_off->opt_char; on_off++) + if (on_off->opt_char == l_o->val) + goto next_long; + if (c >= 32) + break; + on_off->opt_char = l_o->val; + on_off->switch_on = (1 << c); + if (l_o->has_arg != no_argument) + on_off->optarg = va_arg(p, void **); + c++; + next_long: ; + } + /* Make it unnecessary to clear applet_long_options + * by hand after each call to getopt32 + */ + applet_long_options = NULL; + } +#endif /* ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG */ + + for (s = (const unsigned char *)opt_complementary; s && *s; s++) { + t_complementary *pair; + unsigned *pair_switch; + + if (*s == ':') + continue; + c = s[1]; + if (*s == '?') { + if (c < '0' || c > '9') { + spec_flgs |= SHOW_USAGE_IF_ERROR; + } else { + max_arg = c - '0'; + s++; + } + continue; + } + if (*s == '-') { + if (c < '0' || c > '9') { + if (c == '-') { + spec_flgs |= FIRST_ARGV_IS_OPT; + s++; + } else + spec_flgs |= ALL_ARGV_IS_OPTS; + } else { + min_arg = c - '0'; + s++; + } + continue; + } + if (*s == '=') { + min_arg = max_arg = c - '0'; + s++; + continue; + } + for (on_off = complementary; on_off->opt_char; on_off++) + if (on_off->opt_char == *s) + goto found_opt; + /* Without this, diagnostic of such bugs is not easy */ + bb_error_msg_and_die("NO OPT %c!", *s); + found_opt: + if (c == ':' && s[2] == ':') { + on_off->param_type = PARAM_LIST; + continue; + } + if (c == '+' && (s[2] == ':' || s[2] == '\0')) { + on_off->param_type = PARAM_INT; + s++; + continue; + } + if (c == ':' || c == '\0') { + requires |= on_off->switch_on; + continue; + } + if (c == '-' && (s[2] == ':' || s[2] == '\0')) { + flags |= on_off->switch_on; + on_off->incongruously |= on_off->switch_on; + s++; + continue; + } + if (c == *s) { + on_off->counter = va_arg(p, int *); + s++; + } + pair = on_off; + pair_switch = &pair->switch_on; + for (s++; *s && *s != ':'; s++) { + if (*s == '?') { + pair_switch = &pair->requires; + } else if (*s == '-') { + if (pair_switch == &pair->switch_off) + pair_switch = &pair->incongruously; + else + pair_switch = &pair->switch_off; + } else { + for (on_off = complementary; on_off->opt_char; on_off++) + if (on_off->opt_char == *s) { + *pair_switch |= on_off->switch_on; + break; + } + } + } + s--; + } + opt_complementary = NULL; + va_end(p); + + if (spec_flgs & (FIRST_ARGV_IS_OPT | ALL_ARGV_IS_OPTS)) { + pargv = argv + 1; + while (*pargv) { + if (pargv[0][0] != '-' && pargv[0][0] != '\0') { + /* Can't use alloca: opts with params will + * return pointers to stack! + * NB: we leak these allocations... */ + char *pp = xmalloc(strlen(*pargv) + 2); + *pp = '-'; + strcpy(pp + 1, *pargv); + *pargv = pp; + } + if (!(spec_flgs & ALL_ARGV_IS_OPTS)) + break; + pargv++; + } + } + + /* In case getopt32 was already called: + * reset the libc getopt() function, which keeps internal state. + * run_nofork_applet() does this, but we might end up here + * also via gunzip_main() -> gzip_main(). Play safe. 
+ */ +#ifdef __GLIBC__ + optind = 0; +#else /* BSD style */ + optind = 1; + /* optreset = 1; */ +#endif + /* optarg = NULL; opterr = 0; optopt = 0; - do we need this?? */ + + /* Note: just "getopt() <= 0" will not work well for + * "fake" short options, like this one: + * wget $'-\203' "Test: test" http://kernel.org/ + * (supposed to act as --header, but doesn't) */ +#if ENABLE_LONG_OPTS || ENABLE_FEATURE_GETOPT_LONG + while ((c = getopt_long(argc, argv, applet_opts, + long_options, NULL)) != -1) { +#else + while ((c = getopt(argc, argv, applet_opts)) != -1) { +#endif + /* getopt prints "option requires an argument -- X" + * and returns '?' if an option has no arg, but one is reqd */ + c &= 0xff; /* fight libc's sign extension */ + for (on_off = complementary; on_off->opt_char != c; on_off++) { + /* c can be NUL if long opt has non-NULL ->flag, + * but we construct long opts so that flag + * is always NULL (see above) */ + if (on_off->opt_char == '\0' /* && c != '\0' */) { + /* c is probably '?' - "bad option" */ + goto error; + } + } + if (flags & on_off->incongruously) + goto error; + trigger = on_off->switch_on & on_off->switch_off; + flags &= ~(on_off->switch_off ^ trigger); + flags |= on_off->switch_on ^ trigger; + flags ^= trigger; + if (on_off->counter) + (*(on_off->counter))++; + if (optarg) { + if (on_off->param_type == PARAM_LIST) { + llist_add_to_end((llist_t **)(on_off->optarg), optarg); + } else if (on_off->param_type == PARAM_INT) { +//TODO: xatoi_positive indirectly pulls in printf machinery + *(unsigned*)(on_off->optarg) = xatoi_positive(optarg); + } else if (on_off->optarg) { + *(char **)(on_off->optarg) = optarg; + } + } + } + + /* check depending requires for given options */ + for (on_off = complementary; on_off->opt_char; on_off++) { + if (on_off->requires + && (flags & on_off->switch_on) + && (flags & on_off->requires) == 0 + ) { + goto error; + } + } + if (requires && (flags & requires) == 0) + goto error; + argc -= optind; + if (argc < min_arg || (max_arg >= 0 && argc > max_arg)) + goto error; + + option_mask32 = flags; + return flags; + + error: + if (first_char != '!') + bb_show_usage(); + return (int32_t)-1; +} diff --git a/probe-busybox/libbb/getpty.c b/probe-busybox/libbb/getpty.c new file mode 100644 index 00000000..391d729f --- /dev/null +++ b/probe-busybox/libbb/getpty.c @@ -0,0 +1,66 @@ +/* vi: set sw=4 ts=4: */ +/* + * Mini getpty implementation for busybox + * Bjorn Wesen, Axis Communications AB (bjornw@axis.com) + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +#define DEBUG 0 + +int FAST_FUNC xgetpty(char *line) +{ + int p; + +#if ENABLE_FEATURE_DEVPTS + p = open("/dev/ptmx", O_RDWR); + if (p >= 0) { + grantpt(p); /* chmod+chown corresponding slave pty */ + unlockpt(p); /* (what does this do?) 
*/ +# ifndef HAVE_PTSNAME_R + { + const char *name; + name = ptsname(p); /* find out the name of slave pty */ + if (!name) { + bb_perror_msg_and_die("ptsname error (is /dev/pts mounted?)"); + } + safe_strncpy(line, name, GETPTY_BUFSIZE); + } +# else + /* find out the name of slave pty */ + if (ptsname_r(p, line, GETPTY_BUFSIZE-1) != 0) { + bb_perror_msg_and_die("ptsname error (is /dev/pts mounted?)"); + } + line[GETPTY_BUFSIZE-1] = '\0'; +# endif + return p; + } +#else + struct stat stb; + int i; + int j; + + strcpy(line, "/dev/ptyXX"); + + for (i = 0; i < 16; i++) { + line[8] = "pqrstuvwxyzabcde"[i]; + line[9] = '0'; + if (stat(line, &stb) < 0) { + continue; + } + for (j = 0; j < 16; j++) { + line[9] = j < 10 ? j + '0' : j - 10 + 'a'; + if (DEBUG) + fprintf(stderr, "Trying to open device: %s\n", line); + p = open(line, O_RDWR | O_NOCTTY); + if (p >= 0) { + line[5] = 't'; + return p; + } + } + } +#endif /* FEATURE_DEVPTS */ + bb_error_msg_and_die("can't find free pty"); +} diff --git a/probe-busybox/libbb/hash_md5_sha.c b/probe-busybox/libbb/hash_md5_sha.c new file mode 100644 index 00000000..7e7d8da2 --- /dev/null +++ b/probe-busybox/libbb/hash_md5_sha.c @@ -0,0 +1,1458 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 2010 Denys Vlasenko + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* gcc 4.2.1 optimizes rotr64 better with inline than with macro + * (for rotX32, there is no difference). Why? My guess is that + * macro requires clever common subexpression elimination heuristics + * in gcc, while inline basically forces it to happen. + */ +//#define rotl32(x,n) (((x) << (n)) | ((x) >> (32 - (n)))) +static ALWAYS_INLINE uint32_t rotl32(uint32_t x, unsigned n) +{ + return (x << n) | (x >> (32 - n)); +} +//#define rotr32(x,n) (((x) >> (n)) | ((x) << (32 - (n)))) +static ALWAYS_INLINE uint32_t rotr32(uint32_t x, unsigned n) +{ + return (x >> n) | (x << (32 - n)); +} +/* rotr64 in needed for sha512 only: */ +//#define rotr64(x,n) (((x) >> (n)) | ((x) << (64 - (n)))) +static ALWAYS_INLINE uint64_t rotr64(uint64_t x, unsigned n) +{ + return (x >> n) | (x << (64 - n)); +} + +/* rotl64 only used for sha3 currently */ +static ALWAYS_INLINE uint64_t rotl64(uint64_t x, unsigned n) +{ + return (x << n) | (x >> (64 - n)); +} + +/* Feed data through a temporary buffer. + * The internal buffer remembers previous data until it has 64 + * bytes worth to pass on. + */ +static void FAST_FUNC common64_hash(md5_ctx_t *ctx, const void *buffer, size_t len) +{ + unsigned bufpos = ctx->total64 & 63; + + ctx->total64 += len; + + while (1) { + unsigned remaining = 64 - bufpos; + if (remaining > len) + remaining = len; + /* Copy data into aligned buffer */ + memcpy(ctx->wbuffer + bufpos, buffer, remaining); + len -= remaining; + buffer = (const char *)buffer + remaining; + bufpos += remaining; + /* Clever way to do "if (bufpos != N) break; ... ; bufpos = 0;" */ + bufpos -= 64; + if (bufpos != 0) + break; + /* Buffer is filled up, process it */ + ctx->process_block(ctx); + /*bufpos = 0; - already is */ + } +} + +/* Process the remaining bytes in the buffer */ +static void FAST_FUNC common64_end(md5_ctx_t *ctx, int swap_needed) +{ + unsigned bufpos = ctx->total64 & 63; + /* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0... 
*/ + ctx->wbuffer[bufpos++] = 0x80; + + /* This loop iterates either once or twice, no more, no less */ + while (1) { + unsigned remaining = 64 - bufpos; + memset(ctx->wbuffer + bufpos, 0, remaining); + /* Do we have enough space for the length count? */ + if (remaining >= 8) { + /* Store the 64-bit counter of bits in the buffer */ + uint64_t t = ctx->total64 << 3; + if (swap_needed) + t = bb_bswap_64(t); + /* wbuffer is suitably aligned for this */ + *(bb__aliased_uint64_t *) (&ctx->wbuffer[64 - 8]) = t; + } + ctx->process_block(ctx); + if (remaining >= 8) + break; + bufpos = 0; + } +} + + +/* + * Compute MD5 checksum of strings according to the + * definition of MD5 in RFC 1321 from April 1992. + * + * Written by Ulrich Drepper , 1995. + * + * Copyright (C) 1995-1999 Free Software Foundation, Inc. + * Copyright (C) 2001 Manuel Novoa III + * Copyright (C) 2003 Glenn L. McGrath + * Copyright (C) 2003 Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* 0: fastest, 3: smallest */ +#if CONFIG_MD5_SMALL < 0 +# define MD5_SMALL 0 +#elif CONFIG_MD5_SMALL > 3 +# define MD5_SMALL 3 +#else +# define MD5_SMALL CONFIG_MD5_SMALL +#endif + +/* These are the four functions used in the four steps of the MD5 algorithm + * and defined in the RFC 1321. The first function is a little bit optimized + * (as found in Colin Plumbs public domain implementation). + * #define FF(b, c, d) ((b & c) | (~b & d)) + */ +#undef FF +#undef FG +#undef FH +#undef FI +#define FF(b, c, d) (d ^ (b & (c ^ d))) +#define FG(b, c, d) FF(d, b, c) +#define FH(b, c, d) (b ^ c ^ d) +#define FI(b, c, d) (c ^ (b | ~d)) + +/* Hash a single block, 64 bytes long and 4-byte aligned */ +static void FAST_FUNC md5_process_block64(md5_ctx_t *ctx) +{ +#if MD5_SMALL > 0 + /* Before we start, one word to the strange constants. 
+ They are defined in RFC 1321 as + T[i] = (int)(2^32 * fabs(sin(i))), i=1..64 + */ + static const uint32_t C_array[] = { + /* round 1 */ + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, + /* round 2 */ + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, + 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, + /* round 3 */ + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x4881d05, + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, + /* round 4 */ + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391 + }; + static const char P_array[] ALIGN1 = { +# if MD5_SMALL > 1 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 1 */ +# endif + 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, /* 2 */ + 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, /* 3 */ + 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9 /* 4 */ + }; +#endif + uint32_t *words = (void*) ctx->wbuffer; + uint32_t A = ctx->hash[0]; + uint32_t B = ctx->hash[1]; + uint32_t C = ctx->hash[2]; + uint32_t D = ctx->hash[3]; + +#if MD5_SMALL >= 2 /* 2 or 3 */ + + static const char S_array[] ALIGN1 = { + 7, 12, 17, 22, + 5, 9, 14, 20, + 4, 11, 16, 23, + 6, 10, 15, 21 + }; + const uint32_t *pc; + const char *pp; + const char *ps; + int i; + uint32_t temp; + + if (BB_BIG_ENDIAN) + for (i = 0; i < 16; i++) + words[i] = SWAP_LE32(words[i]); + +# if MD5_SMALL == 3 + pc = C_array; + pp = P_array; + ps = S_array - 4; + + for (i = 0; i < 64; i++) { + if ((i & 0x0f) == 0) + ps += 4; + temp = A; + switch (i >> 4) { + case 0: + temp += FF(B, C, D); + break; + case 1: + temp += FG(B, C, D); + break; + case 2: + temp += FH(B, C, D); + break; + default: /* case 3 */ + temp += FI(B, C, D); + } + temp += words[(int) (*pp++)] + *pc++; + temp = rotl32(temp, ps[i & 3]); + temp += B; + A = D; + D = C; + C = B; + B = temp; + } +# else /* MD5_SMALL == 2 */ + pc = C_array; + pp = P_array; + ps = S_array; + + for (i = 0; i < 16; i++) { + temp = A + FF(B, C, D) + words[(int) (*pp++)] + *pc++; + temp = rotl32(temp, ps[i & 3]); + temp += B; + A = D; + D = C; + C = B; + B = temp; + } + ps += 4; + for (i = 0; i < 16; i++) { + temp = A + FG(B, C, D) + words[(int) (*pp++)] + *pc++; + temp = rotl32(temp, ps[i & 3]); + temp += B; + A = D; + D = C; + C = B; + B = temp; + } + ps += 4; + for (i = 0; i < 16; i++) { + temp = A + FH(B, C, D) + words[(int) (*pp++)] + *pc++; + temp = rotl32(temp, ps[i & 3]); + temp += B; + A = D; + D = C; + C = B; + B = temp; + } + ps += 4; + for (i = 0; i < 16; i++) { + temp = A + FI(B, C, D) + words[(int) (*pp++)] + *pc++; + temp = rotl32(temp, ps[i & 3]); + temp += B; + A = D; + D = C; + C = B; + B = temp; + } +# endif + /* Add checksum to the starting values */ + ctx->hash[0] += A; + ctx->hash[1] += B; + ctx->hash[2] += C; + ctx->hash[3] += D; + +#else /* MD5_SMALL == 0 or 1 */ + +# if MD5_SMALL == 1 + const uint32_t *pc; + const char *pp; + int i; +# endif + + /* First round: using the given function, the context and a constant + the next context is computed. 
Because the algorithm's processing + unit is a 32-bit word and it is determined to work on words in + little endian byte order we perhaps have to change the byte order + before the computation. To reduce the work for the next steps + we save swapped words in WORDS array. */ +# undef OP +# define OP(a, b, c, d, s, T) \ + do { \ + a += FF(b, c, d) + (*words IF_BIG_ENDIAN(= SWAP_LE32(*words))) + T; \ + words++; \ + a = rotl32(a, s); \ + a += b; \ + } while (0) + + /* Round 1 */ +# if MD5_SMALL == 1 + pc = C_array; + for (i = 0; i < 4; i++) { + OP(A, B, C, D, 7, *pc++); + OP(D, A, B, C, 12, *pc++); + OP(C, D, A, B, 17, *pc++); + OP(B, C, D, A, 22, *pc++); + } +# else + OP(A, B, C, D, 7, 0xd76aa478); + OP(D, A, B, C, 12, 0xe8c7b756); + OP(C, D, A, B, 17, 0x242070db); + OP(B, C, D, A, 22, 0xc1bdceee); + OP(A, B, C, D, 7, 0xf57c0faf); + OP(D, A, B, C, 12, 0x4787c62a); + OP(C, D, A, B, 17, 0xa8304613); + OP(B, C, D, A, 22, 0xfd469501); + OP(A, B, C, D, 7, 0x698098d8); + OP(D, A, B, C, 12, 0x8b44f7af); + OP(C, D, A, B, 17, 0xffff5bb1); + OP(B, C, D, A, 22, 0x895cd7be); + OP(A, B, C, D, 7, 0x6b901122); + OP(D, A, B, C, 12, 0xfd987193); + OP(C, D, A, B, 17, 0xa679438e); + OP(B, C, D, A, 22, 0x49b40821); +# endif + words -= 16; + + /* For the second to fourth round we have the possibly swapped words + in WORDS. Redefine the macro to take an additional first + argument specifying the function to use. */ +# undef OP +# define OP(f, a, b, c, d, k, s, T) \ + do { \ + a += f(b, c, d) + words[k] + T; \ + a = rotl32(a, s); \ + a += b; \ + } while (0) + + /* Round 2 */ +# if MD5_SMALL == 1 + pp = P_array; + for (i = 0; i < 4; i++) { + OP(FG, A, B, C, D, (int) (*pp++), 5, *pc++); + OP(FG, D, A, B, C, (int) (*pp++), 9, *pc++); + OP(FG, C, D, A, B, (int) (*pp++), 14, *pc++); + OP(FG, B, C, D, A, (int) (*pp++), 20, *pc++); + } +# else + OP(FG, A, B, C, D, 1, 5, 0xf61e2562); + OP(FG, D, A, B, C, 6, 9, 0xc040b340); + OP(FG, C, D, A, B, 11, 14, 0x265e5a51); + OP(FG, B, C, D, A, 0, 20, 0xe9b6c7aa); + OP(FG, A, B, C, D, 5, 5, 0xd62f105d); + OP(FG, D, A, B, C, 10, 9, 0x02441453); + OP(FG, C, D, A, B, 15, 14, 0xd8a1e681); + OP(FG, B, C, D, A, 4, 20, 0xe7d3fbc8); + OP(FG, A, B, C, D, 9, 5, 0x21e1cde6); + OP(FG, D, A, B, C, 14, 9, 0xc33707d6); + OP(FG, C, D, A, B, 3, 14, 0xf4d50d87); + OP(FG, B, C, D, A, 8, 20, 0x455a14ed); + OP(FG, A, B, C, D, 13, 5, 0xa9e3e905); + OP(FG, D, A, B, C, 2, 9, 0xfcefa3f8); + OP(FG, C, D, A, B, 7, 14, 0x676f02d9); + OP(FG, B, C, D, A, 12, 20, 0x8d2a4c8a); +# endif + + /* Round 3 */ +# if MD5_SMALL == 1 + for (i = 0; i < 4; i++) { + OP(FH, A, B, C, D, (int) (*pp++), 4, *pc++); + OP(FH, D, A, B, C, (int) (*pp++), 11, *pc++); + OP(FH, C, D, A, B, (int) (*pp++), 16, *pc++); + OP(FH, B, C, D, A, (int) (*pp++), 23, *pc++); + } +# else + OP(FH, A, B, C, D, 5, 4, 0xfffa3942); + OP(FH, D, A, B, C, 8, 11, 0x8771f681); + OP(FH, C, D, A, B, 11, 16, 0x6d9d6122); + OP(FH, B, C, D, A, 14, 23, 0xfde5380c); + OP(FH, A, B, C, D, 1, 4, 0xa4beea44); + OP(FH, D, A, B, C, 4, 11, 0x4bdecfa9); + OP(FH, C, D, A, B, 7, 16, 0xf6bb4b60); + OP(FH, B, C, D, A, 10, 23, 0xbebfbc70); + OP(FH, A, B, C, D, 13, 4, 0x289b7ec6); + OP(FH, D, A, B, C, 0, 11, 0xeaa127fa); + OP(FH, C, D, A, B, 3, 16, 0xd4ef3085); + OP(FH, B, C, D, A, 6, 23, 0x04881d05); + OP(FH, A, B, C, D, 9, 4, 0xd9d4d039); + OP(FH, D, A, B, C, 12, 11, 0xe6db99e5); + OP(FH, C, D, A, B, 15, 16, 0x1fa27cf8); + OP(FH, B, C, D, A, 2, 23, 0xc4ac5665); +# endif + + /* Round 4 */ +# if MD5_SMALL == 1 + for (i = 0; i < 4; i++) { + OP(FI, A, B, C, D, (int) (*pp++), 6, 
*pc++); + OP(FI, D, A, B, C, (int) (*pp++), 10, *pc++); + OP(FI, C, D, A, B, (int) (*pp++), 15, *pc++); + OP(FI, B, C, D, A, (int) (*pp++), 21, *pc++); + } +# else + OP(FI, A, B, C, D, 0, 6, 0xf4292244); + OP(FI, D, A, B, C, 7, 10, 0x432aff97); + OP(FI, C, D, A, B, 14, 15, 0xab9423a7); + OP(FI, B, C, D, A, 5, 21, 0xfc93a039); + OP(FI, A, B, C, D, 12, 6, 0x655b59c3); + OP(FI, D, A, B, C, 3, 10, 0x8f0ccc92); + OP(FI, C, D, A, B, 10, 15, 0xffeff47d); + OP(FI, B, C, D, A, 1, 21, 0x85845dd1); + OP(FI, A, B, C, D, 8, 6, 0x6fa87e4f); + OP(FI, D, A, B, C, 15, 10, 0xfe2ce6e0); + OP(FI, C, D, A, B, 6, 15, 0xa3014314); + OP(FI, B, C, D, A, 13, 21, 0x4e0811a1); + OP(FI, A, B, C, D, 4, 6, 0xf7537e82); + OP(FI, D, A, B, C, 11, 10, 0xbd3af235); + OP(FI, C, D, A, B, 2, 15, 0x2ad7d2bb); + OP(FI, B, C, D, A, 9, 21, 0xeb86d391); +# undef OP +# endif + /* Add checksum to the starting values */ + ctx->hash[0] += A; + ctx->hash[1] += B; + ctx->hash[2] += C; + ctx->hash[3] += D; +#endif +} +#undef FF +#undef FG +#undef FH +#undef FI + +/* Initialize structure containing state of computation. + * (RFC 1321, 3.3: Step 3) + */ +void FAST_FUNC md5_begin(md5_ctx_t *ctx) +{ + ctx->hash[0] = 0x67452301; + ctx->hash[1] = 0xefcdab89; + ctx->hash[2] = 0x98badcfe; + ctx->hash[3] = 0x10325476; + ctx->total64 = 0; + ctx->process_block = md5_process_block64; +} + +/* Used also for sha1 and sha256 */ +void FAST_FUNC md5_hash(md5_ctx_t *ctx, const void *buffer, size_t len) +{ + common64_hash(ctx, buffer, len); +} + +/* Process the remaining bytes in the buffer and put result from CTX + * in first 16 bytes following RESBUF. The result is always in little + * endian byte order, so that a byte-wise output yields to the wanted + * ASCII representation of the message digest. + */ +void FAST_FUNC md5_end(md5_ctx_t *ctx, void *resbuf) +{ + /* MD5 stores total in LE, need to swap on BE arches: */ + common64_end(ctx, /*swap_needed:*/ BB_BIG_ENDIAN); + + /* The MD5 result is in little endian byte order */ + if (BB_BIG_ENDIAN) { + ctx->hash[0] = SWAP_LE32(ctx->hash[0]); + ctx->hash[1] = SWAP_LE32(ctx->hash[1]); + ctx->hash[2] = SWAP_LE32(ctx->hash[2]); + ctx->hash[3] = SWAP_LE32(ctx->hash[3]); + } + + memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * 4); +} + + +/* + * SHA1 part is: + * Copyright 2007 Rob Landley + * + * Based on the public domain SHA-1 in C by Steve Reid + * from http://www.mirrors.wiretapped.net/security/cryptography/hashes/sha1/ + * + * Licensed under GPLv2, see file LICENSE in this source tree. + * + * --------------------------------------------------------------------------- + * + * SHA256 and SHA512 parts are: + * Released into the Public Domain by Ulrich Drepper . + * Shrank by Denys Vlasenko. + * + * --------------------------------------------------------------------------- + * + * The best way to test random blocksizes is to go to coreutils/md5_sha1_sum.c + * and replace "4096" with something like "2000 + time(NULL) % 2097", + * then rebuild and compare "shaNNNsum bigfile" results. 
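+ *
+ * Note: MD5, SHA-1 and SHA-256 all consume 64-byte blocks and keep a 64-bit
+ * byte counter, so they share the common64_hash()/common64_end() buffering
+ * helpers above; SHA-512 uses 128-byte blocks and a 128-bit counter and
+ * therefore has its own sha512_hash()/sha512_end() pair further down.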
+ */ + +static void FAST_FUNC sha1_process_block64(sha1_ctx_t *ctx) +{ + static const uint32_t rconsts[] = { + 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6 + }; + int i, j; + int cnt; + uint32_t W[16+16]; + uint32_t a, b, c, d, e; + + /* On-stack work buffer frees up one register in the main loop + * which otherwise will be needed to hold ctx pointer */ + for (i = 0; i < 16; i++) + W[i] = W[i+16] = SWAP_BE32(((uint32_t*)ctx->wbuffer)[i]); + + a = ctx->hash[0]; + b = ctx->hash[1]; + c = ctx->hash[2]; + d = ctx->hash[3]; + e = ctx->hash[4]; + + /* 4 rounds of 20 operations each */ + cnt = 0; + for (i = 0; i < 4; i++) { + j = 19; + do { + uint32_t work; + + work = c ^ d; + if (i == 0) { + work = (work & b) ^ d; + if (j <= 3) + goto ge16; + /* Used to do SWAP_BE32 here, but this + * requires ctx (see comment above) */ + work += W[cnt]; + } else { + if (i == 2) + work = ((b | c) & d) | (b & c); + else /* i = 1 or 3 */ + work ^= b; + ge16: + W[cnt] = W[cnt+16] = rotl32(W[cnt+13] ^ W[cnt+8] ^ W[cnt+2] ^ W[cnt], 1); + work += W[cnt]; + } + work += e + rotl32(a, 5) + rconsts[i]; + + /* Rotate by one for next time */ + e = d; + d = c; + c = /* b = */ rotl32(b, 30); + b = a; + a = work; + cnt = (cnt + 1) & 15; + } while (--j >= 0); + } + + ctx->hash[0] += a; + ctx->hash[1] += b; + ctx->hash[2] += c; + ctx->hash[3] += d; + ctx->hash[4] += e; +} + +/* Constants for SHA512 from FIPS 180-2:4.2.3. + * SHA256 constants from FIPS 180-2:4.2.2 + * are the most significant half of first 64 elements + * of the same array. + */ +static const uint64_t sha_K[80] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, + 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, + 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, + 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, + 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, + 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, + 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, + 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, + 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, + 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, + 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, + 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, + 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, + 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, + 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, + 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, + 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, + 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, + 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, /* [64]+ are used for sha512 only */ + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, + 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, + 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, + 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL +}; + +#undef Ch +#undef Maj 
+#undef S0 +#undef S1 +#undef R0 +#undef R1 + +static void FAST_FUNC sha256_process_block64(sha256_ctx_t *ctx) +{ + unsigned t; + uint32_t W[64], a, b, c, d, e, f, g, h; + const uint32_t *words = (uint32_t*) ctx->wbuffer; + + /* Operators defined in FIPS 180-2:4.1.2. */ +#define Ch(x, y, z) ((x & y) ^ (~x & z)) +#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) +#define S0(x) (rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22)) +#define S1(x) (rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25)) +#define R0(x) (rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3)) +#define R1(x) (rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10)) + + /* Compute the message schedule according to FIPS 180-2:6.2.2 step 2. */ + for (t = 0; t < 16; ++t) + W[t] = SWAP_BE32(words[t]); + for (/*t = 16*/; t < 64; ++t) + W[t] = R1(W[t - 2]) + W[t - 7] + R0(W[t - 15]) + W[t - 16]; + + a = ctx->hash[0]; + b = ctx->hash[1]; + c = ctx->hash[2]; + d = ctx->hash[3]; + e = ctx->hash[4]; + f = ctx->hash[5]; + g = ctx->hash[6]; + h = ctx->hash[7]; + + /* The actual computation according to FIPS 180-2:6.2.2 step 3. */ + for (t = 0; t < 64; ++t) { + /* Need to fetch upper half of sha_K[t] + * (I hope compiler is clever enough to just fetch + * upper half) + */ + uint32_t K_t = sha_K[t] >> 32; + uint32_t T1 = h + S1(e) + Ch(e, f, g) + K_t + W[t]; + uint32_t T2 = S0(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + } +#undef Ch +#undef Maj +#undef S0 +#undef S1 +#undef R0 +#undef R1 + /* Add the starting values of the context according to FIPS 180-2:6.2.2 + step 4. */ + ctx->hash[0] += a; + ctx->hash[1] += b; + ctx->hash[2] += c; + ctx->hash[3] += d; + ctx->hash[4] += e; + ctx->hash[5] += f; + ctx->hash[6] += g; + ctx->hash[7] += h; +} + +static void FAST_FUNC sha512_process_block128(sha512_ctx_t *ctx) +{ + unsigned t; + uint64_t W[80]; + /* On i386, having assignments here (not later as sha256 does) + * produces 99 bytes smaller code with gcc 4.3.1 + */ + uint64_t a = ctx->hash[0]; + uint64_t b = ctx->hash[1]; + uint64_t c = ctx->hash[2]; + uint64_t d = ctx->hash[3]; + uint64_t e = ctx->hash[4]; + uint64_t f = ctx->hash[5]; + uint64_t g = ctx->hash[6]; + uint64_t h = ctx->hash[7]; + const uint64_t *words = (uint64_t*) ctx->wbuffer; + + /* Operators defined in FIPS 180-2:4.1.2. */ +#define Ch(x, y, z) ((x & y) ^ (~x & z)) +#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) +#define S0(x) (rotr64(x, 28) ^ rotr64(x, 34) ^ rotr64(x, 39)) +#define S1(x) (rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41)) +#define R0(x) (rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7)) +#define R1(x) (rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6)) + + /* Compute the message schedule according to FIPS 180-2:6.3.2 step 2. */ + for (t = 0; t < 16; ++t) + W[t] = SWAP_BE64(words[t]); + for (/*t = 16*/; t < 80; ++t) + W[t] = R1(W[t - 2]) + W[t - 7] + R0(W[t - 15]) + W[t - 16]; + + /* The actual computation according to FIPS 180-2:6.3.2 step 3. */ + for (t = 0; t < 80; ++t) { + uint64_t T1 = h + S1(e) + Ch(e, f, g) + sha_K[t] + W[t]; + uint64_t T2 = S0(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + } +#undef Ch +#undef Maj +#undef S0 +#undef S1 +#undef R0 +#undef R1 + /* Add the starting values of the context according to FIPS 180-2:6.3.2 + step 4. 
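+	   In effect this is the usual Davies-Meyer style feed-forward:
+	   adding the previous chaining value back into the transformed state
+	   is what keeps the per-block transform from being trivially
+	   invertible.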
*/ + ctx->hash[0] += a; + ctx->hash[1] += b; + ctx->hash[2] += c; + ctx->hash[3] += d; + ctx->hash[4] += e; + ctx->hash[5] += f; + ctx->hash[6] += g; + ctx->hash[7] += h; +} + + +void FAST_FUNC sha1_begin(sha1_ctx_t *ctx) +{ + ctx->hash[0] = 0x67452301; + ctx->hash[1] = 0xefcdab89; + ctx->hash[2] = 0x98badcfe; + ctx->hash[3] = 0x10325476; + ctx->hash[4] = 0xc3d2e1f0; + ctx->total64 = 0; + ctx->process_block = sha1_process_block64; +} + +static const uint32_t init256[] = { + 0, + 0, + 0x6a09e667, + 0xbb67ae85, + 0x3c6ef372, + 0xa54ff53a, + 0x510e527f, + 0x9b05688c, + 0x1f83d9ab, + 0x5be0cd19, +}; +static const uint32_t init512_lo[] = { + 0, + 0, + 0xf3bcc908, + 0x84caa73b, + 0xfe94f82b, + 0x5f1d36f1, + 0xade682d1, + 0x2b3e6c1f, + 0xfb41bd6b, + 0x137e2179, +}; + +/* Initialize structure containing state of computation. + (FIPS 180-2:5.3.2) */ +void FAST_FUNC sha256_begin(sha256_ctx_t *ctx) +{ + memcpy(&ctx->total64, init256, sizeof(init256)); + /*ctx->total64 = 0; - done by prepending two 32-bit zeros to init256 */ + ctx->process_block = sha256_process_block64; +} + +/* Initialize structure containing state of computation. + (FIPS 180-2:5.3.3) */ +void FAST_FUNC sha512_begin(sha512_ctx_t *ctx) +{ + int i; + /* Two extra iterations zero out ctx->total64[2] */ + uint64_t *tp = ctx->total64; + for (i = 0; i < 2+8; i++) + tp[i] = ((uint64_t)(init256[i]) << 32) + init512_lo[i]; + /*ctx->total64[0] = ctx->total64[1] = 0; - already done */ +} + +void FAST_FUNC sha512_hash(sha512_ctx_t *ctx, const void *buffer, size_t len) +{ + unsigned bufpos = ctx->total64[0] & 127; + unsigned remaining; + + /* First increment the byte count. FIPS 180-2 specifies the possible + length of the file up to 2^128 _bits_. + We compute the number of _bytes_ and convert to bits later. */ + ctx->total64[0] += len; + if (ctx->total64[0] < len) + ctx->total64[1]++; +#if 0 + remaining = 128 - bufpos; + + /* Hash whole blocks */ + while (len >= remaining) { + memcpy(ctx->wbuffer + bufpos, buffer, remaining); + buffer = (const char *)buffer + remaining; + len -= remaining; + remaining = 128; + bufpos = 0; + sha512_process_block128(ctx); + } + + /* Save last, partial blosk */ + memcpy(ctx->wbuffer + bufpos, buffer, len); +#else + while (1) { + remaining = 128 - bufpos; + if (remaining > len) + remaining = len; + /* Copy data into aligned buffer */ + memcpy(ctx->wbuffer + bufpos, buffer, remaining); + len -= remaining; + buffer = (const char *)buffer + remaining; + bufpos += remaining; + /* Clever way to do "if (bufpos != N) break; ... ; bufpos = 0;" */ + bufpos -= 128; + if (bufpos != 0) + break; + /* Buffer is filled up, process it */ + sha512_process_block128(ctx); + /*bufpos = 0; - already is */ + } +#endif +} + +/* Used also for sha256 */ +void FAST_FUNC sha1_end(sha1_ctx_t *ctx, void *resbuf) +{ + unsigned hash_size; + + /* SHA stores total in BE, need to swap on LE arches: */ + common64_end(ctx, /*swap_needed:*/ BB_LITTLE_ENDIAN); + + hash_size = (ctx->process_block == sha1_process_block64) ? 5 : 8; + /* This way we do not impose alignment constraints on resbuf: */ + if (BB_LITTLE_ENDIAN) { + unsigned i; + for (i = 0; i < hash_size; ++i) + ctx->hash[i] = SWAP_BE32(ctx->hash[i]); + } + memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * hash_size); +} + +void FAST_FUNC sha512_end(sha512_ctx_t *ctx, void *resbuf) +{ + unsigned bufpos = ctx->total64[0] & 127; + + /* Pad the buffer to the next 128-byte boundary with 0x80,0,0,0... 
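+	 * This is standard MD-style length padding: a single 0x80 byte follows
+	 * the data, the rest of the block is zeroed, and the 128-bit message
+	 * length in bits is stored big-endian in the last 16 bytes. If fewer
+	 * than 16 bytes remain after the 0x80 byte, the current block is
+	 * processed without the length and one extra all-zero block carrying
+	 * only the length follows - hence the loop below runs once or twice.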
*/ + ctx->wbuffer[bufpos++] = 0x80; + + while (1) { + unsigned remaining = 128 - bufpos; + memset(ctx->wbuffer + bufpos, 0, remaining); + if (remaining >= 16) { + /* Store the 128-bit counter of bits in the buffer in BE format */ + uint64_t t; + t = ctx->total64[0] << 3; + t = SWAP_BE64(t); + *(bb__aliased_uint64_t *) (&ctx->wbuffer[128 - 8]) = t; + t = (ctx->total64[1] << 3) | (ctx->total64[0] >> 61); + t = SWAP_BE64(t); + *(bb__aliased_uint64_t *) (&ctx->wbuffer[128 - 16]) = t; + } + sha512_process_block128(ctx); + if (remaining >= 16) + break; + bufpos = 0; + } + + if (BB_LITTLE_ENDIAN) { + unsigned i; + for (i = 0; i < ARRAY_SIZE(ctx->hash); ++i) + ctx->hash[i] = SWAP_BE64(ctx->hash[i]); + } + memcpy(resbuf, ctx->hash, sizeof(ctx->hash)); +} + + +/* + * The Keccak sponge function, designed by Guido Bertoni, Joan Daemen, + * Michael Peeters and Gilles Van Assche. For more information, feedback or + * questions, please refer to our website: http://keccak.noekeon.org/ + * + * Implementation by Ronny Van Keer, + * hereby denoted as "the implementer". + * + * To the extent possible under law, the implementer has waived all copyright + * and related or neighboring rights to the source code in this file. + * http://creativecommons.org/publicdomain/zero/1.0/ + * + * Busybox modifications (C) Lauri Kasanen, under the GPLv2. + */ + +#if CONFIG_SHA3_SMALL < 0 +# define SHA3_SMALL 0 +#elif CONFIG_SHA3_SMALL > 1 +# define SHA3_SMALL 1 +#else +# define SHA3_SMALL CONFIG_SHA3_SMALL +#endif + +#define OPTIMIZE_SHA3_FOR_32 0 +/* + * SHA3 can be optimized for 32-bit CPUs with bit-slicing: + * every 64-bit word of state[] can be split into two 32-bit words + * by even/odd bits. In this form, all rotations of sha3 round + * are 32-bit - and there are lots of them. + * However, it requires either splitting/combining state words + * before/after sha3 round (code does this now) + * or shuffling bits before xor'ing them into state and in sha3_end. + * Without shuffling, bit-slicing results in -130 bytes of code + * and marginal speedup (but of course it gives wrong result). + * With shuffling it works, but +260 code bytes, and slower. + * Disabled for now: + */ +#if 0 /* LONG_MAX == 0x7fffffff */ +# undef OPTIMIZE_SHA3_FOR_32 +# define OPTIMIZE_SHA3_FOR_32 1 +#endif + +#if OPTIMIZE_SHA3_FOR_32 +/* This splits every 64-bit word into a pair of 32-bit words, + * even bits go into first word, odd bits go to second one. + * The conversion is done in-place. + */ +static void split_halves(uint64_t *state) +{ + /* Credit: Henry S. 
Warren, Hacker's Delight, Addison-Wesley, 2002 */ + uint32_t *s32 = (uint32_t*)state; + uint32_t t, x0, x1; + int i; + for (i = 24; i >= 0; --i) { + x0 = s32[0]; + t = (x0 ^ (x0 >> 1)) & 0x22222222; x0 = x0 ^ t ^ (t << 1); + t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0C; x0 = x0 ^ t ^ (t << 2); + t = (x0 ^ (x0 >> 4)) & 0x00F000F0; x0 = x0 ^ t ^ (t << 4); + t = (x0 ^ (x0 >> 8)) & 0x0000FF00; x0 = x0 ^ t ^ (t << 8); + x1 = s32[1]; + t = (x1 ^ (x1 >> 1)) & 0x22222222; x1 = x1 ^ t ^ (t << 1); + t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0C; x1 = x1 ^ t ^ (t << 2); + t = (x1 ^ (x1 >> 4)) & 0x00F000F0; x1 = x1 ^ t ^ (t << 4); + t = (x1 ^ (x1 >> 8)) & 0x0000FF00; x1 = x1 ^ t ^ (t << 8); + *s32++ = (x0 & 0x0000FFFF) | (x1 << 16); + *s32++ = (x0 >> 16) | (x1 & 0xFFFF0000); + } +} +/* The reverse operation */ +static void combine_halves(uint64_t *state) +{ + uint32_t *s32 = (uint32_t*)state; + uint32_t t, x0, x1; + int i; + for (i = 24; i >= 0; --i) { + x0 = s32[0]; + x1 = s32[1]; + t = (x0 & 0x0000FFFF) | (x1 << 16); + x1 = (x0 >> 16) | (x1 & 0xFFFF0000); + x0 = t; + t = (x0 ^ (x0 >> 8)) & 0x0000FF00; x0 = x0 ^ t ^ (t << 8); + t = (x0 ^ (x0 >> 4)) & 0x00F000F0; x0 = x0 ^ t ^ (t << 4); + t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0C; x0 = x0 ^ t ^ (t << 2); + t = (x0 ^ (x0 >> 1)) & 0x22222222; x0 = x0 ^ t ^ (t << 1); + *s32++ = x0; + t = (x1 ^ (x1 >> 8)) & 0x0000FF00; x1 = x1 ^ t ^ (t << 8); + t = (x1 ^ (x1 >> 4)) & 0x00F000F0; x1 = x1 ^ t ^ (t << 4); + t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0C; x1 = x1 ^ t ^ (t << 2); + t = (x1 ^ (x1 >> 1)) & 0x22222222; x1 = x1 ^ t ^ (t << 1); + *s32++ = x1; + } +} +#endif + +/* + * In the crypto literature this function is usually called Keccak-f(). + */ +static void sha3_process_block72(uint64_t *state) +{ + enum { NROUNDS = 24 }; + +#if OPTIMIZE_SHA3_FOR_32 + /* + static const uint32_t IOTA_CONST_0[NROUNDS] = { + 0x00000001UL, + 0x00000000UL, + 0x00000000UL, + 0x00000000UL, + 0x00000001UL, + 0x00000001UL, + 0x00000001UL, + 0x00000001UL, + 0x00000000UL, + 0x00000000UL, + 0x00000001UL, + 0x00000000UL, + 0x00000001UL, + 0x00000001UL, + 0x00000001UL, + 0x00000001UL, + 0x00000000UL, + 0x00000000UL, + 0x00000000UL, + 0x00000000UL, + 0x00000001UL, + 0x00000000UL, + 0x00000001UL, + 0x00000000UL, + }; + ** bits are in lsb: 0101 0000 1111 0100 1111 0001 + */ + uint32_t IOTA_CONST_0bits = (uint32_t)(0x0050f4f1); + static const uint32_t IOTA_CONST_1[NROUNDS] = { + 0x00000000UL, + 0x00000089UL, + 0x8000008bUL, + 0x80008080UL, + 0x0000008bUL, + 0x00008000UL, + 0x80008088UL, + 0x80000082UL, + 0x0000000bUL, + 0x0000000aUL, + 0x00008082UL, + 0x00008003UL, + 0x0000808bUL, + 0x8000000bUL, + 0x8000008aUL, + 0x80000081UL, + 0x80000081UL, + 0x80000008UL, + 0x00000083UL, + 0x80008003UL, + 0x80008088UL, + 0x80000088UL, + 0x00008000UL, + 0x80008082UL, + }; + + uint32_t *const s32 = (uint32_t*)state; + unsigned round; + + split_halves(state); + + for (round = 0; round < NROUNDS; round++) { + unsigned x; + + /* Theta */ + { + uint32_t BC[20]; + for (x = 0; x < 10; ++x) { + BC[x+10] = BC[x] = s32[x]^s32[x+10]^s32[x+20]^s32[x+30]^s32[x+40]; + } + for (x = 0; x < 10; x += 2) { + uint32_t ta, tb; + ta = BC[x+8] ^ rotl32(BC[x+3], 1); + tb = BC[x+9] ^ BC[x+2]; + s32[x+0] ^= ta; + s32[x+1] ^= tb; + s32[x+10] ^= ta; + s32[x+11] ^= tb; + s32[x+20] ^= ta; + s32[x+21] ^= tb; + s32[x+30] ^= ta; + s32[x+31] ^= tb; + s32[x+40] ^= ta; + s32[x+41] ^= tb; + } + } + /* RhoPi */ + { + uint32_t t0a,t0b, t1a,t1b; + t1a = s32[1*2+0]; + t1b = s32[1*2+1]; + +#define RhoPi(PI_LANE, ROT_CONST) \ + t0a = s32[PI_LANE*2+0];\ + t0b = s32[PI_LANE*2+1];\ + if 
(ROT_CONST & 1) {\ + s32[PI_LANE*2+0] = rotl32(t1b, ROT_CONST/2+1);\ + s32[PI_LANE*2+1] = ROT_CONST == 1 ? t1a : rotl32(t1a, ROT_CONST/2+0);\ + } else {\ + s32[PI_LANE*2+0] = rotl32(t1a, ROT_CONST/2);\ + s32[PI_LANE*2+1] = rotl32(t1b, ROT_CONST/2);\ + }\ + t1a = t0a; t1b = t0b; + + RhoPi(10, 1) + RhoPi( 7, 3) + RhoPi(11, 6) + RhoPi(17,10) + RhoPi(18,15) + RhoPi( 3,21) + RhoPi( 5,28) + RhoPi(16,36) + RhoPi( 8,45) + RhoPi(21,55) + RhoPi(24, 2) + RhoPi( 4,14) + RhoPi(15,27) + RhoPi(23,41) + RhoPi(19,56) + RhoPi(13, 8) + RhoPi(12,25) + RhoPi( 2,43) + RhoPi(20,62) + RhoPi(14,18) + RhoPi(22,39) + RhoPi( 9,61) + RhoPi( 6,20) + RhoPi( 1,44) +#undef RhoPi + } + /* Chi */ + for (x = 0; x <= 40;) { + uint32_t BC0, BC1, BC2, BC3, BC4; + BC0 = s32[x + 0*2]; + BC1 = s32[x + 1*2]; + BC2 = s32[x + 2*2]; + s32[x + 0*2] = BC0 ^ ((~BC1) & BC2); + BC3 = s32[x + 3*2]; + s32[x + 1*2] = BC1 ^ ((~BC2) & BC3); + BC4 = s32[x + 4*2]; + s32[x + 2*2] = BC2 ^ ((~BC3) & BC4); + s32[x + 3*2] = BC3 ^ ((~BC4) & BC0); + s32[x + 4*2] = BC4 ^ ((~BC0) & BC1); + x++; + BC0 = s32[x + 0*2]; + BC1 = s32[x + 1*2]; + BC2 = s32[x + 2*2]; + s32[x + 0*2] = BC0 ^ ((~BC1) & BC2); + BC3 = s32[x + 3*2]; + s32[x + 1*2] = BC1 ^ ((~BC2) & BC3); + BC4 = s32[x + 4*2]; + s32[x + 2*2] = BC2 ^ ((~BC3) & BC4); + s32[x + 3*2] = BC3 ^ ((~BC4) & BC0); + s32[x + 4*2] = BC4 ^ ((~BC0) & BC1); + x += 9; + } + /* Iota */ + s32[0] ^= IOTA_CONST_0bits & 1; + IOTA_CONST_0bits >>= 1; + s32[1] ^= IOTA_CONST_1[round]; + } + + combine_halves(state); +#else + /* Native 64-bit algorithm */ + static const uint16_t IOTA_CONST[NROUNDS] = { + /* Elements should be 64-bit, but top half is always zero + * or 0x80000000. We encode 63rd bits in a separate word below. + * Same is true for 31th bits, which lets us use 16-bit table + * instead of 64-bit. The speed penalty is lost in the noise. + */ + 0x0001, + 0x8082, + 0x808a, + 0x8000, + 0x808b, + 0x0001, + 0x8081, + 0x8009, + 0x008a, + 0x0088, + 0x8009, + 0x000a, + 0x808b, + 0x008b, + 0x8089, + 0x8003, + 0x8002, + 0x0080, + 0x800a, + 0x000a, + 0x8081, + 0x8080, + 0x0001, + 0x8008, + }; + /* bit for CONST[0] is in msb: 0011 0011 0000 0111 1101 1101 */ + const uint32_t IOTA_CONST_bit63 = (uint32_t)(0x3307dd00); + /* bit for CONST[0] is in msb: 0001 0110 0011 1000 0001 1011 */ + const uint32_t IOTA_CONST_bit31 = (uint32_t)(0x16381b00); + + static const uint8_t ROT_CONST[24] = { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44, + }; + static const uint8_t PI_LANE[24] = { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1, + }; + /*static const uint8_t MOD5[10] = { 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, };*/ + + unsigned x; + unsigned round; + + if (BB_BIG_ENDIAN) { + for (x = 0; x < 25; x++) { + state[x] = SWAP_LE64(state[x]); + } + } + + for (round = 0; round < NROUNDS; ++round) { + /* Theta */ + { + uint64_t BC[10]; + for (x = 0; x < 5; ++x) { + BC[x + 5] = BC[x] = state[x] + ^ state[x + 5] ^ state[x + 10] + ^ state[x + 15] ^ state[x + 20]; + } + /* Using 2x5 vector above eliminates the need to use + * BC[MOD5[x+N]] trick below to fetch BC[(x+N) % 5], + * and the code is a bit _smaller_. 
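+			 * Theta itself: BC[0..4] hold the XOR parity of the five
+			 * state columns, and every lane in column x gets XORed with
+			 * D[x] = C[(x+4) % 5] ^ rotl64(C[(x+1) % 5], 1), which is
+			 * exactly what the temp expression below computes - the
+			 * duplicated BC[x+5..9] copy stands in for the mod-5
+			 * wraparound.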
+ */ + for (x = 0; x < 5; ++x) { + uint64_t temp = BC[x + 4] ^ rotl64(BC[x + 1], 1); + state[x] ^= temp; + state[x + 5] ^= temp; + state[x + 10] ^= temp; + state[x + 15] ^= temp; + state[x + 20] ^= temp; + } + } + + /* Rho Pi */ + if (SHA3_SMALL) { + uint64_t t1 = state[1]; + for (x = 0; x < 24; ++x) { + uint64_t t0 = state[PI_LANE[x]]; + state[PI_LANE[x]] = rotl64(t1, ROT_CONST[x]); + t1 = t0; + } + } else { + /* Especially large benefit for 32-bit arch (75% faster): + * 64-bit rotations by non-constant usually are SLOW on those. + * We resort to unrolling here. + * This optimizes out PI_LANE[] and ROT_CONST[], + * but generates 300-500 more bytes of code. + */ + uint64_t t0; + uint64_t t1 = state[1]; +#define RhoPi_twice(x) \ + t0 = state[PI_LANE[x ]]; \ + state[PI_LANE[x ]] = rotl64(t1, ROT_CONST[x ]); \ + t1 = state[PI_LANE[x+1]]; \ + state[PI_LANE[x+1]] = rotl64(t0, ROT_CONST[x+1]); + RhoPi_twice(0); RhoPi_twice(2); + RhoPi_twice(4); RhoPi_twice(6); + RhoPi_twice(8); RhoPi_twice(10); + RhoPi_twice(12); RhoPi_twice(14); + RhoPi_twice(16); RhoPi_twice(18); + RhoPi_twice(20); RhoPi_twice(22); +#undef RhoPi_twice + } + /* Chi */ +# if LONG_MAX > 0x7fffffff + for (x = 0; x <= 20; x += 5) { + uint64_t BC0, BC1, BC2, BC3, BC4; + BC0 = state[x + 0]; + BC1 = state[x + 1]; + BC2 = state[x + 2]; + state[x + 0] = BC0 ^ ((~BC1) & BC2); + BC3 = state[x + 3]; + state[x + 1] = BC1 ^ ((~BC2) & BC3); + BC4 = state[x + 4]; + state[x + 2] = BC2 ^ ((~BC3) & BC4); + state[x + 3] = BC3 ^ ((~BC4) & BC0); + state[x + 4] = BC4 ^ ((~BC0) & BC1); + } +# else + /* Reduced register pressure version + * for register-starved 32-bit arches + * (i386: -95 bytes, and it is _faster_) + */ + for (x = 0; x <= 40;) { + uint32_t BC0, BC1, BC2, BC3, BC4; + uint32_t *const s32 = (uint32_t*)state; +# if SHA3_SMALL + do_half: +# endif + BC0 = s32[x + 0*2]; + BC1 = s32[x + 1*2]; + BC2 = s32[x + 2*2]; + s32[x + 0*2] = BC0 ^ ((~BC1) & BC2); + BC3 = s32[x + 3*2]; + s32[x + 1*2] = BC1 ^ ((~BC2) & BC3); + BC4 = s32[x + 4*2]; + s32[x + 2*2] = BC2 ^ ((~BC3) & BC4); + s32[x + 3*2] = BC3 ^ ((~BC4) & BC0); + s32[x + 4*2] = BC4 ^ ((~BC0) & BC1); + x++; +# if SHA3_SMALL + if (x & 1) + goto do_half; + x += 8; +# else + BC0 = s32[x + 0*2]; + BC1 = s32[x + 1*2]; + BC2 = s32[x + 2*2]; + s32[x + 0*2] = BC0 ^ ((~BC1) & BC2); + BC3 = s32[x + 3*2]; + s32[x + 1*2] = BC1 ^ ((~BC2) & BC3); + BC4 = s32[x + 4*2]; + s32[x + 2*2] = BC2 ^ ((~BC3) & BC4); + s32[x + 3*2] = BC3 ^ ((~BC4) & BC0); + s32[x + 4*2] = BC4 ^ ((~BC0) & BC1); + x += 9; +# endif + } +# endif /* long is 32-bit */ + /* Iota */ + state[0] ^= IOTA_CONST[round] + | (uint32_t)((IOTA_CONST_bit31 << round) & 0x80000000) + | (uint64_t)((IOTA_CONST_bit63 << round) & 0x80000000) << 32; + } + + if (BB_BIG_ENDIAN) { + for (x = 0; x < 25; x++) { + state[x] = SWAP_LE64(state[x]); + } + } +#endif +} + +void FAST_FUNC sha3_begin(sha3_ctx_t *ctx) +{ + memset(ctx, 0, sizeof(*ctx)); + /* SHA3-512, user can override */ + ctx->input_block_bytes = (1600 - 512*2) / 8; /* 72 bytes */ +} + +void FAST_FUNC sha3_hash(sha3_ctx_t *ctx, const void *buffer, size_t len) +{ +#if SHA3_SMALL + const uint8_t *data = buffer; + unsigned bufpos = ctx->bytes_queued; + + while (1) { + unsigned remaining = ctx->input_block_bytes - bufpos; + if (remaining > len) + remaining = len; + len -= remaining; + /* XOR data into buffer */ + while (remaining != 0) { + uint8_t *buf = (uint8_t*)ctx->state; + buf[bufpos] ^= *data++; + bufpos++; + remaining--; + } + /* Clever way to do "if (bufpos != N) break; ... 
; bufpos = 0;" */ + bufpos -= ctx->input_block_bytes; + if (bufpos != 0) + break; + /* Buffer is filled up, process it */ + sha3_process_block72(ctx->state); + /*bufpos = 0; - already is */ + } + ctx->bytes_queued = bufpos + ctx->input_block_bytes; +#else + /* +50 bytes code size, but a bit faster because of long-sized XORs */ + const uint8_t *data = buffer; + unsigned bufpos = ctx->bytes_queued; + unsigned iblk_bytes = ctx->input_block_bytes; + + /* If already data in queue, continue queuing first */ + if (bufpos != 0) { + while (len != 0) { + uint8_t *buf = (uint8_t*)ctx->state; + buf[bufpos] ^= *data++; + len--; + bufpos++; + if (bufpos == iblk_bytes) { + bufpos = 0; + goto do_block; + } + } + } + + /* Absorb complete blocks */ + while (len >= iblk_bytes) { + /* XOR data onto beginning of state[]. + * We try to be efficient - operate one word at a time, not byte. + * Careful wrt unaligned access: can't just use "*(long*)data"! + */ + unsigned count = iblk_bytes / sizeof(long); + long *buf = (long*)ctx->state; + do { + long v; + move_from_unaligned_long(v, (long*)data); + *buf++ ^= v; + data += sizeof(long); + } while (--count); + len -= iblk_bytes; + do_block: + sha3_process_block72(ctx->state); + } + + /* Queue remaining data bytes */ + while (len != 0) { + uint8_t *buf = (uint8_t*)ctx->state; + buf[bufpos] ^= *data++; + bufpos++; + len--; + } + + ctx->bytes_queued = bufpos; +#endif +} + +void FAST_FUNC sha3_end(sha3_ctx_t *ctx, void *resbuf) +{ + /* Padding */ + uint8_t *buf = (uint8_t*)ctx->state; + /* + * Keccak block padding is: add 1 bit after last bit of input, + * then add zero bits until the end of block, and add the last 1 bit + * (the last bit in the block) - the "10*1" pattern. + * SHA3 standard appends additional two bits, 01, before that padding: + * + * SHA3-224(M) = KECCAK[448](M||01, 224) + * SHA3-256(M) = KECCAK[512](M||01, 256) + * SHA3-384(M) = KECCAK[768](M||01, 384) + * SHA3-512(M) = KECCAK[1024](M||01, 512) + * (M is the input, || is bit concatenation) + * + * The 6 below contains 01 "SHA3" bits and the first 1 "Keccak" bit: + */ + buf[ctx->bytes_queued] ^= 6; /* bit pattern 00000110 */ + buf[ctx->input_block_bytes - 1] ^= 0x80; + + sha3_process_block72(ctx->state); + + /* Output */ + memcpy(resbuf, ctx->state, 64); +} diff --git a/probe-busybox/libbb/inet_common.c b/probe-busybox/libbb/inet_common.c new file mode 100644 index 00000000..5b4a4a10 --- /dev/null +++ b/probe-busybox/libbb/inet_common.c @@ -0,0 +1,205 @@ +/* vi: set sw=4 ts=4: */ +/* + * stolen from net-tools-1.59 and stripped down for busybox by + * Erik Andersen + * + * Heavily modified by Manuel Novoa III Mar 12, 2001 + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" +#include "inet_common.h" + +int FAST_FUNC INET_resolve(const char *name, struct sockaddr_in *s_in, int hostfirst) +{ + struct hostent *hp; +#if ENABLE_FEATURE_ETC_NETWORKS + struct netent *np; +#endif + + /* Grmpf. -FvK */ + s_in->sin_family = AF_INET; + s_in->sin_port = 0; + + /* Default is special, meaning 0.0.0.0. */ + if (strcmp(name, "default") == 0) { + s_in->sin_addr.s_addr = INADDR_ANY; + return 1; + } + /* Look to see if it's a dotted quad. 
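+	   inet_aton() handles the numeric a.b.c.d form directly. As in the
+	   other branches of this function, returning 0 means a host address
+	   was stored, 1 means a network (or "default") address, and -1 means
+	   resolution failed.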
*/ + if (inet_aton(name, &s_in->sin_addr)) { + return 0; + } + /* If we expect this to be a hostname, try hostname database first */ + if (hostfirst) { +#ifdef DEBUG + bb_error_msg("gethostbyname(%s)", name); +#endif + hp = gethostbyname(name); + if (hp) { + memcpy(&s_in->sin_addr, hp->h_addr_list[0], + sizeof(struct in_addr)); + return 0; + } + } +#if ENABLE_FEATURE_ETC_NETWORKS + /* Try the NETWORKS database to see if this is a known network. */ +#ifdef DEBUG + bb_error_msg("getnetbyname(%s)", name); +#endif + np = getnetbyname(name); + if (np) { + s_in->sin_addr.s_addr = htonl(np->n_net); + return 1; + } +#endif + if (hostfirst) { + /* Don't try again */ + return -1; + } +#ifdef DEBUG + res_init(); + _res.options |= RES_DEBUG; + bb_error_msg("gethostbyname(%s)", name); +#endif + hp = gethostbyname(name); + if (!hp) { + return -1; + } + memcpy(&s_in->sin_addr, hp->h_addr_list[0], sizeof(struct in_addr)); + return 0; +} + + +/* numeric: & 0x8000: "default" instead of "*", + * & 0x4000: host instead of net, + * & 0x0fff: don't resolve + */ +char* FAST_FUNC INET_rresolve(struct sockaddr_in *s_in, int numeric, uint32_t netmask) +{ + /* addr-to-name cache */ + struct addr { + struct addr *next; + uint32_t nip; + smallint is_host; + char name[1]; + }; + static struct addr *cache = NULL; + + struct addr *pn; + char *name; + uint32_t nip; + smallint is_host; + + if (s_in->sin_family != AF_INET) { +#ifdef DEBUG + bb_error_msg("rresolve: unsupported address family %d!", + s_in->sin_family); +#endif + errno = EAFNOSUPPORT; + return NULL; + } + nip = s_in->sin_addr.s_addr; +#ifdef DEBUG + bb_error_msg("rresolve: %08x mask:%08x num:%08x", (unsigned)nip, netmask, numeric); +#endif + if (numeric & 0x0FFF) + return xmalloc_sockaddr2dotted_noport((void*)s_in); + if (nip == INADDR_ANY) { + if (numeric & 0x8000) + return xstrdup("default"); + return xstrdup("*"); + } + + is_host = ((nip & (~netmask)) != 0 || (numeric & 0x4000)); + + pn = cache; + while (pn) { + if (pn->nip == nip && pn->is_host == is_host) { +#ifdef DEBUG + bb_error_msg("rresolve: found %s %08x in cache", + (is_host ? 
"host" : "net"), (unsigned)nip); +#endif + return xstrdup(pn->name); + } + pn = pn->next; + } + + name = NULL; + if (is_host) { +#ifdef DEBUG + bb_error_msg("sockaddr2host_noport(%08x)", (unsigned)nip); +#endif + name = xmalloc_sockaddr2host_noport((void*)s_in); + } else if (ENABLE_FEATURE_ETC_NETWORKS) { + struct netent *np; +#ifdef DEBUG + bb_error_msg("getnetbyaddr(%08x)", (unsigned)ntohl(nip)); +#endif + np = getnetbyaddr(ntohl(nip), AF_INET); + if (np) + name = xstrdup(np->n_name); + } + if (!name) + name = xmalloc_sockaddr2dotted_noport((void*)s_in); + + pn = xmalloc(sizeof(*pn) + strlen(name)); /* no '+ 1', it's already accounted for */ + pn->next = cache; + pn->nip = nip; + pn->is_host = is_host; + strcpy(pn->name, name); + cache = pn; + + return name; +} + +#if ENABLE_FEATURE_IPV6 + +int FAST_FUNC INET6_resolve(const char *name, struct sockaddr_in6 *sin6) +{ + struct addrinfo req, *ai = NULL; + int s; + + memset(&req, 0, sizeof(req)); + req.ai_family = AF_INET6; + s = getaddrinfo(name, NULL, &req, &ai); + if (s != 0) { + bb_error_msg("getaddrinfo: %s: %d", name, s); + return -1; + } + memcpy(sin6, ai->ai_addr, sizeof(*sin6)); + freeaddrinfo(ai); + return 0; +} + +#ifndef IN6_IS_ADDR_UNSPECIFIED +# define IN6_IS_ADDR_UNSPECIFIED(a) \ + (((uint32_t *) (a))[0] == 0 && ((uint32_t *) (a))[1] == 0 && \ + ((uint32_t *) (a))[2] == 0 && ((uint32_t *) (a))[3] == 0) +#endif + + +char* FAST_FUNC INET6_rresolve(struct sockaddr_in6 *sin6, int numeric) +{ + if (sin6->sin6_family != AF_INET6) { +#ifdef DEBUG + bb_error_msg("rresolve: unsupported address family %d!", + sin6->sin6_family); +#endif + errno = EAFNOSUPPORT; + return NULL; + } + if (numeric & 0x7FFF) { + return xmalloc_sockaddr2dotted_noport((void*)sin6); + } + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { + if (numeric & 0x8000) + return xstrdup("default"); + return xstrdup("*"); + } + + return xmalloc_sockaddr2host_noport((void*)sin6); +} + +#endif /* CONFIG_FEATURE_IPV6 */ diff --git a/probe-busybox/libbb/last_char_is.c b/probe-busybox/libbb/last_char_is.c new file mode 100644 index 00000000..65e6cdf0 --- /dev/null +++ b/probe-busybox/libbb/last_char_is.c @@ -0,0 +1,24 @@ +/* vi: set sw=4 ts=4: */ +/* + * busybox library eXtended function + * + * Copyright (C) 2001 Larry Doolittle, + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* Find out if the last character of a string matches the one given. + * Don't underrun the buffer if the string length is 0. + */ +char* FAST_FUNC last_char_is(const char *s, int c) +{ + if (s && *s) { + size_t sz = strlen(s) - 1; + s += sz; + if ( (unsigned char)*s == c) + return (char*)s; + } + return NULL; +} diff --git a/probe-busybox/libbb/llist.c b/probe-busybox/libbb/llist.c new file mode 100644 index 00000000..032e9fac --- /dev/null +++ b/probe-busybox/libbb/llist.c @@ -0,0 +1,98 @@ +/* vi: set sw=4 ts=4: */ +/* + * linked list helper functions. + * + * Copyright (C) 2003 Glenn McGrath + * Copyright (C) 2005 Vladimir Oleynik + * Copyright (C) 2005 Bernhard Reutner-Fischer + * Copyright (C) 2006 Rob Landley + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* Add data to the start of the linked list. */ +void FAST_FUNC llist_add_to(llist_t **old_head, void *data) +{ + llist_t *new_head = xmalloc(sizeof(llist_t)); + + new_head->data = data; + new_head->link = *old_head; + *old_head = new_head; +} + +/* Add data to the end of the linked list. 
*/ +void FAST_FUNC llist_add_to_end(llist_t **list_head, void *data) +{ + while (*list_head) + list_head = &(*list_head)->link; + *list_head = xzalloc(sizeof(llist_t)); + (*list_head)->data = data; + /*(*list_head)->link = NULL;*/ +} + +/* Remove first element from the list and return it */ +void* FAST_FUNC llist_pop(llist_t **head) +{ + void *data = NULL; + llist_t *temp = *head; + + if (temp) { + data = temp->data; + *head = temp->link; + free(temp); + } + return data; +} + +/* Unlink arbitrary given element from the list */ +void FAST_FUNC llist_unlink(llist_t **head, llist_t *elm) +{ + if (!elm) + return; + while (*head) { + if (*head == elm) { + *head = (*head)->link; + break; + } + head = &(*head)->link; + } +} + +/* Recursively free all elements in the linked list. If freeit != NULL + * call it on each datum in the list */ +void FAST_FUNC llist_free(llist_t *elm, void (*freeit)(void *data)) +{ + while (elm) { + void *data = llist_pop(&elm); + + if (freeit) + freeit(data); + } +} + +/* Reverse list order. */ +llist_t* FAST_FUNC llist_rev(llist_t *list) +{ + llist_t *rev = NULL; + + while (list) { + llist_t *next = list->link; + + list->link = rev; + rev = list; + list = next; + } + return rev; +} + +llist_t* FAST_FUNC llist_find_str(llist_t *list, const char *str) +{ + while (list) { + if (strcmp(list->data, str) == 0) + break; + list = list->link; + } + return list; +} diff --git a/probe-busybox/libbb/login.c b/probe-busybox/libbb/login.c new file mode 100644 index 00000000..5a7acfcf --- /dev/null +++ b/probe-busybox/libbb/login.c @@ -0,0 +1,171 @@ +/* vi: set sw=4 ts=4: */ +/* + * issue.c: issue printing code + * + * Copyright (C) 2003 Bastian Blank + * + * Optimize and correcting OCRNL by Vladimir Oleynik + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" +/* After libbb.h, since it needs sys/types.h on some systems */ +#include + +#define LOGIN " login: " + +static const char fmtstr_d[] ALIGN1 = "%A, %d %B %Y"; + +void FAST_FUNC print_login_issue(const char *issue_file, const char *tty) +{ + FILE *fp; + int c; + char buf[256+1]; + const char *outbuf; + time_t t; + struct utsname uts; + + time(&t); + uname(&uts); + + puts("\r"); /* start a new line */ + + fp = fopen_for_read(issue_file); + if (!fp) + return; + while ((c = fgetc(fp)) != EOF) { + outbuf = buf; + buf[0] = c; + buf[1] = '\0'; + if (c == '\n') { + buf[1] = '\r'; + buf[2] = '\0'; + } + if (c == '\\' || c == '%') { + c = fgetc(fp); + switch (c) { +//From getty manpage (* - supported by us) +//======================================== +//4 or 4{interface} +// Insert the IPv4 address of the network interface (example: \4{eth0}). +// If the interface argument is not specified, then select the first +// fully configured (UP, non-LOOPBACK, RUNNING) interface. +//6 or 6{interface} -- The same as \4 but for IPv6. +//b -- Insert the baudrate of the current line. +//*d -- Insert the current date. +//*t -- Insert the current time. +//e or e{name} +// Translate the human-readable name to an escape sequence and insert it +// (for example: \e{red}Alert text.\e{reset}). If the name argument +// is not specified, then insert \033. The currently supported names are: +// black, blink, blue, bold, brown, cyan, darkgray, gray, green, halfbright, +// lightblue, lightcyan, lightgray, lightgreen, lightmagenta, lightred, +// magenta, red, reset, reverse, and yellow. Unknown names are ignored. 
+//*s +// Insert the system name (the name of the operating system - `uname -s`) +//*S or S{VARIABLE} +// Insert the VARIABLE data from /etc/os-release. +// If the VARIABLE argument is not specified, use PRETTY_NAME. +// If PRETTY_NAME is not in /etc/os-release, \S is the same as \s. +//*l -- Insert the name of the current tty line. +//*m -- Insert the architecture identifier of the machine: `uname -m`. +//*n -- Insert the nodename of the machine: `uname -n`. +//*o -- Insert the NIS domainname of the machine. Same as `hostname -d'. +//*O -- Insert the DNS domainname of the machine. +//*r -- Insert the release number of the OS: `uname -r`. +//u -- Insert the number of current users logged in. +//U -- Insert the string "1 user" or "N users" (current users logged in). +//*v -- Insert the version of the OS, e.g. the build-date etc: `uname -v`. +//We also implement: +//*D -- same as \O "DNS domainname" +//*h -- same as \n "nodename" + + case 'S': + /* minimal implementation, not reading /etc/os-release */ + /*FALLTHROUGH*/ + case 's': + outbuf = uts.sysname; + break; + case 'n': + case 'h': + outbuf = uts.nodename; + break; + case 'r': + outbuf = uts.release; + break; + case 'v': + outbuf = uts.version; + break; + case 'm': + outbuf = uts.machine; + break; +/* The field domainname of struct utsname is Linux specific. */ +#if defined(__linux__) + case 'D': + case 'o': + case 'O': + outbuf = uts.domainname; + break; +#endif + case 'd': + strftime(buf, sizeof(buf), fmtstr_d, localtime(&t)); + break; + case 't': + strftime_HHMMSS(buf, sizeof(buf), &t); + break; + case 'l': + outbuf = tty; + break; + default: + buf[0] = c; + } + } + fputs(outbuf, stdout); + } + fclose(fp); + fflush_all(); +} + +void FAST_FUNC print_login_prompt(void) +{ + char *hostname = safe_gethostname(); + + fputs(hostname, stdout); + fputs(LOGIN, stdout); + fflush_all(); + free(hostname); +} + +/* Clear dangerous stuff, set PATH */ +static const char forbid[] ALIGN1 = + "ENV" "\0" + "BASH_ENV" "\0" + "HOME" "\0" + "IFS" "\0" + "SHELL" "\0" + "LD_LIBRARY_PATH" "\0" + "LD_PRELOAD" "\0" + "LD_TRACE_LOADED_OBJECTS" "\0" + "LD_BIND_NOW" "\0" + "LD_AOUT_LIBRARY_PATH" "\0" + "LD_AOUT_PRELOAD" "\0" + "LD_NOWARN" "\0" + "LD_KEEPDIR" "\0"; + +int FAST_FUNC sanitize_env_if_suid(void) +{ + const char *p; + + if (getuid() == geteuid()) + return 0; + + p = forbid; + do { + unsetenv(p); + p += strlen(p) + 1; + } while (*p); + putenv((char*)bb_PATH_root_path); + + return 1; /* we indeed were run by different user! */ +} diff --git a/probe-busybox/libbb/match_fstype.c b/probe-busybox/libbb/match_fstype.c new file mode 100644 index 00000000..b066b421 --- /dev/null +++ b/probe-busybox/libbb/match_fstype.c @@ -0,0 +1,45 @@ +/* vi: set sw=4 ts=4: */ +/* + * Match fstypes for use in mount unmount + * We accept notmpfs,nfs but not notmpfs,nonfs + * This allows us to match fstypes that start with no like so + * mount -at ,noddy + * + * Returns 1 for a match, otherwise 0 + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
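+ *
+ * Example: with t_fstype "notmpfs,nfs", an mntent of type "ext4" matches
+ * (return 1) while "tmpfs" or "nfs" does not (return 0); without the
+ * leading "no" the results are inverted.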
+ */ + +#include "libbb.h" + +#ifdef HAVE_MNTENT_H + +int FAST_FUNC match_fstype(const struct mntent *mt, const char *t_fstype) +{ + int match = 1; + + if (!t_fstype) + return match; + + if (t_fstype[0] == 'n' && t_fstype[1] == 'o') { + match--; + t_fstype += 2; + } + + while (1) { + char *after_mnt_type = is_prefixed_with(t_fstype, mt->mnt_type); + if (after_mnt_type + && (*after_mnt_type == '\0' || *after_mnt_type == ',') + ) { + return match; + } + t_fstype = strchr(t_fstype, ','); + if (!t_fstype) + break; + t_fstype++; + } + + return !match; +} + +#endif /* HAVE_MNTENT_H */ diff --git a/probe-busybox/libbb/messages.c b/probe-busybox/libbb/messages.c new file mode 100644 index 00000000..cb0836de --- /dev/null +++ b/probe-busybox/libbb/messages.c @@ -0,0 +1,61 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* allow default system PATH to be extended via CFLAGS */ +#ifndef BB_ADDITIONAL_PATH +#define BB_ADDITIONAL_PATH "" +#endif + +/* allow version to be extended, via CFLAGS */ +#ifndef BB_EXTRA_VERSION +#define BB_EXTRA_VERSION BB_BT +#endif + +#define BANNER "BusyBox v" BB_VER " (" BB_EXTRA_VERSION ")" + +const char bb_banner[] ALIGN1 = BANNER; + + +const char bb_msg_memory_exhausted[] ALIGN1 = "out of memory"; +const char bb_msg_invalid_date[] ALIGN1 = "invalid date '%s'"; +const char bb_msg_unknown[] ALIGN1 = "(unknown)"; +const char bb_msg_can_not_create_raw_socket[] ALIGN1 = "can't create raw socket"; +const char bb_msg_perm_denied_are_you_root[] ALIGN1 = "permission denied (are you root?)"; +const char bb_msg_you_must_be_root[] ALIGN1 = "you must be root"; +const char bb_msg_requires_arg[] ALIGN1 = "%s requires an argument"; +const char bb_msg_invalid_arg_to[] ALIGN1 = "invalid argument '%s' to '%s'"; +const char bb_msg_standard_input[] ALIGN1 = "standard input"; +const char bb_msg_standard_output[] ALIGN1 = "standard output"; + +const char bb_hexdigits_upcase[] ALIGN1 = "0123456789ABCDEF"; + +const char bb_busybox_exec_path[] ALIGN1 = CONFIG_BUSYBOX_EXEC_PATH; +const char bb_default_login_shell[] ALIGN1 = LIBBB_DEFAULT_LOGIN_SHELL; +/* util-linux manpage says /sbin:/bin:/usr/sbin:/usr/bin, + * but I want to save a few bytes here. Check libbb.h before changing! */ +const char bb_PATH_root_path[] ALIGN1 = + "PATH=/sbin:/usr/sbin:/bin:/usr/bin" BB_ADDITIONAL_PATH; + + +//const int const_int_1 = 1; +/* explicitly = 0, otherwise gcc may make it a common variable + * and it will end up in bss */ +const int const_int_0 = 0; + +#if ENABLE_FEATURE_WTMP +/* This is usually something like "/var/adm/wtmp" or "/var/log/wtmp" */ +const char bb_path_wtmp_file[] ALIGN1 = +# if defined _PATH_WTMP + _PATH_WTMP; +# elif defined WTMP_FILE + WTMP_FILE; +# else +# error unknown path to wtmp file +# endif +#endif diff --git a/probe-busybox/libbb/parse_config.c b/probe-busybox/libbb/parse_config.c new file mode 100644 index 00000000..a138f109 --- /dev/null +++ b/probe-busybox/libbb/parse_config.c @@ -0,0 +1,243 @@ +/* vi: set sw=4 ts=4: */ +/* + * config file parser helper + * + * Copyright (C) 2008 by Vladimir Dronnikov + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + * Also for use in uClibc (http://uclibc.org/) licensed under LGPLv2.1 or later. 
+ */ + +/* Uncomment to enable test applet */ +////config:config PARSE +////config: bool "Uniform config file parser debugging applet: parse" +////config: default n +////config: help +////config: Typical usage of parse API: +////config: char *t[3]; +////config: parser_t *p = config_open(filename); +////config: while (config_read(p, t, 3, 0, delimiters, flags)) { // 1..3 tokens +////config: bb_error_msg("TOKENS: '%s''%s''%s'", t[0], t[1], t[2]); +////config: } +////config: config_close(p); + +////applet:IF_PARSE(APPLET(parse, BB_DIR_ROOT, BB_SUID_DROP)) + +//kbuild:lib-y += parse_config.o + +//usage:#define parse_trivial_usage +//usage: "[-x] [-n MAXTOKENS] [-m MINTOKENS] [-d DELIMS] [-f FLAGS] FILE..." +//usage:#define parse_full_usage "\n\n" +//usage: " -x Suppress output (for benchmarking)" + +#include "libbb.h" + +#if defined ENABLE_PARSE && ENABLE_PARSE +int parse_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; +int parse_main(int argc UNUSED_PARAM, char **argv) +{ + const char *delims = "# \t"; + char **t; + unsigned flags = PARSE_NORMAL; + int mintokens = 0, ntokens = 128; + unsigned noout; + + opt_complementary = "-1"; + noout = 1 & getopt32(argv, "xn:+m:+d:f:+", &ntokens, &mintokens, &delims, &flags); + //argc -= optind; + argv += optind; + + t = xmalloc(sizeof(t[0]) * ntokens); + while (*argv) { + int n; + parser_t *p = config_open(*argv); + while ((n = config_read(p, t, ntokens, mintokens, delims, flags)) != 0) { + if (!noout) { + for (int i = 0; i < n; ++i) + printf("[%s]", t[i]); + puts(""); + } + } + config_close(p); + argv++; + } + return EXIT_SUCCESS; +} +#endif + +parser_t* FAST_FUNC config_open2(const char *filename, FILE* FAST_FUNC (*fopen_func)(const char *path)) +{ + FILE* fp; + parser_t *parser; + + fp = fopen_func(filename); + if (!fp) + return NULL; + parser = xzalloc(sizeof(*parser)); + parser->fp = fp; + return parser; +} + +parser_t* FAST_FUNC config_open(const char *filename) +{ + return config_open2(filename, fopen_or_warn_stdin); +} + +void FAST_FUNC config_close(parser_t *parser) +{ + if (parser) { + if (PARSE_KEEP_COPY) /* compile-time constant */ + free(parser->data); + fclose(parser->fp); + free(parser->line); + free(parser->nline); + free(parser); + } +} + +/* This function reads an entire line from a text file, + * up to a newline, exclusive. + * Trailing '\' is recognized as line continuation. + * Returns -1 if EOF/error. + */ +static int get_line_with_continuation(parser_t *parser) +{ + ssize_t len, nlen; + char *line; + + len = getline(&parser->line, &parser->line_alloc, parser->fp); + if (len <= 0) + return len; + + line = parser->line; + for (;;) { + parser->lineno++; + if (line[len - 1] == '\n') + len--; + if (len == 0 || line[len - 1] != '\\') + break; + len--; + + nlen = getline(&parser->nline, &parser->nline_alloc, parser->fp); + if (nlen <= 0) + break; + + if (parser->line_alloc < len + nlen + 1) { + parser->line_alloc = len + nlen + 1; + line = parser->line = xrealloc(line, parser->line_alloc); + } + memcpy(&line[len], parser->nline, nlen); + len += nlen; + } + + line[len] = '\0'; + return len; +} + + +/* +0. If parser is NULL return 0. +1. Read a line from config file. If nothing to read then return 0. + Handle continuation character. Advance lineno for each physical line. + Discard everything past comment character. +2. if PARSE_TRIM is set (default), remove leading and trailing delimiters. +3. If resulting line is empty goto 1. +4. Look for first delimiter. If !PARSE_COLLAPSE or !PARSE_TRIM is set then + remember the token as empty. +5. 
Else (default) if number of seen tokens is equal to max number of tokens + (token is the last one) and PARSE_GREEDY is set then the remainder + of the line is the last token. + Else (token is not last or PARSE_GREEDY is not set) just replace + first delimiter with '\0' thus delimiting the token. +6. Advance line pointer past the end of token. If number of seen tokens + is less than required number of tokens then goto 4. +7. Check the number of seen tokens is not less the min number of tokens. + Complain or die otherwise depending on PARSE_MIN_DIE. +8. Return the number of seen tokens. + +mintokens > 0 make config_read() print error message if less than mintokens +(but more than 0) are found. Empty lines are always skipped (not warned about). +*/ +#undef config_read +int FAST_FUNC config_read(parser_t *parser, char **tokens, unsigned flags, const char *delims) +{ + char *line; + int ntokens, mintokens; + int t; + + if (!parser) + return 0; + + ntokens = (uint8_t)flags; + mintokens = (uint8_t)(flags >> 8); + + again: + memset(tokens, 0, sizeof(tokens[0]) * ntokens); + + /* Read one line (handling continuations with backslash) */ + if (get_line_with_continuation(parser) < 0) + return 0; + + line = parser->line; + + /* Skip token in the start of line? */ + if (flags & PARSE_TRIM) + line += strspn(line, delims + 1); + + if (line[0] == '\0' || line[0] == delims[0]) + goto again; + + if (flags & PARSE_KEEP_COPY) { + free(parser->data); + parser->data = xstrdup(line); + } + + /* Tokenize the line */ + t = 0; + do { + /* Pin token */ + tokens[t] = line; + + /* Combine remaining arguments? */ + if ((t != (ntokens-1)) || !(flags & PARSE_GREEDY)) { + /* Vanilla token, find next delimiter */ + line += strcspn(line, delims[0] ? delims : delims + 1); + } else { + /* Combining, find comment char if any */ + line = strchrnul(line, PARSE_EOL_COMMENTS ? delims[0] : '\0'); + + /* Trim any extra delimiters from the end */ + if (flags & PARSE_TRIM) { + while (strchr(delims + 1, line[-1]) != NULL) + line--; + } + } + + /* Token not terminated? */ + if (*line == delims[0]) + *line = '\0'; + else if (*line != '\0') + *line++ = '\0'; + +#if 0 /* unused so far */ + if (flags & PARSE_ESCAPE) { + strcpy_and_process_escape_sequences(tokens[t], tokens[t]); + } +#endif + /* Skip possible delimiters */ + if (flags & PARSE_COLLAPSE) + line += strspn(line, delims + 1); + + t++; + } while (*line && *line != delims[0] && t < ntokens); + + if (t < mintokens) { + bb_error_msg("bad line %u: %d tokens found, %d needed", + parser->lineno, t, mintokens); + if (flags & PARSE_MIN_DIE) + xfunc_die(); + goto again; + } + + return t; +} diff --git a/probe-busybox/libbb/perror_msg.c b/probe-busybox/libbb/perror_msg.c new file mode 100644 index 00000000..fa1f0d33 --- /dev/null +++ b/probe-busybox/libbb/perror_msg.c @@ -0,0 +1,40 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" + +void FAST_FUNC bb_perror_msg(const char *s, ...) +{ + va_list p; + + va_start(p, s); + /* Guard against ": Success" */ + bb_verror_msg(s, p, errno ? strerror(errno) : NULL); + va_end(p); +} + +void FAST_FUNC bb_perror_msg_and_die(const char *s, ...) +{ + va_list p; + + va_start(p, s); + /* Guard against ": Success" */ + bb_verror_msg(s, p, errno ? 
strerror(errno) : NULL); + va_end(p); + xfunc_die(); +} + +void FAST_FUNC bb_simple_perror_msg(const char *s) +{ + bb_perror_msg("%s", s); +} + +void FAST_FUNC bb_simple_perror_msg_and_die(const char *s) +{ + bb_perror_msg_and_die("%s", s); +} diff --git a/probe-busybox/libbb/pidfile.c b/probe-busybox/libbb/pidfile.c new file mode 100644 index 00000000..89e700fa --- /dev/null +++ b/probe-busybox/libbb/pidfile.c @@ -0,0 +1,89 @@ +/* vi: set sw=4 ts=4: */ +/* + * pid file routines + * + * Copyright (C) 2007 by Stephane Billiart + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* Override ENABLE_FEATURE_PIDFILE */ +#define WANT_PIDFILE 1 +#include "libbb.h" +#include +#include +#include + +smallint wrote_pidfile; + +int FAST_FUNC write_pidfile(const char *path) +{ + int pid_fd; + char *end; + char buf[sizeof(int)*3 + 2]; + struct stat sb; + ssize_t written; + int errno_save; + + written = 0; + + if (!path) { + errno = EINVAL; + return -1; + } + + /* we will overwrite stale pidfile */ + pid_fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, 0666); + if (pid_fd < 0) { + /* errno is updated by open() */ + return -1; + } + + /* path can be "/dev/null"! Test for such cases */ + wrote_pidfile = (fstat(pid_fd, &sb) == 0) && S_ISREG(sb.st_mode); + + if (wrote_pidfile) { + /* few bytes larger, but doesn't use stdio */ + end = utoa_to_buf(getpid(), buf, sizeof(buf)); + *end = '\n'; + written = full_write(pid_fd, buf, end - buf + 1); + if ( !written ) + errno = ENOSPC; + } + errno_save = errno; + close(pid_fd); + errno = errno_save; + + return written > 0 ? 0 : -1; +} + +int FAST_FUNC check_pidfile(const char* path) +{ + int r; + pid_t pid; + char buf[sizeof(int)*3]; + + r = -1; + + if (path == NULL) { + errno = ENOENT; + goto exit; + } + + if (open_read_close(path, buf, sizeof(buf)) < 0) + goto exit; + + errno = 0; + pid = strtol(buf, NULL, 10); + if (errno) + goto exit; + + r = kill(pid, 0); + if ((r < 0) && (errno == EPERM)) { + errno = 0; + r = 0; + } + +exit: + return r; +} diff --git a/probe-busybox/libbb/ptr_to_globals.c b/probe-busybox/libbb/ptr_to_globals.c new file mode 100644 index 00000000..1074538f --- /dev/null +++ b/probe-busybox/libbb/ptr_to_globals.c @@ -0,0 +1,35 @@ +/* vi: set sw=4 ts=4: */ +/* + * Copyright (C) 2008 by Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include + +struct globals; + +#ifndef GCC_COMBINE + +/* We cheat here. It is declared as const ptr in libbb.h, + * but here we make it live in R/W memory */ +struct globals *ptr_to_globals; + +#ifdef __GLIBC__ +int *bb_errno; +#endif + + +#else + + +/* gcc -combine will see through and complain */ +/* Using alternative method which is more likely to break + * on weird architectures, compilers, linkers and so on */ +struct globals *const ptr_to_globals __attribute__ ((section (".data"))); + +#ifdef __GLIBC__ +int *const bb_errno __attribute__ ((section (".data"))); +#endif + +#endif diff --git a/probe-busybox/libbb/read.c b/probe-busybox/libbb/read.c new file mode 100644 index 00000000..6ffb35f3 --- /dev/null +++ b/probe-busybox/libbb/read.c @@ -0,0 +1,72 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
+ */ +#include "libbb.h" + +ssize_t FAST_FUNC safe_read(int fd, void *buf, size_t count) +{ + ssize_t n; + + do { + n = read(fd, buf, count); + } while (n < 0 && errno == EINTR); + + return n; +} + +/* + * Read all of the supplied buffer from a file. + * This does multiple reads as necessary. + * Returns the amount read, or -1 on an error. + * A short read is returned on an end of file. + */ +ssize_t FAST_FUNC full_read(int fd, void *buf, size_t len) +{ + ssize_t cc; + ssize_t total; + + total = 0; + + while (len) { + cc = safe_read(fd, buf, len); + + if (cc < 0) { + if (total) { + /* we already have some! */ + /* user can do another read to know the error code */ + return total; + } + return cc; /* read() returns -1 on failure. */ + } + if (cc == 0) + break; + buf = ((char *)buf) + cc; + total += cc; + len -= cc; + } + + return total; +} + +ssize_t FAST_FUNC read_close(int fd, void *buf, size_t size) +{ + int e; + size = full_read(fd, buf, size); + e = errno; + close(fd); + errno = e; + return size; +} + +ssize_t FAST_FUNC open_read_close(const char *filename, void *buf, size_t size) +{ + int fd = open(filename, O_RDONLY); + if (fd < 0) + return fd; + return read_close(fd, buf, size); +} diff --git a/probe-busybox/libbb/read_printf.c b/probe-busybox/libbb/read_printf.c new file mode 100644 index 00000000..b6a17cc3 --- /dev/null +++ b/probe-busybox/libbb/read_printf.c @@ -0,0 +1,236 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" + + +/* Suppose that you are a shell. You start child processes. + * They work and eventually exit. You want to get user input. + * You read stdin. But what happens if last child switched + * its stdin into O_NONBLOCK mode? + * + * *** SURPRISE! It will affect the parent too! *** + * *** BIG SURPRISE! It stays even after child exits! *** + * + * This is a design bug in UNIX API. + * fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK); + * will set nonblocking mode not only on _your_ stdin, but + * also on stdin of your parent, etc. + * + * In general, + * fd2 = dup(fd1); + * fcntl(fd2, F_SETFL, fcntl(fd2, F_GETFL) | O_NONBLOCK); + * sets both fd1 and fd2 to O_NONBLOCK. This includes cases + * where duping is done implicitly by fork() etc. + * + * We need + * fcntl(fd2, F_SETFD, fcntl(fd2, F_GETFD) | O_NONBLOCK); + * (note SETFD, not SETFL!) but such thing doesn't exist. + * + * Alternatively, we need nonblocking_read(fd, ...) which doesn't + * require O_NONBLOCK dance at all. Actually, it exists: + * n = recv(fd, buf, len, MSG_DONTWAIT); + * "MSG_DONTWAIT: + * Enables non-blocking operation; if the operation + * would block, EAGAIN is returned." + * but recv() works only for sockets! + * + * So far I don't see any good solution, I can only propose + * that affected readers should be careful and use this routine, + * which detects EAGAIN and uses poll() to wait on the fd. + * Thankfully, poll() doesn't care about O_NONBLOCK flag. + */ +ssize_t FAST_FUNC nonblock_immune_read(int fd, void *buf, size_t count) +{ + struct pollfd pfd[1]; + ssize_t n; + + while (1) { + n = safe_read(fd, buf, count); + if (n >= 0 || errno != EAGAIN) + return n; + /* fd is in O_NONBLOCK mode. Wait using poll and repeat */ + pfd[0].fd = fd; + pfd[0].events = POLLIN; + /* note: safe_poll pulls in printf */ + safe_poll(pfd, 1, -1); + } +} + +// Reads one line a-la fgets (but doesn't save terminating '\n'). +// Reads byte-by-byte. 
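/*
 * A compact illustration of the shared-O_NONBLOCK problem described above
 * (sketch only): duplicated descriptors share their file status flags, so
 * flipping the flag on the copy also affects the original.
 *
 *   int fd2 = dup(0);
 *   fcntl(fd2, F_SETFL, fcntl(fd2, F_GETFL) | O_NONBLOCK);
 *   // from now on read(0, ...) may also fail with EAGAIN, which is
 *   // exactly the situation nonblock_immune_read() papers over by
 *   // waiting in poll() and retrying
 */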
Useful when it is important to not read ahead. +// Bytes are appended to pfx (which must be malloced, or NULL). +char* FAST_FUNC xmalloc_reads(int fd, size_t *maxsz_p) +{ + char *p; + char *buf = NULL; + size_t sz = 0; + size_t maxsz = maxsz_p ? *maxsz_p : (INT_MAX - 4095); + + goto jump_in; + + while (sz < maxsz) { + if ((size_t)(p - buf) == sz) { + jump_in: + buf = xrealloc(buf, sz + 128); + p = buf + sz; + sz += 128; + } + if (nonblock_immune_read(fd, p, 1) != 1) { + /* EOF/error */ + if (p == buf) { /* we read nothing */ + free(buf); + return NULL; + } + break; + } + if (*p == '\n') + break; + p++; + } + *p = '\0'; + if (maxsz_p) + *maxsz_p = p - buf; + p++; + return xrealloc(buf, p - buf); +} + +// Read (potentially big) files in one go. File size is estimated +// by stat. Extra '\0' byte is appended. +void* FAST_FUNC xmalloc_read(int fd, size_t *maxsz_p) +{ + char *buf; + size_t size, rd_size, total; + size_t to_read; + struct stat st; + + to_read = maxsz_p ? *maxsz_p : (INT_MAX - 4095); /* max to read */ + + /* Estimate file size */ + st.st_size = 0; /* in case fstat fails, assume 0 */ + fstat(fd, &st); + /* /proc/N/stat files report st_size 0 */ + /* In order to make such files readable, we add small const */ + size = (st.st_size | 0x3ff) + 1; + + total = 0; + buf = NULL; + while (1) { + if (to_read < size) + size = to_read; + buf = xrealloc(buf, total + size + 1); + rd_size = full_read(fd, buf + total, size); + if ((ssize_t)rd_size == (ssize_t)(-1)) { /* error */ + free(buf); + return NULL; + } + total += rd_size; + if (rd_size < size) /* EOF */ + break; + if (to_read <= rd_size) + break; + to_read -= rd_size; + /* grow by 1/8, but in [1k..64k] bounds */ + size = ((total / 8) | 0x3ff) + 1; + if (size > 64*1024) + size = 64*1024; + } + buf = xrealloc(buf, total + 1); + buf[total] = '\0'; + + if (maxsz_p) + *maxsz_p = total; + return buf; +} + +#ifdef USING_LSEEK_TO_GET_SIZE +/* Alternatively, file size can be obtained by lseek to the end. + * The code is slightly bigger. Retained in case fstat approach + * will not work for some weird cases (/proc, block devices, etc). + * (NB: lseek also can fail to work for some weird files) */ + +// Read (potentially big) files in one go. File size is estimated by +// lseek to end. +void* FAST_FUNC xmalloc_open_read_close(const char *filename, size_t *maxsz_p) +{ + char *buf; + size_t size; + int fd; + off_t len; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return NULL; + + /* /proc/N/stat files report len 0 here */ + /* In order to make such files readable, we add small const */ + size = 0x3ff; /* read only 1k on unseekable files */ + len = lseek(fd, 0, SEEK_END) | 0x3ff; /* + up to 1k */ + if (len != (off_t)-1) { + xlseek(fd, 0, SEEK_SET); + size = maxsz_p ? *maxsz_p : (INT_MAX - 4095); + if (len < size) + size = len; + } + + buf = xmalloc(size + 1); + size = read_close(fd, buf, size); + if ((ssize_t)size < 0) { + free(buf); + return NULL; + } + buf = xrealloc(buf, size + 1); + buf[size] = '\0'; + + if (maxsz_p) + *maxsz_p = size; + return buf; +} +#endif + +// Read (potentially big) files in one go. File size is estimated +// by stat. +void* FAST_FUNC xmalloc_open_read_close(const char *filename, size_t *maxsz_p) +{ + char *buf; + int fd; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return NULL; + + buf = xmalloc_read(fd, maxsz_p); + close(fd); + return buf; +} + +/* Die with an error message if we can't read the entire buffer. 
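 * For the whole-file readers above, a typical call looks like this
 * (illustrative path; passing NULL for maxsz_p means "no explicit limit"):
 *
 *   char *text = xmalloc_open_read_close("/etc/hostname", NULL);
 *   if (text) {
 *       // NUL-terminated copy of the file contents
 *       free(text);
 *   }
 *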
*/ +void FAST_FUNC xread(int fd, void *buf, size_t count) +{ + if (count) { + ssize_t size = full_read(fd, buf, count); + if ((size_t)size != count) + bb_error_msg_and_die("short read"); + } +} + +/* Die with an error message if we can't read one character. */ +unsigned char FAST_FUNC xread_char(int fd) +{ + char tmp; + xread(fd, &tmp, 1); + return tmp; +} + +void* FAST_FUNC xmalloc_xopen_read_close(const char *filename, size_t *maxsz_p) +{ + void *buf = xmalloc_open_read_close(filename, maxsz_p); + if (!buf) + bb_perror_msg_and_die("can't read '%s'", filename); + return buf; +} diff --git a/probe-busybox/libbb/route_set_flags.c b/probe-busybox/libbb/route_set_flags.c new file mode 100644 index 00000000..68c74cda --- /dev/null +++ b/probe-busybox/libbb/route_set_flags.c @@ -0,0 +1,42 @@ +#include +#include + +#include "libbb.h" + +static const +IF_NOT_FEATURE_IPV6(uint16_t) +IF_FEATURE_IPV6(unsigned) +flagvals[] = { /* Must agree with flagchars[]. */ + RTF_UP, + RTF_GATEWAY, + RTF_HOST, + RTF_REINSTATE, + RTF_DYNAMIC, + RTF_MODIFIED, +#if ENABLE_FEATURE_IPV6 + RTF_DEFAULT, + RTF_ADDRCONF, + RTF_CACHE, + RTF_REJECT, + RTF_NONEXTHOP, /* this one doesn't fit into 16 bits */ +#endif +}; +/* Must agree with flagvals[]. */ +static const char flagchars[] ALIGN1 = + "UGHRDM" +#if ENABLE_FEATURE_IPV6 + "DAC!n" +#endif +; + +void route_set_flags(char *flagstr, int flags) +{ + int i; + + for (i = 0; (*flagstr = flagchars[i]) != 0; i++) { + if (flags & flagvals[i]) { + ++flagstr; + } + } +} + diff --git a/probe-busybox/libbb/safe_gethostname.c b/probe-busybox/libbb/safe_gethostname.c new file mode 100644 index 00000000..cac99ae0 --- /dev/null +++ b/probe-busybox/libbb/safe_gethostname.c @@ -0,0 +1,52 @@ +/* vi: set sw=4 ts=4: */ +/* + * Safe gethostname implementation for busybox + * + * Copyright (C) 2008 Tito Ragusa + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* + * SUSv2 guarantees that "Host names are limited to 255 bytes" + * POSIX.1-2001 guarantees that "Host names (not including the terminating + * null byte) are limited to HOST_NAME_MAX bytes" (64 bytes on my box). + * + * RFC1123 says: + * + * The syntax of a legal Internet host name was specified in RFC-952 + * [DNS:4]. One aspect of host name syntax is hereby changed: the + * restriction on the first character is relaxed to allow either a + * letter or a digit. Host software MUST support this more liberal + * syntax. + * + * Host software MUST handle host names of up to 63 characters and + * SHOULD handle host names of up to 255 characters. + */ + +#include "libbb.h" +#include + +/* + * On success return the current malloced and NUL terminated hostname. + * On error return malloced and NUL terminated string "?". + * This is an illegal first character for a hostname. + * The returned malloced string must be freed by the caller. + */ +char* FAST_FUNC safe_gethostname(void) +{ + struct utsname uts; + + /* The length of the arrays in a struct utsname is unspecified; + * the fields are terminated by a null byte. + * Note that there is no standard that says that the hostname + * set by sethostname(2) is the same string as the nodename field of the + * struct returned by uname (indeed, some systems allow a 256-byte host- + * name and an 8-byte nodename), but this is true on Linux. The same holds + * for setdomainname(2) and the domainname field. + */ + + /* Uname can fail only if you pass a bad pointer to it. */ + uname(&uts); + return xstrndup(!uts.nodename[0] ? "?" 
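/*
 * Typical use of safe_gethostname() (sketch): the caller owns and must
 * free the returned buffer, and "?" is returned when no usable name is
 * available.
 *
 *   char *hn = safe_gethostname();
 *   printf("hostname: %s\n", hn);
 *   free(hn);
 */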
: uts.nodename, sizeof(uts.nodename)); +} diff --git a/probe-busybox/libbb/safe_strncpy.c b/probe-busybox/libbb/safe_strncpy.c new file mode 100644 index 00000000..5eb0db0b --- /dev/null +++ b/probe-busybox/libbb/safe_strncpy.c @@ -0,0 +1,32 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* Like strncpy but make sure the resulting string is always 0 terminated. */ +char* FAST_FUNC safe_strncpy(char *dst, const char *src, size_t size) +{ + if (!size) return dst; + dst[--size] = '\0'; + return strncpy(dst, src, size); +} + +/* Like strcpy but can copy overlapping strings. */ +void FAST_FUNC overlapping_strcpy(char *dst, const char *src) +{ + /* Cheap optimization for dst == src case - + * better to have it here than in many callers. + */ + if (dst != src) { + while ((*dst = *src) != '\0') { + dst++; + src++; + } + } +} diff --git a/probe-busybox/libbb/safe_write.c b/probe-busybox/libbb/safe_write.c new file mode 100644 index 00000000..8f762801 --- /dev/null +++ b/probe-busybox/libbb/safe_write.c @@ -0,0 +1,21 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +ssize_t FAST_FUNC safe_write(int fd, const void *buf, size_t count) +{ + ssize_t n; + + do { + n = write(fd, buf, count); + } while (n < 0 && errno == EINTR); + + return n; +} diff --git a/probe-busybox/libbb/signals.c b/probe-busybox/libbb/signals.c new file mode 100644 index 00000000..56512473 --- /dev/null +++ b/probe-busybox/libbb/signals.c @@ -0,0 +1,121 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * Copyright (C) 2006 Rob Landley + * Copyright (C) 2006 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* All known arches use small ints for signals */ +smallint bb_got_signal; + +void record_signo(int signo) +{ + bb_got_signal = signo; +} + +/* Saves 2 bytes on x86! Oh my... 
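 *
 * (Aside, illustrating the string helpers defined earlier in this patch:
 * safe_strncpy() always NUL-terminates, unlike plain strncpy().)
 *
 *   char buf[8];
 *   safe_strncpy(buf, "a-rather-long-string", sizeof(buf));
 *   // buf now holds "a-rathe" plus the terminating NUL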
*/ +int FAST_FUNC sigaction_set(int signum, const struct sigaction *act) +{ + return sigaction(signum, act, NULL); +} + +int FAST_FUNC sigprocmask_allsigs(int how) +{ + sigset_t set; + sigfillset(&set); + return sigprocmask(how, &set, NULL); +} + +void FAST_FUNC bb_signals(int sigs, void (*f)(int)) +{ + int sig_no = 0; + int bit = 1; + + while (sigs) { + if (sigs & bit) { + sigs -= bit; + signal(sig_no, f); + } + sig_no++; + bit <<= 1; + } +} + +void FAST_FUNC bb_signals_recursive_norestart(int sigs, void (*f)(int)) +{ + int sig_no = 0; + int bit = 1; + struct sigaction sa; + + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = f; + /*sa.sa_flags = 0;*/ + /*sigemptyset(&sa.sa_mask); - hope memset did it*/ + + while (sigs) { + if (sigs & bit) { + sigs -= bit; + sigaction_set(sig_no, &sa); + } + sig_no++; + bit <<= 1; + } +} + +void FAST_FUNC sig_block(int sig) +{ + sigset_t ss; + sigemptyset(&ss); + sigaddset(&ss, sig); + sigprocmask(SIG_BLOCK, &ss, NULL); +} + +void FAST_FUNC sig_unblock(int sig) +{ + sigset_t ss; + sigemptyset(&ss); + sigaddset(&ss, sig); + sigprocmask(SIG_UNBLOCK, &ss, NULL); +} + +void FAST_FUNC wait_for_any_sig(void) +{ + sigset_t ss; + sigemptyset(&ss); + sigsuspend(&ss); +} + +/* Assuming the sig is fatal */ +void FAST_FUNC kill_myself_with_sig(int sig) +{ + signal(sig, SIG_DFL); + sig_unblock(sig); + raise(sig); + _exit(sig | 128); /* Should not reach it */ +} + +void FAST_FUNC signal_SA_RESTART_empty_mask(int sig, void (*handler)(int)) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + /*sigemptyset(&sa.sa_mask);*/ + sa.sa_flags = SA_RESTART; + sa.sa_handler = handler; + sigaction_set(sig, &sa); +} + +void FAST_FUNC signal_no_SA_RESTART_empty_mask(int sig, void (*handler)(int)) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + /*sigemptyset(&sa.sa_mask);*/ + /*sa.sa_flags = 0;*/ + sa.sa_handler = handler; + sigaction_set(sig, &sa); +} diff --git a/probe-busybox/libbb/skip_whitespace.c b/probe-busybox/libbb/skip_whitespace.c new file mode 100644 index 00000000..b6cfbba4 --- /dev/null +++ b/probe-busybox/libbb/skip_whitespace.c @@ -0,0 +1,39 @@ +/* vi: set sw=4 ts=4: */ +/* + * skip_whitespace implementation for busybox + * + * Copyright (C) 2003 Manuel Novoa III + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +char* FAST_FUNC skip_whitespace(const char *s) +{ + /* In POSIX/C locale (the only locale we care about: do we REALLY want + * to allow Unicode whitespace in, say, .conf files? nuts!) + * isspace is only these chars: "\t\n\v\f\r" and space. + * "\t\n\v\f\r" happen to have ASCII codes 9,10,11,12,13. + * Use that. + */ + while (*s == ' ' || (unsigned char)(*s - 9) <= (13 - 9)) + s++; + + return (char *) s; +} + +char* FAST_FUNC skip_non_whitespace(const char *s) +{ + while (*s != '\0' && *s != ' ' && (unsigned char)(*s - 9) > (13 - 9)) + s++; + + return (char *) s; +} + +char* FAST_FUNC skip_dev_pfx(const char *tty_name) +{ + if (is_prefixed_with(tty_name, "/dev/")) + tty_name += 5; + return (char*)tty_name; +} diff --git a/probe-busybox/libbb/strlcat.c b/probe-busybox/libbb/strlcat.c new file mode 100644 index 00000000..6dd77475 --- /dev/null +++ b/probe-busybox/libbb/strlcat.c @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2002 Manuel Novoa III + * Copyright (C) 2000-2005 Erik Andersen + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +/* OpenBSD function: + * Append at most n-1-strlen(dst) chars from src to dst and nul-terminate dst. 
+ * Returns strlen(src) + strlen({original} dst), so truncation occurred if the + * return val is >= n. + * Note: If dst doesn't contain a nul in the first n chars, strlen(dst) is + * taken as n. */ + +#include "libbb.h" + +size_t strlcat(register char *__restrict dst, + register const char *__restrict src, + size_t n) +{ + size_t len; + char dummy[1]; + + len = 0; + + while (1) { + if (len >= n) { + dst = dummy; + break; + } + if (!*dst) { + break; + } + ++dst; + ++len; + } + + while ((*dst = *src) != 0) { + if (++len < n) { + ++dst; + } + ++src; + } + + return len; +} diff --git a/probe-busybox/libbb/strlcpy.c b/probe-busybox/libbb/strlcpy.c new file mode 100644 index 00000000..02cd55a8 --- /dev/null +++ b/probe-busybox/libbb/strlcpy.c @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2002 Manuel Novoa III + * Copyright (C) 2000-2005 Erik Andersen + * + * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. + */ + +#include "libbb.h" + +#ifdef WANT_WIDE +# define Wstrlcpy __wcslcpy +# define Wstrxfrm wcsxfrm +#else +// libc_hidden_proto(strlcpy) +# define Wstrlcpy strlcpy +# define Wstrxfrm strxfrm +# define Wchar char +#endif + + +/* OpenBSD function: + * Copy at most n-1 chars from src to dst and nul-terminate dst. + * Returns strlen(src), so truncation occurred if the return value is >= n. */ + +#ifdef WANT_WIDE +size_t Wstrlcpy(register Wchar *__restrict dst, + register const Wchar *__restrict src, + size_t n) attribute_hidden; +#endif +size_t Wstrlcpy(register Wchar *__restrict dst, + register const Wchar *__restrict src, + size_t n) +{ + const Wchar *src0 = src; + Wchar dummy[1]; + + if (!n) { + dst = dummy; + } else { + --n; + } + + while ((*dst = *src) != 0) { + if (n) { + --n; + ++dst; + } + ++src; + } + + return src - src0; +} +#ifndef WANT_WIDE +//libc_hidden_def(strlcpy) +#ifndef __UCLIBC_HAS_LOCALE__ +//libc_hidden_proto(strxfrm) +//strong_alias(strlcpy,strxfrm) +//libc_hidden_def(strxfrm) +#endif +#else +#ifndef __UCLIBC_HAS_LOCALE__ +strong_alias(__wcslcpy,wcsxfrm) +#endif +#endif diff --git a/probe-busybox/libbb/time.c b/probe-busybox/libbb/time.c new file mode 100644 index 00000000..99526776 --- /dev/null +++ b/probe-busybox/libbb/time.c @@ -0,0 +1,299 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 2007 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. 
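 *
 * A quick illustration of the strlcpy()/strlcat() helpers defined just
 * above (sketch only): both return the length the result would have had,
 * so a return value >= the buffer size signals truncation.
 *
 *   char buf[8];
 *   if (strlcpy(buf, "0123456789", sizeof(buf)) >= sizeof(buf))
 *       // truncated: buf == "0123456"
 *       ;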
+ */ +#include "libbb.h" + +void FAST_FUNC parse_datestr(const char *date_str, struct tm *ptm) +{ + char end = '\0'; + const char *last_colon = strrchr(date_str, ':'); + + if (last_colon != NULL) { + /* Parse input and assign appropriately to ptm */ +#if ENABLE_DESKTOP + const char *endp; +#endif + + /* HH:MM */ + if (sscanf(date_str, "%u:%u%c", + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 2 + ) { + /* no adjustments needed */ + } else + /* mm.dd-HH:MM */ + if (sscanf(date_str, "%u.%u-%u:%u%c", + &ptm->tm_mon, &ptm->tm_mday, + &ptm->tm_hour, &ptm->tm_min, + &end) >= 4 + ) { + /* Adjust month from 1-12 to 0-11 */ + ptm->tm_mon -= 1; + } else + /* yyyy.mm.dd-HH:MM */ + if (sscanf(date_str, "%u.%u.%u-%u:%u%c", &ptm->tm_year, + &ptm->tm_mon, &ptm->tm_mday, + &ptm->tm_hour, &ptm->tm_min, + &end) >= 5 + /* yyyy-mm-dd HH:MM */ + || sscanf(date_str, "%u-%u-%u %u:%u%c", &ptm->tm_year, + &ptm->tm_mon, &ptm->tm_mday, + &ptm->tm_hour, &ptm->tm_min, + &end) >= 5 + ) { + ptm->tm_year -= 1900; /* Adjust years */ + ptm->tm_mon -= 1; /* Adjust month from 1-12 to 0-11 */ + } else +#if ENABLE_DESKTOP /* strptime is BIG: ~1k in uclibc, ~10k in glibc */ + /* month_name d HH:MM:SS YYYY. Supported by GNU date */ + if ((endp = strptime(date_str, "%b %d %T %Y", ptm)) != NULL + && *endp == '\0' + ) { + return; /* don't fall through to end == ":" check */ + } else +#endif + { + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + } + if (end == ':') { + /* xxx:SS */ + if (sscanf(last_colon + 1, "%u%c", &ptm->tm_sec, &end) == 1) + end = '\0'; + /* else end != NUL and we error out */ + } + } else + if (strchr(date_str, '-') + /* Why strchr('-') check? + * sscanf below will trash ptm->tm_year, this breaks + * if parse_str is "10101010" (iow, "MMddhhmm" form) + * because we destroy year. Do these sscanf + * only if we saw a dash in parse_str. + */ + /* yyyy-mm-dd HH */ + && (sscanf(date_str, "%u-%u-%u %u%c", &ptm->tm_year, + &ptm->tm_mon, &ptm->tm_mday, + &ptm->tm_hour, + &end) >= 4 + /* yyyy-mm-dd */ + || sscanf(date_str, "%u-%u-%u%c", &ptm->tm_year, + &ptm->tm_mon, &ptm->tm_mday, + &end) >= 3 + ) + ) { + ptm->tm_year -= 1900; /* Adjust years */ + ptm->tm_mon -= 1; /* Adjust month from 1-12 to 0-11 */ + } else + if (date_str[0] == '@') { + time_t t = bb_strtol(date_str + 1, NULL, 10); + if (!errno) { + struct tm *lt = localtime(&t); + if (lt) { + *ptm = *lt; + return; + } + } + end = '1'; + } else { + /* Googled the following on an old date manpage: + * + * The canonical representation for setting the date/time is: + * cc Century (either 19 or 20) + * yy Year in abbreviated form (e.g. 89, 06) + * mm Numeric month, a number from 1 to 12 + * dd Day, a number from 1 to 31 + * HH Hour, a number from 0 to 23 + * MM Minutes, a number from 0 to 59 + * .SS Seconds, a number from 0 to 61 (with leap seconds) + * Everything but the minutes is optional + * + * "touch -t DATETIME" format: [[[[[YY]YY]MM]DD]hh]mm[.ss] + * Some, but not all, Unix "date DATETIME" commands + * move [[YY]YY] past minutes mm field (!). + * Coreutils date does it, and SUS mandates it. + * (date -s DATETIME does not support this format. lovely!) + * In bbox, this format is special-cased in date applet + * (IOW: this function assumes "touch -t" format). 
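 *
 * A few illustrative inputs for this branch (unspecified fields keep
 * whatever the caller pre-filled, typically the current time):
 *   "45"            -> minute 45 (only tm_min is set)
 *   "2345"          -> 23:45
 *   "312345"        -> day 31, 23:45
 *   "12312345"      -> Dec 31, 23:45
 *   "9912312345"    -> Dec 31 23:45 of year 99, mapped into the nearest
 *                      century (1999 for a present-day current year)
 *   "202512312345"  -> 2025-12-31 23:45
 *   "2345.30"       -> 23:45:30 (optional ".SS" suffix)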
+ */ + unsigned cur_year = ptm->tm_year; + int len = strchrnul(date_str, '.') - date_str; + + /* MM[.SS] */ + if (len == 2 && sscanf(date_str, "%2u%2u%2u%2u""%2u%c" + 12, + &ptm->tm_min, + &end) >= 1) { + } else + /* HHMM[.SS] */ + if (len == 4 && sscanf(date_str, "%2u%2u%2u""%2u%2u%c" + 9, + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 2) { + } else + /* ddHHMM[.SS] */ + if (len == 6 && sscanf(date_str, "%2u%2u""%2u%2u%2u%c" + 6, + &ptm->tm_mday, + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 3) { + } else + /* mmddHHMM[.SS] */ + if (len == 8 && sscanf(date_str, "%2u""%2u%2u%2u%2u%c" + 3, + &ptm->tm_mon, + &ptm->tm_mday, + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 4) { + /* Adjust month from 1-12 to 0-11 */ + ptm->tm_mon -= 1; + } else + /* yymmddHHMM[.SS] */ + if (len == 10 && sscanf(date_str, "%2u%2u%2u%2u%2u%c", + &ptm->tm_year, + &ptm->tm_mon, + &ptm->tm_mday, + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 5) { + /* Adjust month from 1-12 to 0-11 */ + ptm->tm_mon -= 1; + if ((int)cur_year >= 50) { /* >= 1950 */ + /* Adjust year: */ + /* 1. Put it in the current century */ + ptm->tm_year += (cur_year / 100) * 100; + /* 2. If too far in the past, +100 years */ + if (ptm->tm_year < cur_year - 50) + ptm->tm_year += 100; + /* 3. If too far in the future, -100 years */ + if (ptm->tm_year > cur_year + 50) + ptm->tm_year -= 100; + } + } else + /* ccyymmddHHMM[.SS] */ + if (len == 12 && sscanf(date_str, "%4u%2u%2u%2u%2u%c", + &ptm->tm_year, + &ptm->tm_mon, + &ptm->tm_mday, + &ptm->tm_hour, + &ptm->tm_min, + &end) >= 5) { + ptm->tm_year -= 1900; /* Adjust years */ + ptm->tm_mon -= 1; /* Adjust month from 1-12 to 0-11 */ + } else { + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + } + ptm->tm_sec = 0; /* assume zero if [.SS] is not given */ + if (end == '.') { + /* xxx.SS */ + if (sscanf(strchr(date_str, '.') + 1, "%u%c", + &ptm->tm_sec, &end) == 1) + end = '\0'; + /* else end != NUL and we error out */ + } + } + if (end != '\0') { + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + } +} + +time_t FAST_FUNC validate_tm_time(const char *date_str, struct tm *ptm) +{ + time_t t = mktime(ptm); + if (t == (time_t) -1L) { + bb_error_msg_and_die(bb_msg_invalid_date, date_str); + } + return t; +} + +static char* strftime_fmt(char *buf, unsigned len, time_t *tp, const char *fmt) +{ + time_t t; + if (!tp) { + tp = &t; + time(tp); + } + /* Returns pointer to NUL */ + return buf + strftime(buf, len, fmt, localtime(tp)); +} + +char* FAST_FUNC strftime_HHMMSS(char *buf, unsigned len, time_t *tp) +{ + return strftime_fmt(buf, len, tp, "%H:%M:%S"); +} + +char* FAST_FUNC strftime_YYYYMMDDHHMMSS(char *buf, unsigned len, time_t *tp) +{ + return strftime_fmt(buf, len, tp, "%Y-%m-%d %H:%M:%S"); +} + +#if ENABLE_MONOTONIC_SYSCALL + +#include +/* Old glibc (< 2.3.4) does not provide this constant. We use syscall + * directly so this definition is safe. */ +#ifndef CLOCK_MONOTONIC +#define CLOCK_MONOTONIC 1 +#endif + +/* libc has incredibly messy way of doing this, + * typically requiring -lrt. 
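 * A typical measurement with the helpers defined below (sketch only):
 *   unsigned long long t0 = monotonic_us();
 *   // ... work ...
 *   bb_error_msg("took %llu us", monotonic_us() - t0);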
We just skip all this mess */ +static void get_mono(struct timespec *ts) +{ + if (clock_gettime(CLOCK_MONOTONIC, ts)) + bb_error_msg_and_die("clock_gettime(MONOTONIC) failed"); +} +unsigned long long FAST_FUNC monotonic_ns(void) +{ + struct timespec ts; + get_mono(&ts); + return ts.tv_sec * 1000000000ULL + ts.tv_nsec; +} +unsigned long long FAST_FUNC monotonic_us(void) +{ + struct timespec ts; + get_mono(&ts); + return ts.tv_sec * 1000000ULL + ts.tv_nsec/1000; +} +unsigned long long FAST_FUNC monotonic_ms(void) +{ + struct timespec ts; + get_mono(&ts); + return ts.tv_sec * 1000ULL + ts.tv_nsec/1000000; +} +unsigned FAST_FUNC monotonic_sec(void) +{ + struct timespec ts; + get_mono(&ts); + return ts.tv_sec; +} + +#else + +unsigned long long FAST_FUNC monotonic_ns(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000000000ULL + tv.tv_usec * 1000; +} +unsigned long long FAST_FUNC monotonic_us(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000000ULL + tv.tv_usec; +} +unsigned long long FAST_FUNC monotonic_ms(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000ULL + tv.tv_usec / 1000; +} +unsigned FAST_FUNC monotonic_sec(void) +{ + return time(NULL); +} + +#endif diff --git a/probe-busybox/libbb/unicode.c b/probe-busybox/libbb/unicode.c new file mode 100644 index 00000000..9c4da50d --- /dev/null +++ b/probe-busybox/libbb/unicode.c @@ -0,0 +1,1178 @@ +/* vi: set sw=4 ts=4: */ +/* + * Unicode support routines. + * + * Copyright (C) 2009 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ +#include "libbb.h" +#include "unicode.h" + +/* If it's not #defined as a constant in unicode.h... */ +#ifndef unicode_status +uint8_t unicode_status; +#endif + +/* This file is compiled only if UNICODE_SUPPORT is on. + * We check other options and decide whether to use libc support + * via locale, or use our own logic: + */ + +#if ENABLE_UNICODE_USING_LOCALE + +/* Unicode support using libc locale support. */ + +void FAST_FUNC reinit_unicode(const char *LANG) +{ + static const char unicode_0x394[] = { 0xce, 0x94, 0 }; + size_t width; + + /* We pass "" instead of "C" because some libc's have + * non-ASCII default locale for setlocale("") call + * (this allows users of such libc to have Unicoded + * system without having to mess with env). + * + * We set LC_CTYPE because (a) we may be called with $LC_CTYPE + * value in LANG, not with $LC_ALL, (b) internationalized + * LC_NUMERIC and LC_TIME are more PITA than benefit + * (for one, some utilities have hard time with comma + * used as a fractional separator). + */ +//TODO: avoid repeated calls by caching last string? + setlocale(LC_CTYPE, LANG ? LANG : ""); + + /* In unicode, this is a one character string */ + width = unicode_strlen(unicode_0x394); + unicode_status = (width == 1 ? UNICODE_ON : UNICODE_OFF); +} + +void FAST_FUNC init_unicode(void) +{ + /* Some people set only $LC_CTYPE, not $LC_ALL, because they want + * only Unicode to be activated on their system, not the whole + * shebang of wrong decimal points, strange date formats and so on. + */ + if (unicode_status == UNICODE_UNKNOWN) { + char *s = getenv("LC_ALL"); + if (!s) s = getenv("LC_CTYPE"); + if (!s) s = getenv("LANG"); + reinit_unicode(s); + } +} + +#else + +/* Homegrown Unicode support. It knows only C and Unicode locales. 
*/ + +# if ENABLE_FEATURE_CHECK_UNICODE_IN_ENV +void FAST_FUNC reinit_unicode(const char *LANG) +{ + unicode_status = UNICODE_OFF; + if (!LANG || !(strstr(LANG, ".utf") || strstr(LANG, ".UTF"))) + return; + unicode_status = UNICODE_ON; +} + +void FAST_FUNC init_unicode(void) +{ + if (unicode_status == UNICODE_UNKNOWN) { + char *s = getenv("LC_ALL"); + if (!s) s = getenv("LC_CTYPE"); + if (!s) s = getenv("LANG"); + reinit_unicode(s); + } +} +# endif + +static size_t wcrtomb_internal(char *s, wchar_t wc) +{ + int n, i; + uint32_t v = wc; + + if (v <= 0x7f) { + *s = v; + return 1; + } + + /* RFC 3629 says that Unicode ends at 10FFFF, + * but we cover entire 32 bits */ + + /* 4000000-FFFFFFFF -> 111111tt 10tttttt 10zzzzzz 10zzyyyy 10yyyyxx 10xxxxxx */ + /* 200000-3FFFFFF -> 111110tt 10zzzzzz 10zzyyyy 10yyyyxx 10xxxxxx */ + /* 10000-1FFFFF -> 11110zzz 10zzyyyy 10yyyyxx 10xxxxxx */ + /* 800-FFFF -> 1110yyyy 10yyyyxx 10xxxxxx */ + /* 80-7FF -> 110yyyxx 10xxxxxx */ + + /* How many bytes do we need? */ + n = 2; + /* (0x80000000+ would result in n = 7, limiting n to 6) */ + while (v >= 0x800 && n < 6) { + v >>= 5; + n++; + } + /* Fill bytes n-1..1 */ + i = n; + while (--i) { + s[i] = (wc & 0x3f) | 0x80; + wc >>= 6; + } + /* Fill byte 0 */ + s[0] = wc | (uint8_t)(0x3f00 >> n); + return n; +} +size_t FAST_FUNC wcrtomb(char *s, wchar_t wc, mbstate_t *ps UNUSED_PARAM) +{ + if (unicode_status != UNICODE_ON) { + *s = wc; + return 1; + } + + return wcrtomb_internal(s, wc); +} +size_t FAST_FUNC wcstombs(char *dest, const wchar_t *src, size_t n) +{ + size_t org_n = n; + + if (unicode_status != UNICODE_ON) { + while (n) { + wchar_t c = *src++; + *dest++ = c; + if (c == 0) + break; + n--; + } + return org_n - n; + } + + while (n >= MB_CUR_MAX) { + wchar_t wc = *src++; + size_t len = wcrtomb_internal(dest, wc); + + if (wc == L'\0') + return org_n - n; + dest += len; + n -= len; + } + while (n) { + char tbuf[MB_CUR_MAX]; + wchar_t wc = *src++; + size_t len = wcrtomb_internal(tbuf, wc); + + if (len > n) + break; + memcpy(dest, tbuf, len); + if (wc == L'\0') + return org_n - n; + dest += len; + n -= len; + } + return org_n - n; +} + +# define ERROR_WCHAR (~(wchar_t)0) + +static const char *mbstowc_internal(wchar_t *res, const char *src) +{ + int bytes; + unsigned c = (unsigned char) *src++; + + if (c <= 0x7f) { + *res = c; + return src; + } + + /* 80-7FF -> 110yyyxx 10xxxxxx */ + /* 800-FFFF -> 1110yyyy 10yyyyxx 10xxxxxx */ + /* 10000-1FFFFF -> 11110zzz 10zzyyyy 10yyyyxx 10xxxxxx */ + /* 200000-3FFFFFF -> 111110tt 10zzzzzz 10zzyyyy 10yyyyxx 10xxxxxx */ + /* 4000000-FFFFFFFF -> 111111tt 10tttttt 10zzzzzz 10zzyyyy 10yyyyxx 10xxxxxx */ + bytes = 0; + do { + c <<= 1; + bytes++; + } while ((c & 0x80) && bytes < 6); + if (bytes == 1) { + /* A bare "continuation" byte. Say, 80 */ + *res = ERROR_WCHAR; + return src; + } + c = (uint8_t)(c) >> bytes; + + while (--bytes) { + unsigned ch = (unsigned char) *src; + if ((ch & 0xc0) != 0x80) { + /* Missing "continuation" byte. 
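 * (Illustrative aside on the encoder above: the Greek capital delta
 * U+0394 becomes the two bytes CE 94.)
 *   char buf[6];
 *   size_t n = wcrtomb_internal(buf, 0x0394);  // n == 2, buf[0..1] == CE 94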
Example: e0 80 */ + *res = ERROR_WCHAR; + return src; + } + c = (c << 6) + (ch & 0x3f); + src++; + } + + /* TODO */ + /* Need to check that c isn't produced by overlong encoding */ + /* Example: 11000000 10000000 converts to NUL */ + /* 11110000 10000000 10000100 10000000 converts to 0x100 */ + /* correct encoding: 11000100 10000000 */ + if (c <= 0x7f) { /* crude check */ + *res = ERROR_WCHAR; + return src; + } + + *res = c; + return src; +} +size_t FAST_FUNC mbstowcs(wchar_t *dest, const char *src, size_t n) +{ + size_t org_n = n; + + if (unicode_status != UNICODE_ON) { + while (n) { + unsigned char c = *src++; + + if (dest) + *dest++ = c; + if (c == 0) + break; + n--; + } + return org_n - n; + } + + while (n) { + wchar_t wc; + src = mbstowc_internal(&wc, src); + if (wc == ERROR_WCHAR) /* error */ + return (size_t) -1L; + if (dest) + *dest++ = wc; + if (wc == 0) /* end-of-string */ + break; + n--; + } + + return org_n - n; +} + +int FAST_FUNC iswspace(wint_t wc) +{ + return (unsigned)wc <= 0x7f && isspace(wc); +} + +int FAST_FUNC iswalnum(wint_t wc) +{ + return (unsigned)wc <= 0x7f && isalnum(wc); +} + +int FAST_FUNC iswpunct(wint_t wc) +{ + return (unsigned)wc <= 0x7f && ispunct(wc); +} + + +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0x300 +struct interval { + uint16_t first; + uint16_t last; +}; + +/* auxiliary function for binary search in interval table */ +static int in_interval_table(unsigned ucs, const struct interval *table, unsigned max) +{ + unsigned min; + unsigned mid; + + if (ucs < table[0].first || ucs > table[max].last) + return 0; + + min = 0; + while (max >= min) { + mid = (min + max) / 2; + if (ucs > table[mid].last) + min = mid + 1; + else if (ucs < table[mid].first) + max = mid - 1; + else + return 1; + } + return 0; +} + +static int in_uint16_table(unsigned ucs, const uint16_t *table, unsigned max) +{ + unsigned min; + unsigned mid; + unsigned first, last; + + first = table[0] >> 2; + last = first + (table[0] & 3); + if (ucs < first || ucs > last) + return 0; + + min = 0; + while (max >= min) { + mid = (min + max) / 2; + first = table[mid] >> 2; + last = first + (table[mid] & 3); + if (ucs > last) + min = mid + 1; + else if (ucs < first) + max = mid - 1; + else + return 1; + } + return 0; +} +# endif + + +/* + * This is an implementation of wcwidth() and wcswidth() (defined in + * IEEE Std 1002.1-2001) for Unicode. + * + * http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html + * http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html + * + * In fixed-width output devices, Latin characters all occupy a single + * "cell" position of equal width, whereas ideographic CJK characters + * occupy two such cells. Interoperability between terminal-line + * applications and (teletype-style) character terminals using the + * UTF-8 encoding requires agreement on which character should advance + * the cursor by how many cell positions. No established formal + * standards exist at present on which Unicode character shall occupy + * how many cell positions on character terminals. These routines are + * a first attempt of defining such behavior based on simple rules + * applied to data provided by the Unicode Consortium. + * + * For some graphical characters, the Unicode standard explicitly + * defines a character-cell width via the definition of the East Asian + * FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes. + * In all these cases, there is no ambiguity about which width a + * terminal shall use. 
For characters in the East Asian Ambiguous (A) + * class, the width choice depends purely on a preference of backward + * compatibility with either historic CJK or Western practice. + * Choosing single-width for these characters is easy to justify as + * the appropriate long-term solution, as the CJK practice of + * displaying these characters as double-width comes from historic + * implementation simplicity (8-bit encoded characters were displayed + * single-width and 16-bit ones double-width, even for Greek, + * Cyrillic, etc.) and not any typographic considerations. + * + * Much less clear is the choice of width for the Not East Asian + * (Neutral) class. Existing practice does not dictate a width for any + * of these characters. It would nevertheless make sense + * typographically to allocate two character cells to characters such + * as for instance EM SPACE or VOLUME INTEGRAL, which cannot be + * represented adequately with a single-width glyph. The following + * routines at present merely assign a single-cell width to all + * neutral characters, in the interest of simplicity. This is not + * entirely satisfactory and should be reconsidered before + * establishing a formal standard in this area. At the moment, the + * decision which Not East Asian (Neutral) characters should be + * represented by double-width glyphs cannot yet be answered by + * applying a simple rule from the Unicode database content. Setting + * up a proper standard for the behavior of UTF-8 character terminals + * will require a careful analysis not only of each Unicode character, + * but also of each presentation form, something the author of these + * routines has avoided to do so far. + * + * http://www.unicode.org/unicode/reports/tr11/ + * + * Markus Kuhn -- 2007-05-26 (Unicode 5.0) + * + * Permission to use, copy, modify, and distribute this software + * for any purpose and without fee is hereby granted. The author + * disclaims all warranties with regard to this software. + * + * Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c + */ + +/* Assigned Unicode character ranges: + * Plane Range + * 0 0000–FFFF Basic Multilingual Plane + * 1 10000–1FFFF Supplementary Multilingual Plane + * 2 20000–2FFFF Supplementary Ideographic Plane + * 3 30000-3FFFF Tertiary Ideographic Plane (no chars assigned yet) + * 4-13 40000–DFFFF currently unassigned + * 14 E0000–EFFFF Supplementary Special-purpose Plane + * 15 F0000–FFFFF Supplementary Private Use Area-A + * 16 100000–10FFFF Supplementary Private Use Area-B + * + * "Supplementary Special-purpose Plane currently contains non-graphical + * characters in two blocks of 128 and 240 characters. The first block + * is for language tag characters for use when language cannot be indicated + * through other protocols (such as the xml:lang attribute in XML). + * The other block contains glyph variation selectors to indicate + * an alternate glyph for a character that cannot be determined by context." + * + * In simpler terms: it is a tool to fix the "Han unification" mess + * created by Unicode committee, to select Chinese/Japanese/Korean/Taiwan + * version of a character. (They forgot that the whole purpose of the Unicode + * was to be able to write all chars in one charset without such tricks). + * Until East Asian users say it is actually necessary to support these + * code points in console applications like busybox + * (i.e. do these chars ever appear in filenames, hostnames, text files + * and such?), we are treating these code points as invalid. 
+ * + * Tertiary Ideographic Plane is also ignored for now, + * until Unicode committee assigns something there. + */ +/* The following two functions define the column width of an ISO 10646 + * character as follows: + * + * - The null character (U+0000) has a column width of 0. + * + * - Other C0/C1 control characters and DEL will lead to a return + * value of -1. + * + * - Non-spacing and enclosing combining characters (general + * category code Mn or Me in the Unicode database) have a + * column width of 0. + * + * - SOFT HYPHEN (U+00AD) has a column width of 1. + * + * - Other format characters (general category code Cf in the Unicode + * database) and ZERO WIDTH SPACE (U+200B) have a column width of 0. + * + * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) + * have a column width of 0. + * + * - Spacing characters in the East Asian Wide (W) or East Asian + * Full-width (F) category as defined in Unicode Technical + * Report #11 have a column width of 2. + * + * - All remaining characters (including all printable + * ISO 8859-1 and WGL4 characters, Unicode control characters, + * etc.) have a column width of 1. + * + * This implementation assumes that wchar_t characters are encoded + * in ISO 10646. + */ +int FAST_FUNC wcwidth(unsigned ucs) +{ +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0x300 + /* sorted list of non-overlapping intervals of non-spacing characters */ + /* generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c" */ +# define BIG_(a,b) { a, b }, +# define PAIR(a,b) +# define ARRAY /* PAIR if < 0x4000 and no more than 4 chars big */ \ + BIG_(0x0300, 0x036F) \ + PAIR(0x0483, 0x0486) \ + PAIR(0x0488, 0x0489) \ + BIG_(0x0591, 0x05BD) \ + PAIR(0x05BF, 0x05BF) \ + PAIR(0x05C1, 0x05C2) \ + PAIR(0x05C4, 0x05C5) \ + PAIR(0x05C7, 0x05C7) \ + PAIR(0x0600, 0x0603) \ + BIG_(0x0610, 0x0615) \ + BIG_(0x064B, 0x065E) \ + PAIR(0x0670, 0x0670) \ + BIG_(0x06D6, 0x06E4) \ + PAIR(0x06E7, 0x06E8) \ + PAIR(0x06EA, 0x06ED) \ + PAIR(0x070F, 0x070F) \ + PAIR(0x0711, 0x0711) \ + BIG_(0x0730, 0x074A) \ + BIG_(0x07A6, 0x07B0) \ + BIG_(0x07EB, 0x07F3) \ + PAIR(0x0901, 0x0902) \ + PAIR(0x093C, 0x093C) \ + BIG_(0x0941, 0x0948) \ + PAIR(0x094D, 0x094D) \ + PAIR(0x0951, 0x0954) \ + PAIR(0x0962, 0x0963) \ + PAIR(0x0981, 0x0981) \ + PAIR(0x09BC, 0x09BC) \ + PAIR(0x09C1, 0x09C4) \ + PAIR(0x09CD, 0x09CD) \ + PAIR(0x09E2, 0x09E3) \ + PAIR(0x0A01, 0x0A02) \ + PAIR(0x0A3C, 0x0A3C) \ + PAIR(0x0A41, 0x0A42) \ + PAIR(0x0A47, 0x0A48) \ + PAIR(0x0A4B, 0x0A4D) \ + PAIR(0x0A70, 0x0A71) \ + PAIR(0x0A81, 0x0A82) \ + PAIR(0x0ABC, 0x0ABC) \ + BIG_(0x0AC1, 0x0AC5) \ + PAIR(0x0AC7, 0x0AC8) \ + PAIR(0x0ACD, 0x0ACD) \ + PAIR(0x0AE2, 0x0AE3) \ + PAIR(0x0B01, 0x0B01) \ + PAIR(0x0B3C, 0x0B3C) \ + PAIR(0x0B3F, 0x0B3F) \ + PAIR(0x0B41, 0x0B43) \ + PAIR(0x0B4D, 0x0B4D) \ + PAIR(0x0B56, 0x0B56) \ + PAIR(0x0B82, 0x0B82) \ + PAIR(0x0BC0, 0x0BC0) \ + PAIR(0x0BCD, 0x0BCD) \ + PAIR(0x0C3E, 0x0C40) \ + PAIR(0x0C46, 0x0C48) \ + PAIR(0x0C4A, 0x0C4D) \ + PAIR(0x0C55, 0x0C56) \ + PAIR(0x0CBC, 0x0CBC) \ + PAIR(0x0CBF, 0x0CBF) \ + PAIR(0x0CC6, 0x0CC6) \ + PAIR(0x0CCC, 0x0CCD) \ + PAIR(0x0CE2, 0x0CE3) \ + PAIR(0x0D41, 0x0D43) \ + PAIR(0x0D4D, 0x0D4D) \ + PAIR(0x0DCA, 0x0DCA) \ + PAIR(0x0DD2, 0x0DD4) \ + PAIR(0x0DD6, 0x0DD6) \ + PAIR(0x0E31, 0x0E31) \ + BIG_(0x0E34, 0x0E3A) \ + BIG_(0x0E47, 0x0E4E) \ + PAIR(0x0EB1, 0x0EB1) \ + BIG_(0x0EB4, 0x0EB9) \ + PAIR(0x0EBB, 0x0EBC) \ + BIG_(0x0EC8, 0x0ECD) \ + PAIR(0x0F18, 0x0F19) \ + PAIR(0x0F35, 0x0F35) \ + PAIR(0x0F37, 0x0F37) \ + PAIR(0x0F39, 0x0F39) \ + BIG_(0x0F71, 0x0F7E) \ + 
BIG_(0x0F80, 0x0F84) \ + PAIR(0x0F86, 0x0F87) \ + PAIR(0x0FC6, 0x0FC6) \ + BIG_(0x0F90, 0x0F97) \ + BIG_(0x0F99, 0x0FBC) \ + PAIR(0x102D, 0x1030) \ + PAIR(0x1032, 0x1032) \ + PAIR(0x1036, 0x1037) \ + PAIR(0x1039, 0x1039) \ + PAIR(0x1058, 0x1059) \ + BIG_(0x1160, 0x11FF) \ + PAIR(0x135F, 0x135F) \ + PAIR(0x1712, 0x1714) \ + PAIR(0x1732, 0x1734) \ + PAIR(0x1752, 0x1753) \ + PAIR(0x1772, 0x1773) \ + PAIR(0x17B4, 0x17B5) \ + BIG_(0x17B7, 0x17BD) \ + PAIR(0x17C6, 0x17C6) \ + BIG_(0x17C9, 0x17D3) \ + PAIR(0x17DD, 0x17DD) \ + PAIR(0x180B, 0x180D) \ + PAIR(0x18A9, 0x18A9) \ + PAIR(0x1920, 0x1922) \ + PAIR(0x1927, 0x1928) \ + PAIR(0x1932, 0x1932) \ + PAIR(0x1939, 0x193B) \ + PAIR(0x1A17, 0x1A18) \ + PAIR(0x1B00, 0x1B03) \ + PAIR(0x1B34, 0x1B34) \ + BIG_(0x1B36, 0x1B3A) \ + PAIR(0x1B3C, 0x1B3C) \ + PAIR(0x1B42, 0x1B42) \ + BIG_(0x1B6B, 0x1B73) \ + BIG_(0x1DC0, 0x1DCA) \ + PAIR(0x1DFE, 0x1DFF) \ + BIG_(0x200B, 0x200F) \ + BIG_(0x202A, 0x202E) \ + PAIR(0x2060, 0x2063) \ + BIG_(0x206A, 0x206F) \ + BIG_(0x20D0, 0x20EF) \ + BIG_(0x302A, 0x302F) \ + PAIR(0x3099, 0x309A) \ + /* Too big to be packed in PAIRs: */ \ + BIG_(0xA806, 0xA806) \ + BIG_(0xA80B, 0xA80B) \ + BIG_(0xA825, 0xA826) \ + BIG_(0xFB1E, 0xFB1E) \ + BIG_(0xFE00, 0xFE0F) \ + BIG_(0xFE20, 0xFE23) \ + BIG_(0xFEFF, 0xFEFF) \ + BIG_(0xFFF9, 0xFFFB) + static const struct interval combining[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) +# define PAIR(a,b) (a << 2) | (b-a), + static const uint16_t combining1[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) char big_##a[b < 0x4000 && b-a <= 3 ? -1 : 1]; +# define PAIR(a,b) char pair##a[b >= 0x4000 || b-a > 3 ? -1 : 1]; + struct CHECK { ARRAY }; +# undef BIG_ +# undef PAIR +# undef ARRAY +# endif + + if (ucs == 0) + return 0; + + /* Test for 8-bit control characters (00-1f, 80-9f, 7f) */ + if ((ucs & ~0x80) < 0x20 || ucs == 0x7f) + return -1; + /* Quick abort if it is an obviously invalid char */ + if (ucs > CONFIG_LAST_SUPPORTED_WCHAR) + return -1; + + /* Optimization: no combining chars below 0x300 */ + if (CONFIG_LAST_SUPPORTED_WCHAR < 0x300 || ucs < 0x300) + return 1; + +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0x300 + /* Binary search in table of non-spacing characters */ + if (in_interval_table(ucs, combining, ARRAY_SIZE(combining) - 1)) + return 0; + if (in_uint16_table(ucs, combining1, ARRAY_SIZE(combining1) - 1)) + return 0; + + /* Optimization: all chars below 0x1100 are not double-width */ + if (CONFIG_LAST_SUPPORTED_WCHAR < 0x1100 || ucs < 0x1100) + return 1; + +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0x1100 + /* Invalid code points: */ + /* High (d800..dbff) and low (dc00..dfff) surrogates (valid only in UTF16) */ + /* Private Use Area (e000..f8ff) */ + /* Noncharacters fdd0..fdef */ + if ((CONFIG_LAST_SUPPORTED_WCHAR >= 0xd800 && ucs >= 0xd800 && ucs <= 0xf8ff) + || (CONFIG_LAST_SUPPORTED_WCHAR >= 0xfdd0 && ucs >= 0xfdd0 && ucs <= 0xfdef) + ) { + return -1; + } + /* 0xfffe and 0xffff in every plane are invalid */ + if (CONFIG_LAST_SUPPORTED_WCHAR >= 0xfffe && ((ucs & 0xfffe) == 0xfffe)) { + return -1; + } + +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0x10000 + if (ucs >= 0x10000) { + /* Combining chars in Supplementary Multilingual Plane 0x1xxxx */ + static const struct interval combining0x10000[] = { + { 0x0A01, 0x0A03 }, { 0x0A05, 0x0A06 }, { 0x0A0C, 0x0A0F }, + { 0x0A38, 0x0A3A }, { 0x0A3F, 0x0A3F }, { 0xD167, 0xD169 }, + { 0xD173, 0xD182 }, { 0xD185, 0xD18B }, { 0xD1AA, 0xD1AD }, + { 0xD242, 0xD244 } + }; + /* Binary search in table of non-spacing characters in 
Supplementary Multilingual Plane */ + if (in_interval_table(ucs ^ 0x10000, combining0x10000, ARRAY_SIZE(combining0x10000) - 1)) + return 0; + /* Check a few non-spacing chars in Supplementary Special-purpose Plane 0xExxxx */ + if (CONFIG_LAST_SUPPORTED_WCHAR >= 0xE0001 + && ( ucs == 0xE0001 + || (ucs >= 0xE0020 && ucs <= 0xE007F) + || (ucs >= 0xE0100 && ucs <= 0xE01EF) + ) + ) { + return 0; + } + } +# endif + + /* If we arrive here, ucs is not a combining or C0/C1 control character. + * Check whether it's 1 char or 2-shar wide. + */ + return 1 + + ( (/*ucs >= 0x1100 &&*/ ucs <= 0x115f) /* Hangul Jamo init. consonants */ + || ucs == 0x2329 /* left-pointing angle bracket; also CJK punct. char */ + || ucs == 0x232a /* right-pointing angle bracket; also CJK punct. char */ + || (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) /* CJK ... Yi */ +# if CONFIG_LAST_SUPPORTED_WCHAR >= 0xac00 + || (ucs >= 0xac00 && ucs <= 0xd7a3) /* Hangul Syllables */ + || (ucs >= 0xf900 && ucs <= 0xfaff) /* CJK Compatibility Ideographs */ + || (ucs >= 0xfe10 && ucs <= 0xfe19) /* Vertical forms */ + || (ucs >= 0xfe30 && ucs <= 0xfe6f) /* CJK Compatibility Forms */ + || (ucs >= 0xff00 && ucs <= 0xff60) /* Fullwidth Forms */ + || (ucs >= 0xffe0 && ucs <= 0xffe6) + || ((ucs >> 17) == (2 >> 1)) /* 20000..3ffff: Supplementary and Tertiary Ideographic Planes */ +# endif + ); +# endif /* >= 0x1100 */ +# endif /* >= 0x300 */ +} + + +# if ENABLE_UNICODE_BIDI_SUPPORT +int FAST_FUNC unicode_bidi_isrtl(wint_t wc) +{ + /* ranges taken from + * http://www.unicode.org/Public/5.2.0/ucd/extracted/DerivedBidiClass.txt + * Bidi_Class=Left_To_Right | Bidi_Class=Arabic_Letter + */ +# define BIG_(a,b) { a, b }, +# define PAIR(a,b) +# define ARRAY \ + PAIR(0x0590, 0x0590) \ + PAIR(0x05BE, 0x05BE) \ + PAIR(0x05C0, 0x05C0) \ + PAIR(0x05C3, 0x05C3) \ + PAIR(0x05C6, 0x05C6) \ + BIG_(0x05C8, 0x05FF) \ + PAIR(0x0604, 0x0605) \ + PAIR(0x0608, 0x0608) \ + PAIR(0x060B, 0x060B) \ + PAIR(0x060D, 0x060D) \ + BIG_(0x061B, 0x064A) \ + PAIR(0x065F, 0x065F) \ + PAIR(0x066D, 0x066F) \ + BIG_(0x0671, 0x06D5) \ + PAIR(0x06E5, 0x06E6) \ + PAIR(0x06EE, 0x06EF) \ + BIG_(0x06FA, 0x070E) \ + PAIR(0x0710, 0x0710) \ + BIG_(0x0712, 0x072F) \ + BIG_(0x074B, 0x07A5) \ + BIG_(0x07B1, 0x07EA) \ + PAIR(0x07F4, 0x07F5) \ + BIG_(0x07FA, 0x0815) \ + PAIR(0x081A, 0x081A) \ + PAIR(0x0824, 0x0824) \ + PAIR(0x0828, 0x0828) \ + BIG_(0x082E, 0x08FF) \ + PAIR(0x200F, 0x200F) \ + PAIR(0x202B, 0x202B) \ + PAIR(0x202E, 0x202E) \ + BIG_(0xFB1D, 0xFB1D) \ + BIG_(0xFB1F, 0xFB28) \ + BIG_(0xFB2A, 0xFD3D) \ + BIG_(0xFD40, 0xFDCF) \ + BIG_(0xFDC8, 0xFDCF) \ + BIG_(0xFDF0, 0xFDFC) \ + BIG_(0xFDFE, 0xFDFF) \ + BIG_(0xFE70, 0xFEFE) + /* Probably not necessary + {0x10800, 0x1091E}, + {0x10920, 0x10A00}, + {0x10A04, 0x10A04}, + {0x10A07, 0x10A0B}, + {0x10A10, 0x10A37}, + {0x10A3B, 0x10A3E}, + {0x10A40, 0x10A7F}, + {0x10B36, 0x10B38}, + {0x10B40, 0x10E5F}, + {0x10E7F, 0x10FFF}, + {0x1E800, 0x1EFFF} + */ + static const struct interval rtl_b[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) +# define PAIR(a,b) (a << 2) | (b-a), + static const uint16_t rtl_p[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) char big_##a[b < 0x4000 && b-a <= 3 ? -1 : 1]; +# define PAIR(a,b) char pair##a[b >= 0x4000 || b-a > 3 ? 
-1 : 1]; + struct CHECK { ARRAY }; +# undef BIG_ +# undef PAIR +# undef ARRAY + + if (in_interval_table(wc, rtl_b, ARRAY_SIZE(rtl_b) - 1)) + return 1; + if (in_uint16_table(wc, rtl_p, ARRAY_SIZE(rtl_p) - 1)) + return 1; + return 0; +} + +# if ENABLE_UNICODE_NEUTRAL_TABLE +int FAST_FUNC unicode_bidi_is_neutral_wchar(wint_t wc) +{ + /* ranges taken from + * http://www.unicode.org/Public/5.2.0/ucd/extracted/DerivedBidiClass.txt + * Bidi_Classes: Paragraph_Separator, Segment_Separator, + * White_Space, Other_Neutral, European_Number, European_Separator, + * European_Terminator, Arabic_Number, Common_Separator + */ +# define BIG_(a,b) { a, b }, +# define PAIR(a,b) +# define ARRAY \ + BIG_(0x0009, 0x000D) \ + BIG_(0x001C, 0x0040) \ + BIG_(0x005B, 0x0060) \ + PAIR(0x007B, 0x007E) \ + PAIR(0x0085, 0x0085) \ + BIG_(0x00A0, 0x00A9) \ + PAIR(0x00AB, 0x00AC) \ + BIG_(0x00AE, 0x00B4) \ + PAIR(0x00B6, 0x00B9) \ + BIG_(0x00BB, 0x00BF) \ + PAIR(0x00D7, 0x00D7) \ + PAIR(0x00F7, 0x00F7) \ + PAIR(0x02B9, 0x02BA) \ + BIG_(0x02C2, 0x02CF) \ + BIG_(0x02D2, 0x02DF) \ + BIG_(0x02E5, 0x02FF) \ + PAIR(0x0374, 0x0375) \ + PAIR(0x037E, 0x037E) \ + PAIR(0x0384, 0x0385) \ + PAIR(0x0387, 0x0387) \ + PAIR(0x03F6, 0x03F6) \ + PAIR(0x058A, 0x058A) \ + PAIR(0x0600, 0x0603) \ + PAIR(0x0606, 0x0607) \ + PAIR(0x0609, 0x060A) \ + PAIR(0x060C, 0x060C) \ + PAIR(0x060E, 0x060F) \ + BIG_(0x0660, 0x066C) \ + PAIR(0x06DD, 0x06DD) \ + PAIR(0x06E9, 0x06E9) \ + BIG_(0x06F0, 0x06F9) \ + PAIR(0x07F6, 0x07F9) \ + PAIR(0x09F2, 0x09F3) \ + PAIR(0x09FB, 0x09FB) \ + PAIR(0x0AF1, 0x0AF1) \ + BIG_(0x0BF3, 0x0BFA) \ + BIG_(0x0C78, 0x0C7E) \ + PAIR(0x0CF1, 0x0CF2) \ + PAIR(0x0E3F, 0x0E3F) \ + PAIR(0x0F3A, 0x0F3D) \ + BIG_(0x1390, 0x1400) \ + PAIR(0x1680, 0x1680) \ + PAIR(0x169B, 0x169C) \ + PAIR(0x17DB, 0x17DB) \ + BIG_(0x17F0, 0x17F9) \ + BIG_(0x1800, 0x180A) \ + PAIR(0x180E, 0x180E) \ + PAIR(0x1940, 0x1940) \ + PAIR(0x1944, 0x1945) \ + BIG_(0x19DE, 0x19FF) \ + PAIR(0x1FBD, 0x1FBD) \ + PAIR(0x1FBF, 0x1FC1) \ + PAIR(0x1FCD, 0x1FCF) \ + PAIR(0x1FDD, 0x1FDF) \ + PAIR(0x1FED, 0x1FEF) \ + PAIR(0x1FFD, 0x1FFE) \ + BIG_(0x2000, 0x200A) \ + BIG_(0x2010, 0x2029) \ + BIG_(0x202F, 0x205F) \ + PAIR(0x2070, 0x2070) \ + BIG_(0x2074, 0x207E) \ + BIG_(0x2080, 0x208E) \ + BIG_(0x20A0, 0x20B8) \ + PAIR(0x2100, 0x2101) \ + PAIR(0x2103, 0x2106) \ + PAIR(0x2108, 0x2109) \ + PAIR(0x2114, 0x2114) \ + PAIR(0x2116, 0x2118) \ + BIG_(0x211E, 0x2123) \ + PAIR(0x2125, 0x2125) \ + PAIR(0x2127, 0x2127) \ + PAIR(0x2129, 0x2129) \ + PAIR(0x212E, 0x212E) \ + PAIR(0x213A, 0x213B) \ + BIG_(0x2140, 0x2144) \ + PAIR(0x214A, 0x214D) \ + BIG_(0x2150, 0x215F) \ + PAIR(0x2189, 0x2189) \ + BIG_(0x2190, 0x2335) \ + BIG_(0x237B, 0x2394) \ + BIG_(0x2396, 0x23E8) \ + BIG_(0x2400, 0x2426) \ + BIG_(0x2440, 0x244A) \ + BIG_(0x2460, 0x249B) \ + BIG_(0x24EA, 0x26AB) \ + BIG_(0x26AD, 0x26CD) \ + BIG_(0x26CF, 0x26E1) \ + PAIR(0x26E3, 0x26E3) \ + BIG_(0x26E8, 0x26FF) \ + PAIR(0x2701, 0x2704) \ + PAIR(0x2706, 0x2709) \ + BIG_(0x270C, 0x2727) \ + BIG_(0x2729, 0x274B) \ + PAIR(0x274D, 0x274D) \ + PAIR(0x274F, 0x2752) \ + BIG_(0x2756, 0x275E) \ + BIG_(0x2761, 0x2794) \ + BIG_(0x2798, 0x27AF) \ + BIG_(0x27B1, 0x27BE) \ + BIG_(0x27C0, 0x27CA) \ + PAIR(0x27CC, 0x27CC) \ + BIG_(0x27D0, 0x27FF) \ + BIG_(0x2900, 0x2B4C) \ + BIG_(0x2B50, 0x2B59) \ + BIG_(0x2CE5, 0x2CEA) \ + BIG_(0x2CF9, 0x2CFF) \ + BIG_(0x2E00, 0x2E99) \ + BIG_(0x2E9B, 0x2EF3) \ + BIG_(0x2F00, 0x2FD5) \ + BIG_(0x2FF0, 0x2FFB) \ + BIG_(0x3000, 0x3004) \ + BIG_(0x3008, 0x3020) \ + PAIR(0x3030, 0x3030) \ + PAIR(0x3036, 0x3037) \ + PAIR(0x303D, 
0x303D) \ + PAIR(0x303E, 0x303F) \ + PAIR(0x309B, 0x309C) \ + PAIR(0x30A0, 0x30A0) \ + PAIR(0x30FB, 0x30FB) \ + BIG_(0x31C0, 0x31E3) \ + PAIR(0x321D, 0x321E) \ + BIG_(0x3250, 0x325F) \ + PAIR(0x327C, 0x327E) \ + BIG_(0x32B1, 0x32BF) \ + PAIR(0x32CC, 0x32CF) \ + PAIR(0x3377, 0x337A) \ + PAIR(0x33DE, 0x33DF) \ + PAIR(0x33FF, 0x33FF) \ + BIG_(0x4DC0, 0x4DFF) \ + BIG_(0xA490, 0xA4C6) \ + BIG_(0xA60D, 0xA60F) \ + BIG_(0xA673, 0xA673) \ + BIG_(0xA67E, 0xA67F) \ + BIG_(0xA700, 0xA721) \ + BIG_(0xA788, 0xA788) \ + BIG_(0xA828, 0xA82B) \ + BIG_(0xA838, 0xA839) \ + BIG_(0xA874, 0xA877) \ + BIG_(0xFB29, 0xFB29) \ + BIG_(0xFD3E, 0xFD3F) \ + BIG_(0xFDFD, 0xFDFD) \ + BIG_(0xFE10, 0xFE19) \ + BIG_(0xFE30, 0xFE52) \ + BIG_(0xFE54, 0xFE66) \ + BIG_(0xFE68, 0xFE6B) \ + BIG_(0xFF01, 0xFF20) \ + BIG_(0xFF3B, 0xFF40) \ + BIG_(0xFF5B, 0xFF65) \ + BIG_(0xFFE0, 0xFFE6) \ + BIG_(0xFFE8, 0xFFEE) \ + BIG_(0xFFF9, 0xFFFD) + /* + {0x10101, 0x10101}, + {0x10140, 0x1019B}, + {0x1091F, 0x1091F}, + {0x10B39, 0x10B3F}, + {0x10E60, 0x10E7E}, + {0x1D200, 0x1D241}, + {0x1D245, 0x1D245}, + {0x1D300, 0x1D356}, + {0x1D6DB, 0x1D6DB}, + {0x1D715, 0x1D715}, + {0x1D74F, 0x1D74F}, + {0x1D789, 0x1D789}, + {0x1D7C3, 0x1D7C3}, + {0x1D7CE, 0x1D7FF}, + {0x1F000, 0x1F02B}, + {0x1F030, 0x1F093}, + {0x1F100, 0x1F10A} + */ + static const struct interval neutral_b[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) +# define PAIR(a,b) (a << 2) | (b-a), + static const uint16_t neutral_p[] = { ARRAY }; +# undef BIG_ +# undef PAIR +# define BIG_(a,b) char big_##a[b < 0x4000 && b-a <= 3 ? -1 : 1]; +# define PAIR(a,b) char pair##a[b >= 0x4000 || b-a > 3 ? -1 : 1]; + struct CHECK { ARRAY }; +# undef BIG_ +# undef PAIR +# undef ARRAY + + if (in_interval_table(wc, neutral_b, ARRAY_SIZE(neutral_b) - 1)) + return 1; + if (in_uint16_table(wc, neutral_p, ARRAY_SIZE(neutral_p) - 1)) + return 1; + return 0; +} +# endif + +# endif /* UNICODE_BIDI_SUPPORT */ + +#endif /* Homegrown Unicode support */ + + +/* The rest is mostly same for libc and for "homegrown" support */ + +size_t FAST_FUNC unicode_strlen(const char *string) +{ + size_t width = mbstowcs(NULL, string, INT_MAX); + if (width == (size_t)-1L) + return strlen(string); + return width; +} + +size_t FAST_FUNC unicode_strwidth(const char *string) +{ + uni_stat_t uni_stat; + printable_string(&uni_stat, string); + return uni_stat.unicode_width; +} + +static char* FAST_FUNC unicode_conv_to_printable2(uni_stat_t *stats, const char *src, unsigned width, int flags) +{ + char *dst; + unsigned dst_len; + unsigned uni_count; + unsigned uni_width; + + if (unicode_status != UNICODE_ON) { + char *d; + if (flags & UNI_FLAG_PAD) { + d = dst = xmalloc(width + 1); + while ((int)--width >= 0) { + unsigned char c = *src; + if (c == '\0') { + do + *d++ = ' '; + while ((int)--width >= 0); + break; + } + *d++ = (c >= ' ' && c < 0x7f) ? c : '?'; + src++; + } + *d = '\0'; + } else { + d = dst = xstrndup(src, width); + while (*d) { + unsigned char c = *d; + if (c < ' ' || c >= 0x7f) + *d = '?'; + d++; + } + } + if (stats) { + stats->byte_count = (d - dst); + stats->unicode_count = (d - dst); + stats->unicode_width = (d - dst); + } + return dst; + } + + dst = NULL; + uni_count = uni_width = 0; + dst_len = 0; + while (1) { + int w; + wchar_t wc; + +#if ENABLE_UNICODE_USING_LOCALE + { + mbstate_t mbst = { 0 }; + ssize_t rc = mbsrtowcs(&wc, &src, 1, &mbst); + /* If invalid sequence is seen: -1 is returned, + * src points to the invalid sequence, errno = EILSEQ. 
+ * Else number of wchars (excluding terminating L'\0') + * written to dest is returned. + * If len (here: 1) non-L'\0' wchars stored at dest, + * src points to the next char to be converted. + * If string is completely converted: src = NULL. + */ + if (rc == 0) /* end-of-string */ + break; + if (rc < 0) { /* error */ + src++; + goto subst; + } + if (!iswprint(wc)) + goto subst; + } +#else + src = mbstowc_internal(&wc, src); + /* src is advanced to next mb char + * wc == ERROR_WCHAR: invalid sequence is seen + * else: wc is set + */ + if (wc == ERROR_WCHAR) /* error */ + goto subst; + if (wc == 0) /* end-of-string */ + break; +#endif + if (CONFIG_LAST_SUPPORTED_WCHAR && wc > CONFIG_LAST_SUPPORTED_WCHAR) + goto subst; + w = wcwidth(wc); + if ((ENABLE_UNICODE_COMBINING_WCHARS && w < 0) /* non-printable wchar */ + || (!ENABLE_UNICODE_COMBINING_WCHARS && w <= 0) + || (!ENABLE_UNICODE_WIDE_WCHARS && w > 1) + ) { + subst: + wc = CONFIG_SUBST_WCHAR; + w = 1; + } + width -= w; + /* Note: if width == 0, we still may add more chars, + * they may be zero-width or combining ones */ + if ((int)width < 0) { + /* can't add this wc, string would become longer than width */ + width += w; + break; + } + + uni_count++; + uni_width += w; + dst = xrealloc(dst, dst_len + MB_CUR_MAX); +#if ENABLE_UNICODE_USING_LOCALE + { + mbstate_t mbst = { 0 }; + dst_len += wcrtomb(&dst[dst_len], wc, &mbst); + } +#else + dst_len += wcrtomb_internal(&dst[dst_len], wc); +#endif + } + + /* Pad to remaining width */ + if (flags & UNI_FLAG_PAD) { + dst = xrealloc(dst, dst_len + width + 1); + uni_count += width; + uni_width += width; + while ((int)--width >= 0) { + dst[dst_len++] = ' '; + } + } + dst[dst_len] = '\0'; + if (stats) { + stats->byte_count = dst_len; + stats->unicode_count = uni_count; + stats->unicode_width = uni_width; + } + + return dst; +} +char* FAST_FUNC unicode_conv_to_printable(uni_stat_t *stats, const char *src) +{ + return unicode_conv_to_printable2(stats, src, INT_MAX, 0); +} +char* FAST_FUNC unicode_conv_to_printable_fixedwidth(/*uni_stat_t *stats,*/ const char *src, unsigned width) +{ + return unicode_conv_to_printable2(/*stats:*/ NULL, src, width, UNI_FLAG_PAD); +} + +#ifdef UNUSED +char* FAST_FUNC unicode_conv_to_printable_maxwidth(uni_stat_t *stats, const char *src, unsigned maxwidth) +{ + return unicode_conv_to_printable2(stats, src, maxwidth, 0); +} + +unsigned FAST_FUNC unicode_padding_to_width(unsigned width, const char *src) +{ + if (unicode_status != UNICODE_ON) { + return width - strnlen(src, width); + } + + while (1) { + int w; + wchar_t wc; + +#if ENABLE_UNICODE_USING_LOCALE + { + mbstate_t mbst = { 0 }; + ssize_t rc = mbsrtowcs(&wc, &src, 1, &mbst); + if (rc <= 0) /* error, or end-of-string */ + return width; + } +#else + src = mbstowc_internal(&wc, src); + if (wc == ERROR_WCHAR || wc == 0) /* error, or end-of-string */ + return width; +#endif + w = wcwidth(wc); + if (w < 0) /* non-printable wchar */ + return width; + width -= w; + if ((int)width <= 0) /* string is longer than width */ + return 0; + } +} +#endif diff --git a/probe-busybox/libbb/validate_atlas_id.c b/probe-busybox/libbb/validate_atlas_id.c new file mode 100644 index 00000000..b4458b17 --- /dev/null +++ b/probe-busybox/libbb/validate_atlas_id.c @@ -0,0 +1,8 @@ +#include "libbb.h" + +int validate_atlas_id(const char *atlas_id) +{ + if (strspn(atlas_id, "0123456789") == strlen(atlas_id)) + return 1; + return 0; +} diff --git a/probe-busybox/libbb/validate_filename.c b/probe-busybox/libbb/validate_filename.c new file mode 100644 
index 00000000..2960f9c9 --- /dev/null +++ b/probe-busybox/libbb/validate_filename.c @@ -0,0 +1,134 @@ +#include "libbb.h" + +static char *rebased_validated_common(const char* base, const char *path, + const char *prefix, int require_slash) +{ + size_t path_len, prefix_len, atlas_home_len, new_base_len, new_len; + int do_replace, failed; + const char *new_base; + char *new_path; + + + if (atlas_unsafe()) + return strdup(path); + + /* Check for the following properties: + * 1) path starts with prefix or if prefix is relative, + * ether '/home/atlas' or atlas_base() followed by the prefix. + * 2) the next character after prefix is a '/' (or '\0' if + * require_slash is false) + * 3) path does not contain '/../' + * 4) path does not end in '/..' + * return NULL if any of the properties does not hold + * return a new string that replaces '/home/atlas' with base. + */ + path_len= strlen(path); + prefix_len= strlen(prefix); + atlas_home_len= strlen(ATLAS_HOME); + new_base= base; + new_base_len= strlen(new_base); + + do_replace= 0; + if (prefix[0] == '/') + { + if (path_len < prefix_len) + return NULL; + + if (memcmp(path, prefix, prefix_len) != 0) + return NULL; /* property 1 */ + + if (path[prefix_len] != '/') + { + if (require_slash || path[prefix_len] != '\0') + return NULL; /* property 2 */ + } + } + else + { + failed= 0; + do_replace= 1; /* Assume /home/atlas */ + if (path_len < atlas_home_len + 1 + prefix_len) + failed= 1; + if (!failed && memcmp(path, ATLAS_HOME, atlas_home_len) != 0) + failed= 1; /* property 1 */ + if (!failed && path[atlas_home_len] != '/') + failed= 1; /* property 1 */ + if (!failed && + memcmp(path+atlas_home_len+1, prefix, prefix_len) != 0) + { + failed= 1; /* property 1 */ + } + + if (!failed && path[atlas_home_len+1+prefix_len] != '/') + { + if (require_slash || + path[atlas_home_len+1+prefix_len] != '\0') + { + failed =1; /* property 2 */ + } + } + + if (failed) + { + do_replace= 0; + failed= 0; + if (path_len < new_base_len + 1 + prefix_len) + failed= 1; + if (!failed && + memcmp(path, new_base, new_base_len) != 0) + { + failed= 1; /* property 1 */ + } + if (!failed && path[new_base_len] != '/') + failed= 1; /* property 1 */ + if (!failed && + memcmp(path+new_base_len+1, prefix, + prefix_len) != 0) + { + failed= 1; /* property 1 */ + } + + if (!failed && path[new_base_len+1+prefix_len] != + '/') + { + if (require_slash || + path[new_base_len+1+prefix_len] != + '\0') + { + failed =1; /* property 2 */ + } + } + } + + if (failed) + return NULL; + } + + if (strstr(path, "/../") != NULL) + return NULL; /* property 3 */ + + if (path_len >= 3 && strcmp(&path[path_len-3], "/..") == 0) + return NULL; /* property 4 */ + + if (do_replace) + { + new_len= strlen(new_base) + (path_len-atlas_home_len) + 1; + new_path= xmalloc(new_len); + strlcpy(new_path, new_base, new_len); + strlcat(new_path, path+atlas_home_len, new_len); + } + else + new_path= strdup(path); + + return new_path; +} + +char *rebased_validated_filename(const char *base, const char *path, const char *prefix) +{ + return rebased_validated_common(base, path, prefix, 1 /*require_slash*/); +} + +char *rebased_validated_dir(const char *base, const char *path, const char *prefix) +{ + return rebased_validated_common(base, path, prefix, 0 /*!require_slash*/); +} diff --git a/probe-busybox/libbb/verror_msg.c b/probe-busybox/libbb/verror_msg.c new file mode 100644 index 00000000..22c30357 --- /dev/null +++ b/probe-busybox/libbb/verror_msg.c @@ -0,0 +1,182 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. 
+ * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ +#include "libbb.h" +#if ENABLE_FEATURE_SYSLOG +# include <syslog.h> +#endif + +#if ENABLE_FEATURE_SYSLOG +smallint syslog_level = LOG_ERR; +#endif +smallint logmode = LOGMODE_STDIO; +const char *msg_eol = "\n"; + +void FAST_FUNC bb_verror_msg(const char *s, va_list p, const char* strerr) +{ + char *msg, *msg1; + char stack_msg[80]; + int applet_len, strerr_len, msgeol_len, used; + + if (!logmode) + return; + + if (!s) /* nomsg[_and_die] uses NULL fmt */ + s = ""; /* some libc don't like printf(NULL) */ + + applet_len = strlen(applet_name) + 2; /* "applet: " */ + strerr_len = strerr ? strlen(strerr) : 0; + msgeol_len = strlen(msg_eol); + + /* This costs ~90 bytes of code, but avoids costly + * malloc()[in vasprintf]+realloc()+memmove()+free() in 99% of cases. + * ~40% speedup. + */ + if ((int)sizeof(stack_msg) - applet_len > 0) { + va_list p2; + + /* It is not portable to use va_list twice, need to va_copy it */ + va_copy(p2, p); + used = vsnprintf(stack_msg + applet_len, (int)sizeof(stack_msg) - applet_len, s, p2); + va_end(p2); + msg = stack_msg; + used += applet_len; + if (used < (int)sizeof(stack_msg) - 3 - msgeol_len - strerr_len) + goto add_pfx_and_sfx; + } + + used = vasprintf(&msg, s, p); + if (used < 0) + return; + + /* This is ugly and costs +60 bytes compared to multiple + * fprintf's, but is guaranteed to do a single write. + * This is needed for e.g. httpd logging, when multiple + * children can produce log messages simultaneously. */ + + /* can't use xrealloc: it calls error_msg on failure, + * that may result in a recursion */ + /* +3 is for ": " before strerr and for terminating NUL */ + msg1 = realloc(msg, applet_len + used + strerr_len + msgeol_len + 3); + if (!msg1) { + msg[used++] = '\n'; /* overwrites NUL */ + applet_len = 0; + } else { + msg = msg1; + /* TODO: maybe use writev instead of memmoving? Need full_writev? */ + memmove(msg + applet_len, msg, used); + used += applet_len; + add_pfx_and_sfx: + strcpy(msg, applet_name); + msg[applet_len - 2] = ':'; + msg[applet_len - 1] = ' '; + if (strerr) { + if (s[0]) { /* not perror_nomsg? */ + msg[used++] = ':'; + msg[used++] = ' '; + } + strcpy(&msg[used], strerr); + used += strerr_len; + } + strcpy(&msg[used], msg_eol); + used += msgeol_len; + } + + if (logmode & LOGMODE_STDIO) { + fflush_all(); + full_write(STDERR_FILENO, msg, used); + } +#if ENABLE_FEATURE_SYSLOG + if (logmode & LOGMODE_SYSLOG) { + syslog(syslog_level, "%s", msg + applet_len); + } +#endif + if (msg != stack_msg) + free(msg); +} + +#ifdef VERSION_WITH_WRITEV +/* Code size is approximately the same, but currently it's the only user + * of writev in entire bbox. __libc_writev in uclibc is ~50 bytes. */ +void FAST_FUNC bb_verror_msg(const char *s, va_list p, const char* strerr) +{ + int strerr_len, msgeol_len; + struct iovec iov[3]; + +#define used (iov[2].iov_len) +#define msgv (iov[2].iov_base) +#define msgc ((char*)(iov[2].iov_base)) +#define msgptr (&(iov[2].iov_base)) + + if (!logmode) + return; + + if (!s) /* nomsg[_and_die] uses NULL fmt */ + s = ""; /* some libc don't like printf(NULL) */ + + /* Prevent "derefing type-punned ptr will break aliasing rules" */ + used = vasprintf((char**)(void*)msgptr, s, p); + if (used < 0) + return; + + /* This is ugly and costs +60 bytes compared to multiple + * fprintf's, but is guaranteed to do a single write. + * This is needed for e.g.
httpd logging, when multiple + * children can produce log messages simultaneously. */ + + strerr_len = strerr ? strlen(strerr) : 0; + msgeol_len = strlen(msg_eol); + /* +3 is for ": " before strerr and for terminating NUL */ + msgv = xrealloc(msgv, used + strerr_len + msgeol_len + 3); + if (strerr) { + msgc[used++] = ':'; + msgc[used++] = ' '; + strcpy(msgc + used, strerr); + used += strerr_len; + } + strcpy(msgc + used, msg_eol); + used += msgeol_len; + + if (logmode & LOGMODE_STDIO) { + iov[0].iov_base = (char*)applet_name; + iov[0].iov_len = strlen(applet_name); + iov[1].iov_base = (char*)": "; + iov[1].iov_len = 2; + /*iov[2].iov_base = msgc;*/ + /*iov[2].iov_len = used;*/ + fflush_all(); + writev(STDERR_FILENO, iov, 3); + } +# if ENABLE_FEATURE_SYSLOG + if (logmode & LOGMODE_SYSLOG) { + syslog(LOG_ERR, "%s", msgc); + } +# endif + free(msgc); +} +#endif + + +void FAST_FUNC bb_error_msg_and_die(const char *s, ...) +{ + va_list p; + + va_start(p, s); + bb_verror_msg(s, p, NULL); + va_end(p); + xfunc_die(); +} + +void FAST_FUNC bb_error_msg(const char *s, ...) +{ + va_list p; + + va_start(p, s); + bb_verror_msg(s, p, NULL); + va_end(p); +} diff --git a/probe-busybox/libbb/vfork_daemon_rexec.c b/probe-busybox/libbb/vfork_daemon_rexec.c new file mode 100644 index 00000000..c192829b --- /dev/null +++ b/probe-busybox/libbb/vfork_daemon_rexec.c @@ -0,0 +1,292 @@ +/* vi: set sw=4 ts=4: */ +/* + * Rexec program for system have fork() as vfork() with foreground option + * + * Copyright (C) Vladimir N. Oleynik + * Copyright (C) 2003 Russ Dill + * + * daemon() portion taken from uClibc: + * + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Modified for uClibc by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "busybox.h" /* uses applet tables */ + +/* This does a fork/exec in one call, using vfork(). Returns PID of new child, + * -1 for failure. Runs argv[0], searching path if that has no / in it. */ +pid_t FAST_FUNC spawn(char **argv) +{ + /* Compiler should not optimize stores here */ + volatile int failed; + pid_t pid; + + fflush_all(); + + /* Be nice to nommu machines. */ + failed = 0; + pid = vfork(); + if (pid < 0) /* error */ + return pid; + if (!pid) { /* child */ + /* This macro is ok - it doesn't do NOEXEC/NOFORK tricks */ + BB_EXECVP(argv[0], argv); + + /* We are (maybe) sharing a stack with blocked parent, + * let parent know we failed and then exit to unblock parent + * (but don't run atexit() stuff, which would screw up parent.) + */ + failed = errno; + /* mount, for example, does not want the message */ + /*bb_perror_msg("can't execute '%s'", argv[0]);*/ + _exit(111); + } + /* parent */ + /* Unfortunately, this is not reliable: according to standards + * vfork() can be equivalent to fork() and we won't see value + * of 'failed'. + * Interested party can wait on pid and learn exit code. + * If 111 - then it (most probably) failed to exec */ + if (failed) { + safe_waitpid(pid, NULL, 0); /* prevent zombie */ + errno = failed; + return -1; + } + return pid; +} + +/* Die with an error message if we can't spawn a child process. */ +pid_t FAST_FUNC xspawn(char **argv) +{ + pid_t pid = spawn(argv); + if (pid < 0) + bb_simple_perror_msg_and_die(*argv); + return pid; +} + +#if ENABLE_FEATURE_PREFER_APPLETS \ + || ENABLE_FEATURE_SH_NOFORK +static jmp_buf die_jmp; +static void jump(void) +{ + /* Special case. 
We arrive here if NOFORK applet + * calls xfunc, which then decides to die. + * We don't die, but jump instead back to caller. + * NOFORK applets still cannot carelessly call xfuncs: + * p = xmalloc(10); + * q = xmalloc(10); // BUG! if this dies, we leak p! + */ + /* | 0x100 allows to pass zero exitcode (longjmp can't pass 0). + * This works because exitcodes are bytes, + * run_nofork_applet() ensures that by "& 0xff" */ + longjmp(die_jmp, xfunc_error_retval | 0x100); +} + +struct nofork_save_area { + jmp_buf die_jmp; + void (*die_func)(void); + const char *applet_name; + uint32_t option_mask32; + uint8_t xfunc_error_retval; +}; +static void save_nofork_data(struct nofork_save_area *save) +{ + memcpy(&save->die_jmp, &die_jmp, sizeof(die_jmp)); + save->die_func = die_func; + save->applet_name = applet_name; + save->option_mask32 = option_mask32; + save->xfunc_error_retval = xfunc_error_retval; +} +static void restore_nofork_data(struct nofork_save_area *save) +{ + memcpy(&die_jmp, &save->die_jmp, sizeof(die_jmp)); + die_func = save->die_func; + applet_name = save->applet_name; + option_mask32 = save->option_mask32; + xfunc_error_retval = save->xfunc_error_retval; +} + +int FAST_FUNC run_nofork_applet(int applet_no, char **argv) +{ + int rc, argc; + struct nofork_save_area old; + + save_nofork_data(&old); + + xfunc_error_retval = EXIT_FAILURE; + + /* In case getopt() or getopt32() was already called: + * reset the libc getopt() function, which keeps internal state. + * + * BSD-derived getopt() functions require that optind be set to 1 in + * order to reset getopt() state. This used to be generally accepted + * way of resetting getopt(). However, glibc's getopt() + * has additional getopt() state beyond optind, and requires that + * optind be set to zero to reset its state. So the unfortunate state of + * affairs is that BSD-derived versions of getopt() misbehave if + * optind is set to 0 in order to reset getopt(), and glibc's getopt() + * will core dump if optind is set 1 in order to reset getopt(). + * + * More modern versions of BSD require that optreset be set to 1 in + * order to reset getopt(). Sigh. Standards, anyone? + */ +#ifdef __GLIBC__ + optind = 0; +#else /* BSD style */ + optind = 1; + /* optreset = 1; */ +#endif + /* optarg = NULL; opterr = 1; optopt = 63; - do we need this too? */ + /* (values above are what they initialized to in glibc and uclibc) */ + /* option_mask32 = 0; - not needed, no applet depends on it being 0 */ + + argc = 1; + while (argv[argc]) + argc++; + + /* If xfunc "dies" in NOFORK applet, die_func longjmp's here instead */ + die_func = jump; + rc = setjmp(die_jmp); + if (!rc) { + /* Some callers (xargs) + * need argv untouched because they free argv[i]! 
*/ + char *tmp_argv[argc+1]; + memcpy(tmp_argv, argv, (argc+1) * sizeof(tmp_argv[0])); + applet_name = tmp_argv[0]; + /* Finally we can call NOFORK applet's main() */ + rc = applet_main[applet_no](argc, tmp_argv); + } else { + /* xfunc died in NOFORK applet */ + } + + /* Restoring some globals */ + restore_nofork_data(&old); + + /* Other globals can be simply reset to defaults */ +#ifdef __GLIBC__ + optind = 0; +#else /* BSD style */ + optind = 1; +#endif + + return rc & 0xff; /* don't confuse people with "exitcodes" >255 */ +} +#endif /* FEATURE_PREFER_APPLETS || FEATURE_SH_NOFORK */ + +int FAST_FUNC spawn_and_wait(char **argv) +{ + int rc; +#if ENABLE_FEATURE_PREFER_APPLETS + int a = find_applet_by_name(argv[0]); + + if (a >= 0 && (APPLET_IS_NOFORK(a) +# if BB_MMU + || APPLET_IS_NOEXEC(a) /* NOEXEC trick needs fork() */ +# endif + )) { +# if BB_MMU + if (APPLET_IS_NOFORK(a)) +# endif + { + return run_nofork_applet(a, argv); + } +# if BB_MMU + /* MMU only */ + /* a->noexec is true */ + rc = fork(); + if (rc) /* parent or error */ + return wait4pid(rc); + /* child */ + xfunc_error_retval = EXIT_FAILURE; + run_applet_no_and_exit(a, argv); +# endif + } +#endif /* FEATURE_PREFER_APPLETS */ + rc = spawn(argv); + return wait4pid(rc); +} + +#if !BB_MMU +void FAST_FUNC re_exec(char **argv) +{ + /* high-order bit of first char in argv[0] is a hidden + * "we have (already) re-execed, don't do it again" flag */ + argv[0][0] |= 0x80; + execv(bb_busybox_exec_path, argv); + bb_perror_msg_and_die("can't execute '%s'", bb_busybox_exec_path); +} + +pid_t FAST_FUNC fork_or_rexec(char **argv) +{ + pid_t pid; + /* Maybe we are already re-execed and come here again? */ + if (re_execed) + return 0; + pid = xvfork(); + if (pid) /* parent */ + return pid; + /* child - re-exec ourself */ + re_exec(argv); +} +#endif + +/* Due to a #define in libbb.h on MMU systems we actually have 1 argument - + * char **argv "vanishes" */ +void FAST_FUNC bb_daemonize_or_rexec(int flags, char **argv) +{ + int fd; + + if (flags & DAEMON_CHDIR_ROOT) + xchdir("/"); + + if (flags & DAEMON_DEVNULL_STDIO) { + close(0); + close(1); + close(2); + } + + fd = open(bb_dev_null, O_RDWR); + if (fd < 0) { + /* NB: we can be called as bb_sanitize_stdio() from init + * or mdev, and there /dev/null may legitimately not (yet) exist! + * Do not use xopen above, but obtain _ANY_ open descriptor, + * even bogus one as below. */ + fd = xopen("/", O_RDONLY); /* don't believe this can fail */ + } + + while ((unsigned)fd < 2) + fd = dup(fd); /* have 0,1,2 open at least to /dev/null */ + + if (!(flags & DAEMON_ONLY_SANITIZE)) { + if (fork_or_rexec(argv)) + exit(EXIT_SUCCESS); /* parent */ + /* if daemonizing, detach from stdio & ctty */ + setsid(); + dup2(fd, 0); + dup2(fd, 1); + dup2(fd, 2); + if (flags & DAEMON_DOUBLE_FORK) { + /* On Linux, session leader can acquire ctty + * unknowingly, by opening a tty. + * Prevent this: stop being a session leader. + */ + if (fork_or_rexec(argv)) + exit(EXIT_SUCCESS); /* parent */ + } + } + while (fd > 2) { + close(fd--); + if (!(flags & DAEMON_CLOSE_EXTRA_FDS)) + return; + /* else close everything after fd#2 */ + } +} + +void FAST_FUNC bb_sanitize_stdio(void) +{ + bb_daemonize_or_rexec(DAEMON_ONLY_SANITIZE, NULL); +} diff --git a/probe-busybox/libbb/wfopen.c b/probe-busybox/libbb/wfopen.c new file mode 100644 index 00000000..76dc8b82 --- /dev/null +++ b/probe-busybox/libbb/wfopen.c @@ -0,0 +1,56 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. 
+ * + * Copyright (C) 1999-2004 by Erik Andersen + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +FILE* FAST_FUNC fopen_or_warn(const char *path, const char *mode) +{ + FILE *fp = fopen(path, mode); + if (!fp) { + bb_simple_perror_msg(path); + //errno = 0; /* why? */ + } + return fp; +} + +FILE* FAST_FUNC fopen_for_read(const char *path) +{ + return fopen(path, "r"); +} + +FILE* FAST_FUNC xfopen_for_read(const char *path) +{ + return xfopen(path, "r"); +} + +FILE* FAST_FUNC fopen_for_write(const char *path) +{ + return fopen(path, "w"); +} + +FILE* FAST_FUNC xfopen_for_write(const char *path) +{ + return xfopen(path, "w"); +} + +static FILE* xfdopen_helper(unsigned fd_and_rw_bit) +{ + FILE* fp = fdopen(fd_and_rw_bit >> 1, fd_and_rw_bit & 1 ? "w" : "r"); + if (!fp) + bb_error_msg_and_die(bb_msg_memory_exhausted); + return fp; +} +FILE* FAST_FUNC xfdopen_for_read(int fd) +{ + return xfdopen_helper(fd << 1); +} +FILE* FAST_FUNC xfdopen_for_write(int fd) +{ + return xfdopen_helper((fd << 1) + 1); +} diff --git a/probe-busybox/libbb/wfopen_input.c b/probe-busybox/libbb/wfopen_input.c new file mode 100644 index 00000000..d8b1c4a3 --- /dev/null +++ b/probe-busybox/libbb/wfopen_input.c @@ -0,0 +1,56 @@ +/* vi: set sw=4 ts=4: */ +/* + * wfopen_input implementation for busybox + * + * Copyright (C) 2003 Manuel Novoa III + * + * Licensed under GPLv2 or later, see file LICENSE in this source tree. + */ + +/* A number of applets need to open a file for reading, where the filename + * is a command line arg. Since often that arg is '-' (meaning stdin), + * we avoid testing everywhere by consolidating things in this routine. + */ + +#include "libbb.h" + +FILE* FAST_FUNC fopen_or_warn_stdin(const char *filename) +{ + FILE *fp = stdin; + + if (filename != bb_msg_standard_input + && NOT_LONE_DASH(filename) + ) { + fp = fopen_or_warn(filename, "r"); + } + return fp; +} + +FILE* FAST_FUNC xfopen_stdin(const char *filename) +{ + FILE *fp = fopen_or_warn_stdin(filename); + if (fp) + return fp; + xfunc_die(); /* We already output an error message. */ +} + +int FAST_FUNC open_or_warn_stdin(const char *filename) +{ + int fd = STDIN_FILENO; + + if (filename != bb_msg_standard_input + && NOT_LONE_DASH(filename) + ) { + fd = open_or_warn(filename, O_RDONLY); + } + + return fd; +} + +int FAST_FUNC xopen_stdin(const char *filename) +{ + int fd = open_or_warn_stdin(filename); + if (fd >= 0) + return fd; + xfunc_die(); /* We already output an error message. */ +} diff --git a/probe-busybox/libbb/xatonum.c b/probe-busybox/libbb/xatonum.c new file mode 100644 index 00000000..b63b7f54 --- /dev/null +++ b/probe-busybox/libbb/xatonum.c @@ -0,0 +1,117 @@ +/* vi: set sw=4 ts=4: */ +/* + * ascii-to-numbers implementations for busybox + * + * Copyright (C) 2003 Manuel Novoa III + * + * Licensed under GPLv2, see file LICENSE in this source tree. 
+ */ + +#include "libbb.h" + +#define type long long +#define xstrtou(rest) xstrtoull##rest +#define xstrto(rest) xstrtoll##rest +#define xatou(rest) xatoull##rest +#define xato(rest) xatoll##rest +#define XSTR_UTYPE_MAX ULLONG_MAX +#define XSTR_TYPE_MAX LLONG_MAX +#define XSTR_TYPE_MIN LLONG_MIN +#define XSTR_STRTOU strtoull +#include "xatonum_template.c" + +#if ULONG_MAX != ULLONG_MAX +#define type long +#define xstrtou(rest) xstrtoul##rest +#define xstrto(rest) xstrtol##rest +#define xatou(rest) xatoul##rest +#define xato(rest) xatol##rest +#define XSTR_UTYPE_MAX ULONG_MAX +#define XSTR_TYPE_MAX LONG_MAX +#define XSTR_TYPE_MIN LONG_MIN +#define XSTR_STRTOU strtoul +#include "xatonum_template.c" +#endif + +#if UINT_MAX != ULONG_MAX +static ALWAYS_INLINE +unsigned bb_strtoui(const char *str, char **end, int b) +{ + unsigned long v = strtoul(str, end, b); + if (v > UINT_MAX) { + errno = ERANGE; + return UINT_MAX; + } + return v; +} +#define type int +#define xstrtou(rest) xstrtou##rest +#define xstrto(rest) xstrtoi##rest +#define xatou(rest) xatou##rest +#define xato(rest) xatoi##rest +#define XSTR_UTYPE_MAX UINT_MAX +#define XSTR_TYPE_MAX INT_MAX +#define XSTR_TYPE_MIN INT_MIN +/* libc has no strtoui, so we need to create/use our own */ +#define XSTR_STRTOU bb_strtoui +#include "xatonum_template.c" +#endif + +/* A few special cases */ + +int FAST_FUNC xatoi_positive(const char *numstr) +{ + return xatou_range(numstr, 0, INT_MAX); +} + +uint16_t FAST_FUNC xatou16(const char *numstr) +{ + return xatou_range(numstr, 0, 0xffff); +} + +const struct suffix_mult bkm_suffixes[] = { + { "b", 512 }, + { "k", 1024 }, + { "m", 1024*1024 }, + { "", 0 } +}; + +const struct suffix_mult cwbkMG_suffixes[] = { + { "c", 1 }, + { "w", 2 }, + { "b", 512 }, + { "kB", 1000 }, + { "kD", 1000 }, + { "k", 1024 }, + { "KB", 1000 }, /* compat with coreutils dd */ + { "KD", 1000 }, /* compat with coreutils dd */ + { "K", 1024 }, /* compat with coreutils dd */ + { "MB", 1000000 }, + { "MD", 1000000 }, + { "M", 1024*1024 }, + { "GB", 1000000000 }, + { "GD", 1000000000 }, + { "G", 1024*1024*1024 }, + /* "D" suffix for decimal is not in coreutils manpage, looks like it's deprecated */ + /* coreutils also understands TPEZY suffixes for tera- and so on, with B suffix for decimal */ + { "", 0 } +}; + +const struct suffix_mult kmg_i_suffixes[] = { + { "KiB", 1024 }, + { "kiB", 1024 }, + { "K", 1024 }, + { "k", 1024 }, + { "MiB", 1048576 }, + { "miB", 1048576 }, + { "M", 1048576 }, + { "m", 1048576 }, + { "GiB", 1073741824 }, + { "giB", 1073741824 }, + { "G", 1073741824 }, + { "g", 1073741824 }, + { "KB", 1000 }, + { "MB", 1000000 }, + { "GB", 1000000000 }, + { "", 0 } +}; diff --git a/probe-busybox/libbb/xatonum_template.c b/probe-busybox/libbb/xatonum_template.c new file mode 100644 index 00000000..e0471983 --- /dev/null +++ b/probe-busybox/libbb/xatonum_template.c @@ -0,0 +1,195 @@ +/* + * + * Licensed under GPLv2, see file LICENSE in this source tree. 
+ */ +/* +You need to define the following (example): + +#define type long +#define xstrtou(rest) xstrtoul##rest +#define xstrto(rest) xstrtol##rest +#define xatou(rest) xatoul##rest +#define xato(rest) xatol##rest +#define XSTR_UTYPE_MAX ULONG_MAX +#define XSTR_TYPE_MAX LONG_MAX +#define XSTR_TYPE_MIN LONG_MIN +#define XSTR_STRTOU strtoul +*/ + +unsigned type FAST_FUNC xstrtou(_range_sfx)(const char *numstr, int base, + unsigned type lower, + unsigned type upper, + const struct suffix_mult *suffixes) +{ + unsigned type r; + int old_errno; + char *e; + + /* Disallow '-' and any leading whitespace. */ + if (*numstr == '-' || *numstr == '+' || isspace(*numstr)) + goto inval; + + /* Since this is a lib function, we're not allowed to reset errno to 0. + * Doing so could break an app that is deferring checking of errno. + * So, save the old value so that we can restore it if successful. */ + old_errno = errno; + errno = 0; + r = XSTR_STRTOU(numstr, &e, base); + /* Do the initial validity check. Note: The standards do not + * guarantee that errno is set if no digits were found. So we + * must test for this explicitly. */ + if (errno || numstr == e) + goto inval; /* error / no digits / illegal trailing chars */ + + errno = old_errno; /* Ok. So restore errno. */ + + /* Do optional suffix parsing. Allow 'empty' suffix tables. + * Note that we also allow nul suffixes with associated multipliers, + * to allow for scaling of the numstr by some default multiplier. */ + if (suffixes) { + while (suffixes->mult) { + if (strcmp(suffixes->suffix, e) == 0) { + if (XSTR_UTYPE_MAX / suffixes->mult < r) + goto range; /* overflow! */ + r *= suffixes->mult; + goto chk_range; + } + ++suffixes; + } + } + + /* Note: trailing space is an error. + * It would be easy enough to allow though if desired. */ + if (*e) + goto inval; + chk_range: + /* Finally, check for range limits. 
*/ + if (r >= lower && r <= upper) + return r; + range: + bb_error_msg_and_die("number %s is not in %llu..%llu range", + numstr, (unsigned long long)lower, + (unsigned long long)upper); + inval: + bb_error_msg_and_die("invalid number '%s'", numstr); +} + +unsigned type FAST_FUNC xstrtou(_range)(const char *numstr, int base, + unsigned type lower, + unsigned type upper) +{ + return xstrtou(_range_sfx)(numstr, base, lower, upper, NULL); +} + +unsigned type FAST_FUNC xstrtou(_sfx)(const char *numstr, int base, + const struct suffix_mult *suffixes) +{ + return xstrtou(_range_sfx)(numstr, base, 0, XSTR_UTYPE_MAX, suffixes); +} + +unsigned type FAST_FUNC xstrtou()(const char *numstr, int base) +{ + return xstrtou(_range_sfx)(numstr, base, 0, XSTR_UTYPE_MAX, NULL); +} + +unsigned type FAST_FUNC xatou(_range_sfx)(const char *numstr, + unsigned type lower, + unsigned type upper, + const struct suffix_mult *suffixes) +{ + return xstrtou(_range_sfx)(numstr, 10, lower, upper, suffixes); +} + +unsigned type FAST_FUNC xatou(_range)(const char *numstr, + unsigned type lower, + unsigned type upper) +{ + return xstrtou(_range_sfx)(numstr, 10, lower, upper, NULL); +} + +unsigned type FAST_FUNC xatou(_sfx)(const char *numstr, + const struct suffix_mult *suffixes) +{ + return xstrtou(_range_sfx)(numstr, 10, 0, XSTR_UTYPE_MAX, suffixes); +} + +unsigned type FAST_FUNC xatou()(const char *numstr) +{ + return xatou(_sfx)(numstr, NULL); +} + +/* Signed ones */ + +type FAST_FUNC xstrto(_range_sfx)(const char *numstr, int base, + type lower, + type upper, + const struct suffix_mult *suffixes) +{ + unsigned type u = XSTR_TYPE_MAX; + type r; + const char *p = numstr; + + /* NB: if you'll decide to disallow '+': + * at least renice applet needs to allow it */ + if (p[0] == '+' || p[0] == '-') { + ++p; + if (p[0] == '-') + ++u; /* = _MIN (01111... + 1 == 10000...) */ + } + + r = xstrtou(_range_sfx)(p, base, 0, u, suffixes); + + if (*numstr == '-') { + r = -r; + } + + if (r < lower || r > upper) { + bb_error_msg_and_die("number %s is not in %lld..%lld range", + numstr, (long long)lower, (long long)upper); + } + + return r; +} + +type FAST_FUNC xstrto(_range)(const char *numstr, int base, type lower, type upper) +{ + return xstrto(_range_sfx)(numstr, base, lower, upper, NULL); +} + +type FAST_FUNC xstrto()(const char *numstr, int base) +{ + return xstrto(_range_sfx)(numstr, base, XSTR_TYPE_MIN, XSTR_TYPE_MAX, NULL); +} + +type FAST_FUNC xato(_range_sfx)(const char *numstr, + type lower, + type upper, + const struct suffix_mult *suffixes) +{ + return xstrto(_range_sfx)(numstr, 10, lower, upper, suffixes); +} + +type FAST_FUNC xato(_range)(const char *numstr, type lower, type upper) +{ + return xstrto(_range_sfx)(numstr, 10, lower, upper, NULL); +} + +type FAST_FUNC xato(_sfx)(const char *numstr, const struct suffix_mult *suffixes) +{ + return xstrto(_range_sfx)(numstr, 10, XSTR_TYPE_MIN, XSTR_TYPE_MAX, suffixes); +} + +type FAST_FUNC xato()(const char *numstr) +{ + return xstrto(_range_sfx)(numstr, 10, XSTR_TYPE_MIN, XSTR_TYPE_MAX, NULL); +} + +#undef type +#undef xstrtou +#undef xstrto +#undef xatou +#undef xato +#undef XSTR_UTYPE_MAX +#undef XSTR_TYPE_MAX +#undef XSTR_TYPE_MIN +#undef XSTR_STRTOU diff --git a/probe-busybox/libbb/xconnect.c b/probe-busybox/libbb/xconnect.c new file mode 100644 index 00000000..625a35f7 --- /dev/null +++ b/probe-busybox/libbb/xconnect.c @@ -0,0 +1,542 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. 
+ * + * Connect to host at port using address resolution from getaddrinfo + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include +#include /* netinet/in.h needs it */ +#include +#include +#include +#include "libbb.h" + +int FAST_FUNC setsockopt_int(int fd, int level, int optname, int optval) +{ + return setsockopt(fd, level, optname, &optval, sizeof(int)); +} +int FAST_FUNC setsockopt_1(int fd, int level, int optname) +{ + return setsockopt_int(fd, level, optname, 1); +} +int FAST_FUNC setsockopt_SOL_SOCKET_int(int fd, int optname, int optval) +{ + return setsockopt_int(fd, SOL_SOCKET, optname, optval); +} +int FAST_FUNC setsockopt_SOL_SOCKET_1(int fd, int optname) +{ + return setsockopt_SOL_SOCKET_int(fd, optname, 1); +} + +void FAST_FUNC setsockopt_reuseaddr(int fd) +{ + setsockopt_SOL_SOCKET_1(fd, SO_REUSEADDR); +} +int FAST_FUNC setsockopt_broadcast(int fd) +{ + return setsockopt_SOL_SOCKET_1(fd, SO_BROADCAST); +} +int FAST_FUNC setsockopt_keepalive(int fd) +{ + return setsockopt_SOL_SOCKET_1(fd, SO_KEEPALIVE); +} + +#ifdef SO_BINDTODEVICE +int FAST_FUNC setsockopt_bindtodevice(int fd, const char *iface) +{ + int r; + struct ifreq ifr; + strncpy_IFNAMSIZ(ifr.ifr_name, iface); + /* NB: passing (iface, strlen(iface) + 1) does not work! + * (maybe it works on _some_ kernels, but not on 2.6.26) + * Actually, ifr_name is at offset 0, and in practice + * just giving char[IFNAMSIZ] instead of struct ifreq works too. + * But just in case it's not true on some obscure arch... */ + r = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr)); + if (r) + bb_perror_msg("can't bind to interface %s", iface); + return r; +} +#else +int FAST_FUNC setsockopt_bindtodevice(int fd UNUSED_PARAM, + const char *iface UNUSED_PARAM) +{ + bb_error_msg("SO_BINDTODEVICE is not supported on this system"); + return -1; +} +#endif + +static len_and_sockaddr* get_lsa(int fd, int (*get_name)(int fd, struct sockaddr *addr, socklen_t *addrlen)) +{ + len_and_sockaddr lsa; + len_and_sockaddr *lsa_ptr; + + lsa.len = LSA_SIZEOF_SA; + if (get_name(fd, &lsa.u.sa, &lsa.len) != 0) + return NULL; + + lsa_ptr = xzalloc(LSA_LEN_SIZE + lsa.len); + if (lsa.len > LSA_SIZEOF_SA) { /* rarely (if ever) happens */ + lsa_ptr->len = lsa.len; + get_name(fd, &lsa_ptr->u.sa, &lsa_ptr->len); + } else { + memcpy(lsa_ptr, &lsa, LSA_LEN_SIZE + lsa.len); + } + return lsa_ptr; +} + +len_and_sockaddr* FAST_FUNC get_sock_lsa(int fd) +{ + return get_lsa(fd, getsockname); +} + +len_and_sockaddr* FAST_FUNC get_peer_lsa(int fd) +{ + return get_lsa(fd, getpeername); +} + +void FAST_FUNC xconnect(int s, const struct sockaddr *s_addr, socklen_t addrlen) +{ + if (connect(s, s_addr, addrlen) < 0) { + if (ENABLE_FEATURE_CLEAN_UP) + close(s); + if (s_addr->sa_family == AF_INET) + bb_perror_msg_and_die("%s (%s)", + "can't connect to remote host", + inet_ntoa(((struct sockaddr_in *)s_addr)->sin_addr)); + bb_perror_msg_and_die("can't connect to remote host"); + } +} + +void FAST_FUNC xrconnect(int s, + const struct sockaddr *s_addr, socklen_t addrlen, + void (*reportf)(int err)) +{ + if (connect(s, s_addr, addrlen) < 0) { + if (reportf) { + int t_errno= errno; + reportf(t_errno); + errno= t_errno; + } + if (ENABLE_FEATURE_CLEAN_UP) + close(s); + if (s_addr->sa_family == AF_INET) + bb_perror_msg_and_die("%s (%s)", + "cannot connect to remote host", + inet_ntoa(((struct sockaddr_in *)s_addr)->sin_addr)); + bb_perror_msg_and_die("cannot connect to remote host"); + } +} + +/* Return port number for a service. 
+ * If "port" is a number use it as the port. + * If "port" is a name it is looked up in /etc/services, + * if it isnt found return default_port + */ +unsigned FAST_FUNC bb_lookup_port(const char *port, const char *protocol, unsigned default_port) +{ + unsigned port_nr = default_port; + if (port) { + int old_errno; + + /* Since this is a lib function, we're not allowed to reset errno to 0. + * Doing so could break an app that is deferring checking of errno. */ + old_errno = errno; + port_nr = bb_strtou(port, NULL, 10); + if (errno || port_nr > 65535) { + struct servent *tserv = getservbyname(port, protocol); + port_nr = default_port; + if (tserv) + port_nr = ntohs(tserv->s_port); + } + errno = old_errno; + } + return (uint16_t)port_nr; +} + +/* "New" networking API */ + + +int FAST_FUNC get_nport(const struct sockaddr *sa) +{ +#if ENABLE_FEATURE_IPV6 + if (sa->sa_family == AF_INET6) { + return ((struct sockaddr_in6*)sa)->sin6_port; + } +#endif + if (sa->sa_family == AF_INET) { + return ((struct sockaddr_in*)sa)->sin_port; + } + /* What? UNIX socket? IPX?? :) */ + return -1; +} + +void FAST_FUNC set_nport(struct sockaddr *sa, unsigned port) +{ +#if ENABLE_FEATURE_IPV6 + if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (void*) sa; + sin6->sin6_port = port; + return; + } +#endif + if (sa->sa_family == AF_INET) { + struct sockaddr_in *sin = (void*) sa; + sin->sin_port = port; + return; + } + /* What? UNIX socket? IPX?? :) */ +} + +/* We hijack this constant to mean something else */ +/* It doesn't hurt because we will remove this bit anyway */ +#define DIE_ON_ERROR AI_CANONNAME + +#ifndef IF_NOT_FEATURE_IPV6 +#define IF_NOT_FEATURE_IPV6(x) +#endif +#ifndef IF_FEATURE_IPV6 +#define IF_FEATURE_IPV6(...) __VA_ARGS__ +#endif +#ifndef ENABLE_FEATURE_UNIX_LOCAL +#define ENABLE_FEATURE_UNIX_LOCAL 0 +#endif +#ifndef ENABLE_FEATURE_IPV6 +#define ENABLE_FEATURE_IPV6 1 +#endif + +/* host: "1.2.3.4[:port]", "www.google.com[:port]" + * port: if neither of above specifies port # */ +static len_and_sockaddr* str2sockaddr( + const char *host, int port, +IF_FEATURE_IPV6(sa_family_t af,) + int ai_flags) +{ +IF_NOT_FEATURE_IPV6(sa_family_t af = AF_INET;) + int rc; + len_and_sockaddr *r; + struct addrinfo *result = NULL; + struct addrinfo *used_res; + const char *org_host = host; /* only for error msg */ + const char *cp; + struct addrinfo hint; + + if (ENABLE_FEATURE_UNIX_LOCAL && is_prefixed_with(host, "local:")) { + struct sockaddr_un *sun; + + r = xzalloc(LSA_LEN_SIZE + sizeof(struct sockaddr_un)); + r->len = sizeof(struct sockaddr_un); + r->u.sa.sa_family = AF_UNIX; + sun = (struct sockaddr_un *)&r->u.sa; + safe_strncpy(sun->sun_path, host + 6, sizeof(sun->sun_path)); + return r; + } + + r = NULL; + + /* Ugly parsing of host:addr */ + if (ENABLE_FEATURE_IPV6 && host[0] == '[') { + /* Even uglier parsing of [xx]:nn */ + host++; + cp = strchr(host, ']'); + if (!cp || (cp[1] != ':' && cp[1] != '\0')) { + /* Malformed: must be [xx]:nn or [xx] */ + bb_error_msg("bad address '%s'", org_host); + if (ai_flags & DIE_ON_ERROR) + xfunc_die(); + return NULL; + } + } else { + cp = strrchr(host, ':'); + if (ENABLE_FEATURE_IPV6 && cp && strchr(host, ':') != cp) { + /* There is more than one ':' (e.g. 
"::1") */ + cp = NULL; /* it's not a port spec */ + } + } + if (cp) { /* points to ":" or "]:" */ + int sz = cp - host + 1; + + host = safe_strncpy(alloca(sz), host, sz); + if (ENABLE_FEATURE_IPV6 && *cp != ':') { + cp++; /* skip ']' */ + if (*cp == '\0') /* [xx] without port */ + goto skip; + } + cp++; /* skip ':' */ + port = bb_strtou(cp, NULL, 10); + if (errno || (unsigned)port > 0xffff) { + bb_error_msg("bad port spec '%s'", org_host); + if (ai_flags & DIE_ON_ERROR) + xfunc_die(); + return NULL; + } + skip: ; + } + + /* Next two if blocks allow to skip getaddrinfo() + * in case host name is a numeric IP(v6) address. + * getaddrinfo() initializes DNS resolution machinery, + * scans network config and such - tens of syscalls. + */ + /* If we were not asked specifically for IPv6, + * check whether this is a numeric IPv4 */ + IF_FEATURE_IPV6(if(af != AF_INET6)) { + struct in_addr in4; + if (inet_aton(host, &in4) != 0) { + r = xzalloc(LSA_LEN_SIZE + sizeof(struct sockaddr_in)); + r->len = sizeof(struct sockaddr_in); + r->u.sa.sa_family = AF_INET; + r->u.sin.sin_addr = in4; + goto set_port; + } + } +#if ENABLE_FEATURE_IPV6 + /* If we were not asked specifically for IPv4, + * check whether this is a numeric IPv6 */ + if (af != AF_INET) { + struct in6_addr in6; + if (inet_pton(AF_INET6, host, &in6) > 0) { + r = xzalloc(LSA_LEN_SIZE + sizeof(struct sockaddr_in6)); + r->len = sizeof(struct sockaddr_in6); + r->u.sa.sa_family = AF_INET6; + r->u.sin6.sin6_addr = in6; + goto set_port; + } + } +#endif + + memset(&hint, 0 , sizeof(hint)); + hint.ai_family = af; + /* Need SOCK_STREAM, or else we get each address thrice (or more) + * for each possible socket type (tcp,udp,raw...): */ + hint.ai_socktype = SOCK_STREAM; + hint.ai_flags = ai_flags & ~DIE_ON_ERROR; + rc = getaddrinfo(host, NULL, &hint, &result); + if (rc || !result) { + bb_error_msg("bad address '%s'", org_host); + if (ai_flags & DIE_ON_ERROR) + xfunc_die(); + goto ret; + } + used_res = result; +#if ENABLE_FEATURE_PREFER_IPV4_ADDRESS + while (1) { + if (used_res->ai_family == AF_INET) + break; + used_res = used_res->ai_next; + if (!used_res) { + used_res = result; + break; + } + } +#endif + r = xmalloc(LSA_LEN_SIZE + used_res->ai_addrlen); + r->len = used_res->ai_addrlen; + memcpy(&r->u.sa, used_res->ai_addr, used_res->ai_addrlen); + + set_port: + set_nport(&r->u.sa, htons(port)); + ret: + if (result) + freeaddrinfo(result); + return r; +} +#if !ENABLE_FEATURE_IPV6 +#define str2sockaddr(host, port, af, ai_flags) str2sockaddr(host, port, ai_flags) +#endif + +#if ENABLE_FEATURE_IPV6 +len_and_sockaddr* FAST_FUNC host_and_af2sockaddr(const char *host, int port, sa_family_t af) +{ + return str2sockaddr(host, port, af, 0); +} + +len_and_sockaddr* FAST_FUNC xhost_and_af2sockaddr(const char *host, int port, sa_family_t af) +{ + return str2sockaddr(host, port, af, DIE_ON_ERROR); +} +#endif + +len_and_sockaddr* FAST_FUNC host2sockaddr(const char *host, int port) +{ + return str2sockaddr(host, port, AF_UNSPEC, 0); +} + +len_and_sockaddr* FAST_FUNC xhost2sockaddr(const char *host, int port) +{ + return str2sockaddr(host, port, AF_UNSPEC, DIE_ON_ERROR); +} + +len_and_sockaddr* FAST_FUNC xdotted2sockaddr(const char *host, int port) +{ + return str2sockaddr(host, port, AF_UNSPEC, AI_NUMERICHOST | DIE_ON_ERROR); +} + +int FAST_FUNC xsocket_type(len_and_sockaddr **lsap, int family, int sock_type) +{ + len_and_sockaddr *lsa; + int fd; + int len; + + if (family == AF_UNSPEC) { +#if ENABLE_FEATURE_IPV6 + fd = socket(AF_INET6, sock_type, 0); + if (fd >= 0) { + 
family = AF_INET6; + goto done; + } +#endif + family = AF_INET; + } + + fd = xsocket(family, sock_type, 0); + + len = sizeof(struct sockaddr_in); + if (family == AF_UNIX) + len = sizeof(struct sockaddr_un); +#if ENABLE_FEATURE_IPV6 + if (family == AF_INET6) { + done: + len = sizeof(struct sockaddr_in6); + } +#endif + lsa = xzalloc(LSA_LEN_SIZE + len); + lsa->len = len; + lsa->u.sa.sa_family = family; + *lsap = lsa; + return fd; +} + +int FAST_FUNC xsocket_stream(len_and_sockaddr **lsap) +{ + return xsocket_type(lsap, AF_UNSPEC, SOCK_STREAM); +} + +static int create_and_bind_or_die(const char *bindaddr, int port, int sock_type) +{ + int fd; + len_and_sockaddr *lsa; + + if (bindaddr && bindaddr[0]) { + lsa = xdotted2sockaddr(bindaddr, port); + /* user specified bind addr dictates family */ + fd = xsocket(lsa->u.sa.sa_family, sock_type, 0); + } else { + fd = xsocket_type(&lsa, AF_UNSPEC, sock_type); + set_nport(&lsa->u.sa, htons(port)); + } + setsockopt_reuseaddr(fd); + xbind(fd, &lsa->u.sa, lsa->len); + free(lsa); + return fd; +} + +int FAST_FUNC create_and_bind_stream_or_die(const char *bindaddr, int port) +{ + return create_and_bind_or_die(bindaddr, port, SOCK_STREAM); +} + +int FAST_FUNC create_and_bind_dgram_or_die(const char *bindaddr, int port) +{ + return create_and_bind_or_die(bindaddr, port, SOCK_DGRAM); +} + + +int FAST_FUNC create_and_connect_stream_or_die(const char *peer, int port) +{ + int fd; + len_and_sockaddr *lsa; + + lsa = xhost2sockaddr(peer, port); + fd = xsocket(lsa->u.sa.sa_family, SOCK_STREAM, 0); + setsockopt_reuseaddr(fd); + xconnect(fd, &lsa->u.sa, lsa->len); + free(lsa); + return fd; +} + +int FAST_FUNC xconnect_stream(const len_and_sockaddr *lsa) +{ + int fd = xsocket(lsa->u.sa.sa_family, SOCK_STREAM, 0); + xconnect(fd, &lsa->u.sa, lsa->len); + return fd; +} + +/* We hijack this constant to mean something else */ +/* It doesn't hurt because we will add this bit anyway */ +#define IGNORE_PORT NI_NUMERICSERV +static char* FAST_FUNC sockaddr2str(const struct sockaddr *sa, int flags) +{ + char host[128]; + char serv[16]; + int rc; + socklen_t salen; + + if (ENABLE_FEATURE_UNIX_LOCAL && sa->sa_family == AF_UNIX) { + struct sockaddr_un *sun = (struct sockaddr_un *)sa; + return xasprintf("local:%.*s", + (int) sizeof(sun->sun_path), + sun->sun_path); + } + + salen = LSA_SIZEOF_SA; +#if ENABLE_FEATURE_IPV6 + if (sa->sa_family == AF_INET) + salen = sizeof(struct sockaddr_in); + if (sa->sa_family == AF_INET6) + salen = sizeof(struct sockaddr_in6); +#endif + rc = getnameinfo(sa, salen, + host, sizeof(host), + /* can do ((flags & IGNORE_PORT) ? NULL : serv) but why bother? 
*/ + serv, sizeof(serv), + /* do not resolve port# into service _name_ */ + flags | NI_NUMERICSERV + ); + if (rc) + return NULL; + if (flags & IGNORE_PORT) + return xstrdup(host); +#if ENABLE_FEATURE_IPV6 + if (sa->sa_family == AF_INET6) { + if (strchr(host, ':')) /* heh, it's not a resolved hostname */ + return xasprintf("[%s]:%s", host, serv); + /*return xasprintf("%s:%s", host, serv);*/ + /* - fall through instead */ + } +#endif + /* For now we don't support anything else, so it has to be INET */ + /*if (sa->sa_family == AF_INET)*/ + return xasprintf("%s:%s", host, serv); + /*return xstrdup(host);*/ +} + +char* FAST_FUNC xmalloc_sockaddr2host(const struct sockaddr *sa) +{ + return sockaddr2str(sa, 0); +} + +char* FAST_FUNC xmalloc_sockaddr2host_noport(const struct sockaddr *sa) +{ + return sockaddr2str(sa, IGNORE_PORT); +} + +char* FAST_FUNC xmalloc_sockaddr2hostonly_noport(const struct sockaddr *sa) +{ + return sockaddr2str(sa, NI_NAMEREQD | IGNORE_PORT); +} +#ifndef NI_NUMERICSCOPE +# define NI_NUMERICSCOPE 0 +#endif +char* FAST_FUNC xmalloc_sockaddr2dotted(const struct sockaddr *sa) +{ + return sockaddr2str(sa, NI_NUMERICHOST | NI_NUMERICSCOPE); +} + +char* FAST_FUNC xmalloc_sockaddr2dotted_noport(const struct sockaddr *sa) +{ + return sockaddr2str(sa, NI_NUMERICHOST | NI_NUMERICSCOPE | IGNORE_PORT); +} diff --git a/probe-busybox/libbb/xfunc_die.c b/probe-busybox/libbb/xfunc_die.c new file mode 100644 index 00000000..73f7998e --- /dev/null +++ b/probe-busybox/libbb/xfunc_die.c @@ -0,0 +1,22 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 2008 by Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +/* Keeping it separate allows to NOT pull in stdio for VERY small applets. + * Try building busybox with only "true" enabled... */ + +#include "libbb.h" + +void (*die_func)(void); + +void FAST_FUNC xfunc_die(void) +{ + if (die_func) + die_func(); + exit(xfunc_error_retval); +} diff --git a/probe-busybox/libbb/xfuncs.c b/probe-busybox/libbb/xfuncs.c new file mode 100644 index 00000000..45650edb --- /dev/null +++ b/probe-busybox/libbb/xfuncs.c @@ -0,0 +1,358 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * Copyright (C) 2006 Rob Landley + * Copyright (C) 2006 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +/* We need to have separate xfuncs.c and xfuncs_printf.c because + * with current linkers, even with section garbage collection, + * if *.o module references any of XXXprintf functions, you pull in + * entire printf machinery. Even if you do not use the function + * which uses XXXprintf. + * + * xfuncs.c contains functions (not necessarily xfuncs) + * which do not pull in printf, directly or indirectly. + * xfunc_printf.c contains those which do. + * + * TODO: move xmalloc() and xatonum() here. 
+ */ + +#include "libbb.h" + +/* Turn on nonblocking I/O on a fd */ +int FAST_FUNC ndelay_on(int fd) +{ + int flags = fcntl(fd, F_GETFL); + if (flags & O_NONBLOCK) + return flags; + fcntl(fd, F_SETFL, flags | O_NONBLOCK); + return flags; +} + +int FAST_FUNC ndelay_off(int fd) +{ + int flags = fcntl(fd, F_GETFL); + if (!(flags & O_NONBLOCK)) + return flags; + fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); + return flags; +} + +void FAST_FUNC close_on_exec_on(int fd) +{ + fcntl(fd, F_SETFD, FD_CLOEXEC); +} + +char* FAST_FUNC strncpy_IFNAMSIZ(char *dst, const char *src) +{ +#ifndef IFNAMSIZ + enum { IFNAMSIZ = 16 }; +#endif + return strncpy(dst, src, IFNAMSIZ); +} + + +/* Convert unsigned integer to ascii, writing into supplied buffer. + * A truncated result contains the first few digits of the result ala strncpy. + * Returns a pointer past last generated digit, does _not_ store NUL. + */ +void BUG_sizeof(void); +char* FAST_FUNC utoa_to_buf(unsigned n, char *buf, unsigned buflen) +{ + unsigned i, out, res; + + if (buflen) { + out = 0; + if (sizeof(n) == 4) + // 2^32-1 = 4294967295 + i = 1000000000; +#if UINT_MAX > 4294967295 /* prevents warning about "const too large" */ + else + if (sizeof(n) == 8) + // 2^64-1 = 18446744073709551615 + i = 10000000000000000000; +#endif + else + BUG_sizeof(); + for (; i; i /= 10) { + res = n / i; + n = n % i; + if (res || out || i == 1) { + if (--buflen == 0) + break; + out++; + *buf++ = '0' + res; + } + } + } + return buf; +} + +/* Convert signed integer to ascii, like utoa_to_buf() */ +char* FAST_FUNC itoa_to_buf(int n, char *buf, unsigned buflen) +{ + if (!buflen) + return buf; + if (n < 0) { + n = -n; + *buf++ = '-'; + buflen--; + } + return utoa_to_buf((unsigned)n, buf, buflen); +} + +// The following two functions use a static buffer, so calling either one a +// second time will overwrite previous results. +// +// The largest 32 bit integer is -2 billion plus NUL, or 1+10+1=12 bytes. +// It so happens that sizeof(int) * 3 is enough for 32+ bit ints. +// (sizeof(int) * 3 + 2 is correct for any width, even 8-bit) + +static char local_buf[sizeof(int) * 3]; + +/* Convert unsigned integer to ascii using a static buffer (returned). */ +char* FAST_FUNC utoa(unsigned n) +{ + *(utoa_to_buf(n, local_buf, sizeof(local_buf) - 1)) = '\0'; + + return local_buf; +} + +/* Convert signed integer to ascii using a static buffer (returned). */ +char* FAST_FUNC itoa(int n) +{ + *(itoa_to_buf(n, local_buf, sizeof(local_buf) - 1)) = '\0'; + + return local_buf; +} + +/* Emit a string of hex representation of bytes */ +char* FAST_FUNC bin2hex(char *p, const char *cp, int count) +{ + while (count) { + unsigned char c = *cp++; + /* put lowercase hex digits */ + *p++ = 0x20 | bb_hexdigits_upcase[c >> 4]; + *p++ = 0x20 | bb_hexdigits_upcase[c & 0xf]; + count--; + } + return p; +} + +/* Convert "[x]x[:][x]x[:][x]x[:][x]x" hex string to binary, no more than COUNT bytes */ +char* FAST_FUNC hex2bin(char *dst, const char *str, int count) +{ + errno = EINVAL; + while (*str && count) { + uint8_t val; + uint8_t c = *str++; + if (isdigit(c)) + val = c - '0'; + else if ((c|0x20) >= 'a' && (c|0x20) <= 'f') + val = (c|0x20) - ('a' - 10); + else + return NULL; + val <<= 4; + c = *str; + if (isdigit(c)) + val |= c - '0'; + else if ((c|0x20) >= 'a' && (c|0x20) <= 'f') + val |= (c|0x20) - ('a' - 10); + else if (c == ':' || c == '\0') + val >>= 4; + else + return NULL; + + *dst++ = val; + if (c != '\0') + str++; + if (*str == ':') + str++; + count--; + } + errno = (*str ? 
ERANGE : 0); + return dst; +} + +/* Return how long the file at fd is, if there's any way to determine it. */ +#ifdef UNUSED +off_t FAST_FUNC fdlength(int fd) +{ + off_t bottom = 0, top = 0, pos; + long size; + + // If the ioctl works for this, return it. + + if (ioctl(fd, BLKGETSIZE, &size) >= 0) return size*512; + + // FIXME: explain why lseek(SEEK_END) is not used here! + + // If not, do a binary search for the last location we can read. (Some + // block devices don't do BLKGETSIZE right.) + + do { + char temp; + + pos = bottom + (top - bottom) / 2; + + // If we can read from the current location, it's bigger. + + if (lseek(fd, pos, SEEK_SET)>=0 && safe_read(fd, &temp, 1)==1) { + if (bottom == top) bottom = top = (top+1) * 2; + else bottom = pos; + + // If we can't, it's smaller. + } else { + if (bottom == top) { + if (!top) return 0; + bottom = top/2; + } + else top = pos; + } + } while (bottom + 1 != top); + + return pos + 1; +} +#endif + +int FAST_FUNC bb_putchar_stderr(char ch) +{ + return write(STDERR_FILENO, &ch, 1); +} + +ssize_t FAST_FUNC full_write1_str(const char *str) +{ + return full_write(STDOUT_FILENO, str, strlen(str)); +} + +ssize_t FAST_FUNC full_write2_str(const char *str) +{ + return full_write(STDERR_FILENO, str, strlen(str)); +} + +static int wh_helper(int value, int def_val, const char *env_name, int *err) +{ + /* Envvars override even if "value" from ioctl is valid (>0). + * Rationale: it's impossible to guess what user wants. + * For example: "man CMD | ...": should "man" format output + * to stdout's width? stdin's width? /dev/tty's width? 80 chars? + * We _cant_ know it. If "..." saves text for e.g. email, + * then it's probably 80 chars. + * If "..." is, say, "grep -v DISCARD | $PAGER", then user + * would prefer his tty's width to be used! + * + * Since we don't know, at least allow user to do this: + * "COLUMNS=80 man CMD | ..." + */ + char *s = getenv(env_name); + if (s) { + value = atoi(s); + /* If LINES/COLUMNS are set, pretend that there is + * no error getting w/h, this prevents some ugly + * cursor tricks by our callers */ + *err = 0; + } + + if (value <= 1 || value >= 30000) + value = def_val; + return value; +} + +/* It is perfectly ok to pass in a NULL for either width or for + * height, in which case that value will not be set. */ +int FAST_FUNC get_terminal_width_height(int fd, unsigned *width, unsigned *height) +{ + struct winsize win; + int err; + int close_me = -1; + + if (fd == -1) { + if (isatty(STDOUT_FILENO)) + fd = STDOUT_FILENO; + else + if (isatty(STDERR_FILENO)) + fd = STDERR_FILENO; + else + if (isatty(STDIN_FILENO)) + fd = STDIN_FILENO; + else + close_me = fd = open("/dev/tty", O_RDONLY); + } + + win.ws_row = 0; + win.ws_col = 0; + /* I've seen ioctl returning 0, but row/col is (still?) 0. + * We treat that as an error too. 
*/ + err = ioctl(fd, TIOCGWINSZ, &win) != 0 || win.ws_row == 0; + if (height) + *height = wh_helper(win.ws_row, 24, "LINES", &err); + if (width) + *width = wh_helper(win.ws_col, 80, "COLUMNS", &err); + + if (close_me >= 0) + close(close_me); + + return err; +} +int FAST_FUNC get_terminal_width(int fd) +{ + unsigned width; + get_terminal_width_height(fd, &width, NULL); + return width; +} + +int FAST_FUNC tcsetattr_stdin_TCSANOW(const struct termios *tp) +{ + return tcsetattr(STDIN_FILENO, TCSANOW, tp); +} + +pid_t FAST_FUNC safe_waitpid(pid_t pid, int *wstat, int options) +{ + pid_t r; + + do + r = waitpid(pid, wstat, options); + while ((r == -1) && (errno == EINTR)); + return r; +} + +pid_t FAST_FUNC wait_any_nohang(int *wstat) +{ + return safe_waitpid(-1, wstat, WNOHANG); +} + +// Wait for the specified child PID to exit, returning child's error return. +int FAST_FUNC wait4pid(pid_t pid) +{ + int status; + + if (pid <= 0) { + /*errno = ECHILD; -- wrong. */ + /* we expect errno to be already set from failed [v]fork/exec */ + return -1; + } + if (safe_waitpid(pid, &status, 0) == -1) + return -1; + if (WIFEXITED(status)) + return WEXITSTATUS(status); + if (WIFSIGNALED(status)) + return WTERMSIG(status) + 0x180; + return 0; +} + +// Useful when we do know that pid is valid, and we just want to wait +// for it to exit. Not existing pid is fatal. waitpid() status is not returned. +int FAST_FUNC wait_for_exitstatus(pid_t pid) +{ + int exit_status, n; + + n = safe_waitpid(pid, &exit_status, 0); + if (n < 0) + bb_perror_msg_and_die("waitpid"); + return exit_status; +} diff --git a/probe-busybox/libbb/xfuncs_printf.c b/probe-busybox/libbb/xfuncs_printf.c new file mode 100644 index 00000000..4ecd320c --- /dev/null +++ b/probe-busybox/libbb/xfuncs_printf.c @@ -0,0 +1,741 @@ +/* vi: set sw=4 ts=4: */ +/* + * Utility routines. + * + * Copyright (C) 1999-2004 by Erik Andersen + * Copyright (C) 2006 Rob Landley + * Copyright (C) 2006 Denys Vlasenko + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +/* We need to have separate xfuncs.c and xfuncs_printf.c because + * with current linkers, even with section garbage collection, + * if *.o module references any of XXXprintf functions, you pull in + * entire printf machinery. Even if you do not use the function + * which uses XXXprintf. + * + * xfuncs.c contains functions (not necessarily xfuncs) + * which do not pull in printf, directly or indirectly. + * xfunc_printf.c contains those which do. + */ + +#include "libbb.h" + + +/* All the functions starting with "x" call bb_error_msg_and_die() if they + * fail, so callers never need to check for errors. If it returned, it + * succeeded. */ + +#ifndef DMALLOC +/* dmalloc provides variants of these that do abort() on failure. + * Since dmalloc's prototypes overwrite the impls here as they are + * included after these prototypes in libbb.h, all is well. + */ +// Warn if we can't allocate size bytes of memory. +void* FAST_FUNC malloc_or_warn(size_t size) +{ + void *ptr = malloc(size); + if (ptr == NULL && size != 0) + bb_error_msg(bb_msg_memory_exhausted); + return ptr; +} + +// Die if we can't allocate size bytes of memory. +void* FAST_FUNC xmalloc(size_t size) +{ + void *ptr = malloc(size); + if (ptr == NULL && size != 0) + bb_error_msg_and_die(bb_msg_memory_exhausted); + return ptr; +} + +// Die if we can't resize previously allocated memory. (This returns a pointer +// to the new memory, which may or may not be the same as the old memory. 
+// It'll copy the contents to a new chunk and free the old one if necessary.) +void* FAST_FUNC xrealloc(void *ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr == NULL && size != 0) + bb_error_msg_and_die(bb_msg_memory_exhausted); + return ptr; +} +#endif /* DMALLOC */ + +// Die if we can't allocate and zero size bytes of memory. +void* FAST_FUNC xzalloc(size_t size) +{ + void *ptr = xmalloc(size); + memset(ptr, 0, size); + return ptr; +} + +// Die if we can't copy a string to freshly allocated memory. +char* FAST_FUNC xstrdup(const char *s) +{ + char *t; + + if (s == NULL) + return NULL; + + t = strdup(s); + + if (t == NULL) + bb_error_msg_and_die(bb_msg_memory_exhausted); + + return t; +} + +// Die if we can't allocate n+1 bytes (space for the null terminator) and copy +// the (possibly truncated to length n) string into it. +char* FAST_FUNC xstrndup(const char *s, int n) +{ + int m; + char *t; + + if (ENABLE_DEBUG && s == NULL) + bb_error_msg_and_die("xstrndup bug"); + + /* We can just xmalloc(n+1) and strncpy into it, */ + /* but think about xstrndup("abc", 10000) wastage! */ + m = n; + t = (char*) s; + while (m) { + if (!*t) break; + m--; + t++; + } + n -= m; + t = xmalloc(n + 1); + t[n] = '\0'; + + return memcpy(t, s, n); +} + +void* FAST_FUNC xmemdup(const void *s, int n) +{ + return memcpy(xmalloc(n), s, n); +} + +// Die if we can't open a file and return a FILE* to it. +// Notice we haven't got xfread(), This is for use with fscanf() and friends. +FILE* FAST_FUNC xfopen(const char *path, const char *mode) +{ + FILE *fp = fopen(path, mode); + if (fp == NULL) + bb_perror_msg_and_die("can't open '%s'", path); + return fp; +} + +// Die if we can't open a file and return a fd. +int FAST_FUNC xopen3(const char *pathname, int flags, int mode) +{ + int ret; + + ret = open(pathname, flags, mode); + if (ret < 0) { + bb_perror_msg_and_die("can't open '%s'", pathname); + } + return ret; +} + +// Die if we can't open a file and return a fd. +int FAST_FUNC xopen(const char *pathname, int flags) +{ + return xopen3(pathname, flags, 0666); +} + +// Warn if we can't open a file and return a fd. +int FAST_FUNC open3_or_warn(const char *pathname, int flags, int mode) +{ + int ret; + + ret = open(pathname, flags, mode); + if (ret < 0) { + bb_perror_msg("can't open '%s'", pathname); + } + return ret; +} + +// Warn if we can't open a file and return a fd. +int FAST_FUNC open_or_warn(const char *pathname, int flags) +{ + return open3_or_warn(pathname, flags, 0666); +} + +/* Die if we can't open an existing file readonly with O_NONBLOCK + * and return the fd. + * Note that for ioctl O_RDONLY is sufficient. 
+ */ +int FAST_FUNC xopen_nonblocking(const char *pathname) +{ + return xopen(pathname, O_RDONLY | O_NONBLOCK); +} + +int FAST_FUNC xopen_as_uid_gid(const char *pathname, int flags, uid_t u, gid_t g) +{ + int fd; + uid_t old_euid = geteuid(); + gid_t old_egid = getegid(); + + xsetegid(g); + xseteuid(u); + + fd = xopen(pathname, flags); + + xseteuid(old_euid); + xsetegid(old_egid); + + return fd; +} + +void FAST_FUNC xunlink(const char *pathname) +{ + if (unlink(pathname)) + bb_perror_msg_and_die("can't remove file '%s'", pathname); +} + +void FAST_FUNC xrename(const char *oldpath, const char *newpath) +{ + if (rename(oldpath, newpath)) + bb_perror_msg_and_die("can't move '%s' to '%s'", oldpath, newpath); +} + +int FAST_FUNC rename_or_warn(const char *oldpath, const char *newpath) +{ + int n = rename(oldpath, newpath); + if (n) + bb_perror_msg("can't move '%s' to '%s'", oldpath, newpath); + return n; +} + +void FAST_FUNC xpipe(int filedes[2]) +{ + if (pipe(filedes)) + bb_perror_msg_and_die("can't create pipe"); +} + +void FAST_FUNC xdup2(int from, int to) +{ + if (dup2(from, to) != to) + bb_perror_msg_and_die("can't duplicate file descriptor"); +} + +// "Renumber" opened fd +void FAST_FUNC xmove_fd(int from, int to) +{ + if (from == to) + return; + xdup2(from, to); + close(from); +} + +// Die with an error message if we can't write the entire buffer. +void FAST_FUNC xwrite(int fd, const void *buf, size_t count) +{ + if (count) { + ssize_t size = full_write(fd, buf, count); + if ((size_t)size != count) { + /* + * Two cases: write error immediately; + * or some writes succeeded, then we hit an error. + * In either case, errno is set. + */ + bb_perror_msg_and_die( + size >= 0 ? "short write" : "write error" + ); + } + } +} +void FAST_FUNC xwrite_str(int fd, const char *str) +{ + xwrite(fd, str, strlen(str)); +} + +void FAST_FUNC xclose(int fd) +{ + if (close(fd)) + bb_perror_msg_and_die("close failed"); +} + +// Die with an error message if we can't lseek to the right spot. +off_t FAST_FUNC xlseek(int fd, off_t offset, int whence) +{ + off_t off = lseek(fd, offset, whence); + if (off == (off_t)-1) { + if (whence == SEEK_SET) + bb_perror_msg_and_die("lseek(%"OFF_FMT"u)", offset); + bb_perror_msg_and_die("lseek"); + } + return off; +} + +int FAST_FUNC xmkstemp(char *template) +{ + int fd = mkstemp(template); + if (fd < 0) + bb_perror_msg_and_die("can't create temp file '%s'", template); + return fd; +} + +// Die with supplied filename if this FILE* has ferror set. +void FAST_FUNC die_if_ferror(FILE *fp, const char *fn) +{ + if (ferror(fp)) { + /* ferror doesn't set useful errno */ + bb_error_msg_and_die("%s: I/O error", fn); + } +} + +// Die with an error message if stdout has ferror set. +void FAST_FUNC die_if_ferror_stdout(void) +{ + die_if_ferror(stdout, bb_msg_standard_output); +} + +int FAST_FUNC fflush_all(void) +{ + return fflush(NULL); +} + + +int FAST_FUNC bb_putchar(int ch) +{ + return putchar(ch); +} + +/* Die with an error message if we can't copy an entire FILE* to stdout, + * then close that file. */ +void FAST_FUNC xprint_and_close_file(FILE *file) +{ + fflush_all(); + // copyfd outputs error messages for us. + if (bb_copyfd_eof(fileno(file), STDOUT_FILENO) == -1) + xfunc_die(); + + fclose(file); +} + +// Die with an error message if we can't malloc() enough space and do an +// sprintf() into that space. +char* FAST_FUNC xasprintf(const char *format, ...) 
+{ + va_list p; + int r; + char *string_ptr; + + va_start(p, format); + r = vasprintf(&string_ptr, format, p); + va_end(p); + + if (r < 0) + bb_error_msg_and_die(bb_msg_memory_exhausted); + return string_ptr; +} + +void FAST_FUNC xsetenv(const char *key, const char *value) +{ + if (setenv(key, value, 1)) + bb_error_msg_and_die(bb_msg_memory_exhausted); +} + +/* Handles "VAR=VAL" strings, even those which are part of environ + * _right now_ + */ +void FAST_FUNC bb_unsetenv(const char *var) +{ + char *tp = strchr(var, '='); + + if (!tp) { + unsetenv(var); + return; + } + + /* In case var was putenv'ed, we can't replace '=' + * with NUL and unsetenv(var) - it won't work, + * env is modified by the replacement, unsetenv + * sees "VAR" instead of "VAR=VAL" and does not remove it! + * horror :( */ + tp = xstrndup(var, tp - var); + unsetenv(tp); + free(tp); +} + +void FAST_FUNC bb_unsetenv_and_free(char *var) +{ + bb_unsetenv(var); + free(var); +} + +// Die with an error message if we can't set gid. (Because resource limits may +// limit this user to a given number of processes, and if that fills up the +// setgid() will fail and we'll _still_be_root_, which is bad.) +void FAST_FUNC xsetgid(gid_t gid) +{ + if (setgid(gid)) bb_perror_msg_and_die("setgid"); +} + +// Die with an error message if we can't set uid. (See xsetgid() for why.) +void FAST_FUNC xsetuid(uid_t uid) +{ + if (setuid(uid)) bb_perror_msg_and_die("setuid"); +} + +void FAST_FUNC xsetegid(gid_t egid) +{ + if (setegid(egid)) bb_perror_msg_and_die("setegid"); +} + +void FAST_FUNC xseteuid(uid_t euid) +{ + if (seteuid(euid)) bb_perror_msg_and_die("seteuid"); +} + +// Die if we can't chdir to a new path. +void FAST_FUNC xchdir(const char *path) +{ + if (chdir(path)) + bb_perror_msg_and_die("can't change directory to '%s'", path); +} + +void FAST_FUNC xfchdir(int fd) +{ + if (fchdir(fd)) + bb_perror_msg_and_die("fchdir"); +} + +void FAST_FUNC xchroot(const char *path) +{ + if (chroot(path)) + bb_perror_msg_and_die("can't change root directory to '%s'", path); + xchdir("/"); +} + +// Print a warning message if opendir() fails, but don't die. +DIR* FAST_FUNC warn_opendir(const char *path) +{ + DIR *dp; + + dp = opendir(path); + if (!dp) + bb_perror_msg("can't open '%s'", path); + return dp; +} + +// Die with an error message if opendir() fails. +DIR* FAST_FUNC xopendir(const char *path) +{ + DIR *dp; + + dp = opendir(path); + if (!dp) + bb_perror_msg_and_die("can't open '%s'", path); + return dp; +} + +// Die with an error message if we can't open a new socket. +int FAST_FUNC xsocket(int domain, int type, int protocol) +{ + int r = socket(domain, type, protocol); + + if (r < 0) { + /* Hijack vaguely related config option */ +#if ENABLE_VERBOSE_RESOLUTION_ERRORS + const char *s = "INET"; +# ifdef AF_PACKET + if (domain == AF_PACKET) s = "PACKET"; +# endif +# ifdef AF_NETLINK + if (domain == AF_NETLINK) s = "NETLINK"; +# endif +IF_FEATURE_IPV6(if (domain == AF_INET6) s = "INET6";) + bb_perror_msg_and_die("socket(AF_%s,%d,%d)", s, type, protocol); +#else + bb_perror_msg_and_die("socket"); +#endif + } + + return r; +} + +// Die with an error message if we can't bind a socket to an address. +void FAST_FUNC xbind(int sockfd, struct sockaddr *my_addr, socklen_t addrlen) +{ + if (bind(sockfd, my_addr, addrlen)) bb_perror_msg_and_die("bind"); +} + +// Call a user supplied reporting function and die with an error message if we +// can't bind a socket to an address. 
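+// A minimal usage sketch (the callback and variable names here are
+// hypothetical, not part of this file): the caller supplies a function that
+// records the bind error for its own reporting before the process exits.
+// errno is saved around the callback and restored before the final perror
+// message, and xrbind() never returns on failure; passing NULL for the
+// callback makes it behave exactly like xbind().
+//
+//	static void report_err(int err) { /* e.g. stash err in a result buffer */ }
+//	...
+//	xrbind(fd, (struct sockaddr *)&sin, sizeof(sin), report_err);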
+void FAST_FUNC xrbind(int sockfd, struct sockaddr *my_addr, socklen_t addrlen, + void (*reportf)(int err)) +{ + if (bind(sockfd, my_addr, addrlen)) { + if (reportf) { + int t_errno= errno; + reportf(t_errno); + errno= t_errno; + } + bb_perror_msg_and_die("bind"); + } +} + +// Die with an error message if we can't listen for connections on a socket. +void FAST_FUNC xlisten(int s, int backlog) +{ + if (listen(s, backlog)) bb_perror_msg_and_die("listen"); +} + +/* Die with an error message if sendto failed. + * Return bytes sent otherwise */ +ssize_t FAST_FUNC xsendto(int s, const void *buf, size_t len, const struct sockaddr *to, + socklen_t tolen) +{ + ssize_t ret = sendto(s, buf, len, 0, to, tolen); + if (ret < 0) { + if (ENABLE_FEATURE_CLEAN_UP) + close(s); + bb_perror_msg_and_die("sendto"); + } + return ret; +} + +/* Call a user supplied function and die with an error message if sendto failed. + * Return bytes sent otherwise */ +ssize_t FAST_FUNC xrsendto(int s, const void *buf, size_t len, + const struct sockaddr *to, socklen_t tolen, + void (*reportf)(int err)) +{ + ssize_t ret = sendto(s, buf, len, 0, to, tolen); + if (ret < 0) { + if (reportf) { + int t_errno= errno; + reportf(t_errno); + t_errno= errno; + } + if (ENABLE_FEATURE_CLEAN_UP) + close(s); + bb_perror_msg_and_die("sendto"); + } + return ret; +} + +/* Call a user supplied function with an error message if sendto failed. + * Return bytes sent otherwise */ +ssize_t FAST_FUNC rsendto(int s, const void *buf, size_t len, + const struct sockaddr *to, socklen_t tolen, + void (*reportf)(int err)) +{ + ssize_t ret = sendto(s, buf, len, 0, to, tolen); + if (ret < 0) { + int t_errno= errno; + if (reportf) { + reportf(t_errno); + } + if (ENABLE_FEATURE_CLEAN_UP) + close(s); + errno= t_errno; + bb_perror_msg("sendto"); + errno= t_errno; + } + return ret; +} + +// xstat() - a stat() which dies on failure with meaningful error message +void FAST_FUNC xstat(const char *name, struct stat *stat_buf) +{ + if (stat(name, stat_buf)) + bb_perror_msg_and_die("can't stat '%s'", name); +} + +void FAST_FUNC xfstat(int fd, struct stat *stat_buf, const char *errmsg) +{ + /* errmsg is usually a file name, but not always: + * xfstat may be called in a spot where file name is no longer + * available, and caller may give e.g. "can't stat input file" string. + */ + if (fstat(fd, stat_buf)) + bb_simple_perror_msg_and_die(errmsg); +} + +// selinux_or_die() - die if SELinux is disabled. +void FAST_FUNC selinux_or_die(void) +{ +#if ENABLE_SELINUX + int rc = is_selinux_enabled(); + if (rc == 0) { + bb_error_msg_and_die("SELinux is disabled"); + } else if (rc < 0) { + bb_error_msg_and_die("is_selinux_enabled() failed"); + } +#else + bb_error_msg_and_die("SELinux support is disabled"); +#endif +} + +int FAST_FUNC ioctl_or_perror_and_die(int fd, unsigned request, void *argp, const char *fmt,...) +{ + int ret; + va_list p; + + ret = ioctl(fd, request, argp); + if (ret < 0) { + va_start(p, fmt); + bb_verror_msg(fmt, p, strerror(errno)); + /* xfunc_die can actually longjmp, so be nice */ + va_end(p); + xfunc_die(); + } + return ret; +} + +int FAST_FUNC ioctl_or_perror(int fd, unsigned request, void *argp, const char *fmt,...) 
+{ + va_list p; + int ret = ioctl(fd, request, argp); + + if (ret < 0) { + va_start(p, fmt); + bb_verror_msg(fmt, p, strerror(errno)); + va_end(p); + } + return ret; +} + +#if ENABLE_IOCTL_HEX2STR_ERROR +int FAST_FUNC bb_ioctl_or_warn(int fd, unsigned request, void *argp, const char *ioctl_name) +{ + int ret; + + ret = ioctl(fd, request, argp); + if (ret < 0) + bb_simple_perror_msg(ioctl_name); + return ret; +} +int FAST_FUNC bb_xioctl(int fd, unsigned request, void *argp, const char *ioctl_name) +{ + int ret; + + ret = ioctl(fd, request, argp); + if (ret < 0) + bb_simple_perror_msg_and_die(ioctl_name); + return ret; +} +#else +int FAST_FUNC bb_ioctl_or_warn(int fd, unsigned request, void *argp) +{ + int ret; + + ret = ioctl(fd, request, argp); + if (ret < 0) + bb_perror_msg("ioctl %#x failed", request); + return ret; +} +int FAST_FUNC bb_xioctl(int fd, unsigned request, void *argp) +{ + int ret; + + ret = ioctl(fd, request, argp); + if (ret < 0) + bb_perror_msg_and_die("ioctl %#x failed", request); + return ret; +} +#endif + +char* FAST_FUNC xmalloc_ttyname(int fd) +{ + char buf[128]; + int r = ttyname_r(fd, buf, sizeof(buf) - 1); + if (r) + return NULL; + return xstrdup(buf); +} + +void FAST_FUNC generate_uuid(uint8_t *buf) +{ + /* http://www.ietf.org/rfc/rfc4122.txt + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | time_low | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | time_mid | time_hi_and_version | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |clk_seq_and_variant | node (0-1) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | node (2-5) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * IOW, uuid has this layout: + * uint32_t time_low (big endian) + * uint16_t time_mid (big endian) + * uint16_t time_hi_and_version (big endian) + * version is a 4-bit field: + * 1 Time-based + * 2 DCE Security, with embedded POSIX UIDs + * 3 Name-based (MD5) + * 4 Randomly generated + * 5 Name-based (SHA-1) + * uint16_t clk_seq_and_variant (big endian) + * variant is a 3-bit field: + * 0xx Reserved, NCS backward compatibility + * 10x The variant specified in rfc4122 + * 110 Reserved, Microsoft backward compatibility + * 111 Reserved for future definition + * uint8_t node[6] + * + * For version 4, these bits are set/cleared: + * time_hi_and_version & 0x0fff | 0x4000 + * clk_seq_and_variant & 0x3fff | 0x8000 + */ + pid_t pid; + int i; + + i = open("/dev/urandom", O_RDONLY); + if (i >= 0) { + read(i, buf, 16); + close(i); + } + /* Paranoia. /dev/urandom may be missing. + * rand() is guaranteed to generate at least [0, 2^15) range, + * but lowest bits in some libc are not so "random". */ + srand(monotonic_us()); /* pulls in printf */ + pid = getpid(); + while (1) { + for (i = 0; i < 16; i++) + buf[i] ^= rand() >> 5; + if (pid == 0) + break; + srand(pid); + pid = 0; + } + + /* version = 4 */ + buf[4 + 2 ] = (buf[4 + 2 ] & 0x0f) | 0x40; + /* variant = 10x */ + buf[4 + 2 + 2] = (buf[4 + 2 + 2] & 0x3f) | 0x80; +} + +#if BB_MMU +pid_t FAST_FUNC xfork(void) +{ + pid_t pid; + pid = fork(); + if (pid < 0) /* wtf? 
*/ + bb_perror_msg_and_die("vfork"+1); + return pid; +} +#endif + +void FAST_FUNC xvfork_parent_waits_and_exits(void) +{ + pid_t pid; + + fflush_all(); + pid = xvfork(); + if (pid > 0) { + /* Parent */ + int exit_status = wait_for_exitstatus(pid); + if (WIFSIGNALED(exit_status)) + kill_myself_with_sig(WTERMSIG(exit_status)); + _exit(WEXITSTATUS(exit_status)); + } + /* Child continues */ +} diff --git a/probe-busybox/libbb/xreadlink.c b/probe-busybox/libbb/xreadlink.c new file mode 100644 index 00000000..7d4cb60a --- /dev/null +++ b/probe-busybox/libbb/xreadlink.c @@ -0,0 +1,125 @@ +/* vi: set sw=4 ts=4: */ +/* + * xreadlink.c - safe implementation of readlink. + * Returns a NULL on failure. + * + * Licensed under GPLv2, see file LICENSE in this source tree. + */ + +#include "libbb.h" + +/* Some systems (eg Hurd) do not have MAXSYMLINKS definition, + * set it to some reasonable value if it isn't defined */ +#ifndef MAXSYMLINKS +# define MAXSYMLINKS 20 +#endif + +/* + * NOTE: This function returns a malloced char* that you will have to free + * yourself. + */ +char* FAST_FUNC xmalloc_readlink(const char *path) +{ + enum { GROWBY = 80 }; /* how large we will grow strings by */ + + char *buf = NULL; + int bufsize = 0, readsize = 0; + + do { + bufsize += GROWBY; + buf = xrealloc(buf, bufsize); + readsize = readlink(path, buf, bufsize); + if (readsize == -1) { + free(buf); + return NULL; + } + } while (bufsize < readsize + 1); + + buf[readsize] = '\0'; + + return buf; +} + +/* + * This routine is not the same as realpath(), which + * canonicalizes the given path completely. This routine only + * follows trailing symlinks until a real file is reached and + * returns its name. If the path ends in a dangling link or if + * the target doesn't exist, the path is returned in any case. + * Intermediate symlinks in the path are not expanded -- only + * those at the tail. + * A malloced char* is returned, which must be freed by the caller. 
+ */ +char* FAST_FUNC xmalloc_follow_symlinks(const char *path) +{ + char *buf; + char *lpc; + char *linkpath; + int bufsize; + int looping = MAXSYMLINKS + 1; + + buf = xstrdup(path); + goto jump_in; + + while (1) { + linkpath = xmalloc_readlink(buf); + if (!linkpath) { + /* not a symlink, or doesn't exist */ + if (errno == EINVAL || errno == ENOENT) + return buf; + goto free_buf_ret_null; + } + + if (!--looping) { + free(linkpath); + free_buf_ret_null: + free(buf); + return NULL; + } + + if (*linkpath != '/') { + bufsize += strlen(linkpath); + buf = xrealloc(buf, bufsize); + lpc = bb_get_last_path_component_strip(buf); + strcpy(lpc, linkpath); + free(linkpath); + } else { + free(buf); + buf = linkpath; + jump_in: + bufsize = strlen(buf) + 1; + } + } +} + +char* FAST_FUNC xmalloc_readlink_or_warn(const char *path) +{ + char *buf = xmalloc_readlink(path); + if (!buf) { + /* EINVAL => "file: Invalid argument" => puzzled user */ + const char *errmsg = "not a symlink"; + int err = errno; + if (err != EINVAL) + errmsg = strerror(err); + bb_error_msg("%s: cannot read link: %s", path, errmsg); + } + return buf; +} + +char* FAST_FUNC xmalloc_realpath(const char *path) +{ +/* NB: uclibc also defines __GLIBC__ + * Therefore the test "if glibc, or uclibc >= 0.9.31" looks a bit weird: + */ +#if defined(__GLIBC__) && \ + (!defined(__UCLIBC__) || UCLIBC_VERSION >= KERNEL_VERSION(0, 9, 31)) + /* glibc provides a non-standard extension */ + /* new: POSIX.1-2008 specifies this behavior as well */ + return realpath(path, NULL); +#else + char buf[PATH_MAX+1]; + + /* on error returns NULL (xstrdup(NULL) == NULL) */ + return xstrdup(realpath(path, buf)); +#endif +} diff --git a/probe-busybox/libevent-2.1.11-stable/.gitignore b/probe-busybox/libevent-2.1.11-stable/.gitignore new file mode 100644 index 00000000..955846cd --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/.gitignore @@ -0,0 +1,156 @@ +### These files should get ignored no matter where they appear. + +# Editors leave these lying around +\#*\# +.#* +*~ +*.swp + +# C stuff +*.o + +# Windows stuff +*.obj +*.exe +*.lib + +# Patch leaves these lying arround +*.orig +*.rej + +# gcov stuff +*.gcno +*.gcov +*.gcda + +# gdb stuff +.gdb_history + +# Autotools stuff +.deps +.dirstamp +Makefile +Makefile.in + +# Libtool stuff +.libs +*.lo +*.la + +# ctags stuff +TAGS +tags + +# cscope stuff +cscope* + +# Stuff made by our makefiles +*.pc +*.log +*.trs + +## The initial / makes these files only get ignored in particular directories. 
+/autom4te.cache + +# configure in progress +/.cyg* +/confdefs.* +/conftest.* + +# Libtool adds these, at least sometimes +/m4/libtool.m4 +/m4/ltoptions.m4 +/m4/ltsugar.m4 +/m4/ltversion.m4 +/m4/lt~obsolete.m4 + +/aclocal.m4 +/compile +/doxygen +/config.cache +/config.guess +/config.log +/config.status +/config.sub +/configure +/configure.lineno +/depcomp +/config.h +/config.h.in +/install-sh +/libtool +/ltmain.sh +/missing +/stamp-h1 +/stamp-h2 + +/sample/dns-example +/sample/event-read-fifo +/sample/hello-world +/sample/http-server +/sample/http-connect +/sample/le-proxy +/sample/https-client +/sample/signal-test +/sample/time-test +/sample/event-test + +/test-driver +/test/bench +/test/bench_cascade +/test/bench_http +/test/bench_httpclient +/test/regress +/test/regress.gen.c +/test/regress.gen.h +/test/rpcgen-attempted +/test/test-dumpevents +/test/test-eof +/test/test-closed +/test/test-init +/test/test-ratelim +/test/test-script.sh +/test/test-time +/test/test-weof +/test/test-changelist +/test/test-fdleak + +**/include/event2/event-config.h +**/include/evconfig-private.h +/evconfig-private.h + +# Files generated by cmake +CMakeCache.txt +CMakeFiles/ +CTestTestfile.cmake +DartConfiguration.tcl +LibeventConfig.cmake +LibeventConfigVersion.cmake +LibeventTargets.cmake +bin/ +cmake_install.cmake +Uninstall.cmake +lib/ +tmp/ +verify_tests.sh +verify_tests.bat +event.dir +event_core.dir +event_extra.dir +*.vcxproj +*.sln +*.filters +install_manifest.txt + +# ninja +build.ninja +rules.ninja +.ninja_deps +.ninja_log + +# make dist +/COPYING +/INSTALL +/*.tar.gz + +/.vagrant diff --git a/probe-busybox/libevent-2.1.11-stable/CMakeLists.txt b/probe-busybox/libevent-2.1.11-stable/CMakeLists.txt new file mode 100644 index 00000000..70acb696 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/CMakeLists.txt @@ -0,0 +1,1531 @@ +# +# Libevent CMake project +# +# Based on initial work by: +# Alexey Ozeritsky +# +# Additional changes: +# Brodie Thiesfield +# Joakim Soderberg +# Trond Norbye +# Sergei Nikulov +# +# Build example: +# +# cd libevent +# md build +# cd build +# cmake -G "Visual Studio 10" .. +# start libevent.sln +# + +cmake_minimum_required(VERSION 3.1 FATAL_ERROR) + +if (POLICY CMP0054) + cmake_policy(SET CMP0054 NEW) +endif() +if (POLICY CMP0074) + cmake_policy(SET CMP0074 NEW) +endif() +if (POLICY CMP0075) + cmake_policy(SET CMP0075 NEW) +endif() + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release + CACHE STRING "Set build type to Debug o Release (default Release)" FORCE) +endif() +string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LOWER) + +# get rid of the extra default configurations +# what? why would you get id of other useful build types? 
- Ellzey +set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Limited configurations" FORCE) + +set(EVENT__LIBRARY_TYPE DEFAULT CACHE STRING + "Set library type to SHARED/STATIC/BOTH (default SHARED for MSVC, otherwise BOTH)") + +project(libevent C) + +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/") +string(REGEX MATCH "SunOS" SOLARIS "${CMAKE_SYSTEM_NAME}") + + +include(CheckTypeSize) +include(CheckFunctionExistsEx) +include(CheckFileOffsetBits) +include(CheckFunctionExists) +include(CheckIncludeFile) +include(CheckIncludeFiles) +include(CheckVariableExists) +include(CheckSymbolExists) +include(CheckStructHasMember) +include(CheckCSourceCompiles) +include(CheckPrototypeDefinition) +include(CheckFunctionKeywords) +include(CheckConstExists) +include(AddCompilerFlags) +include(VersionViaGit) + +event_fuzzy_version_from_git() + +set(EVENT_VERSION_MAJOR ${EVENT_GIT___VERSION_MAJOR}) +set(EVENT_VERSION_MINOR ${EVENT_GIT___VERSION_MINOR}) +set(EVENT_VERSION_PATCH ${EVENT_GIT___VERSION_PATCH}) +set(EVENT_VERSION_STAGE ${EVENT_GIT___VERSION_STAGE}) + + +set(EVENT_ABI_MAJOR ${EVENT_VERSION_MAJOR}) +set(EVENT_ABI_MINOR ${EVENT_VERSION_MINOR}) +set(EVENT_ABI_PATCH ${EVENT_VERSION_PATCH}) + +set(EVENT_ABI_LIBVERSION + "${EVENT_ABI_MAJOR}.${EVENT_ABI_MINOR}.${EVENT_ABI_PATCH}") + +set(EVENT_PACKAGE_VERSION + "${EVENT_VERSION_MAJOR}.${EVENT_VERSION_MINOR}.${EVENT_VERSION_PATCH}") + +set(EVENT_NUMERIC_VERSION 0x02010b00) +# equals to VERSION_INFO in Makefile.am +set(EVENT_ABI_LIBVERSION_CURRENT 7) +set(EVENT_ABI_LIBVERSION_REVISION 0) +set(EVENT_ABI_LIBVERSION_AGE 0) + +# equals to RELEASE in Makefile.am +set(EVENT_PACKAGE_RELEASE 2.1) + +# only a subset of names can be used, defaults to "beta" +set(EVENT_STAGE_NAME ${EVENT_VERSION_STAGE}) + +# a list that defines what can set for EVENT_STAGE_VERSION +set(EVENT__ALLOWED_STAGE_NAMES + rc + beta + alpha + alpha-dev + release + stable +) +list( + FIND EVENT__ALLOWED_STAGE_NAMES + "${EVENT_STAGE_NAME}" + EVENT__STAGE_RET +) +if (EVENT__STAGE_RET EQUAL -1) + message(WARNING + "stage ${EVENT_STAGE_NAME} is not allowed, reset to beta") + set(EVENT_STAGE_NAME beta) +endif() + +set(EVENT_VERSION + "${EVENT_VERSION_MAJOR}.${EVENT_VERSION_MINOR}.${EVENT_VERSION_PATCH}-${EVENT_STAGE_NAME}") + +option(EVENT__DISABLE_DEBUG_MODE + "Define if libevent should build without support for a debug mode" OFF) + +option(EVENT__ENABLE_VERBOSE_DEBUG + "Enables verbose debugging" OFF) + +option(EVENT__DISABLE_MM_REPLACEMENT + "Define if libevent should not allow replacing the mm functions" OFF) + +option(EVENT__DISABLE_THREAD_SUPPORT + "Define if libevent should not be compiled with thread support" OFF) + +option(EVENT__DISABLE_OPENSSL + "Define if libevent should build without support for OpenSSL encryption" OFF) + +option(EVENT__DISABLE_BENCHMARK + "Defines if libevent should build without the benchmark executables" OFF) + +option(EVENT__DISABLE_TESTS + "If tests should be compiled or not" OFF) + +option(EVENT__DISABLE_REGRESS + "Disable the regress tests" OFF) + +option(EVENT__DISABLE_SAMPLES + "Disable sample files" OFF) + +option(EVENT__DISABLE_CLOCK_GETTIME + "Do not use clock_gettime even if it is available" OFF) + +option(EVENT__FORCE_KQUEUE_CHECK + "When crosscompiling forces running a test program that verifies that Kqueue works with pipes. Note that this requires you to manually run the test program on the cross compilation target to verify that it works. 
See cmake documentation for try_run for more details" OFF) + +# TODO: Add --disable-largefile omit support for large files +option(EVENT__COVERAGE +"Enable running gcov to get a test coverage report (only works with GCC/CLang). Make sure to enable -DCMAKE_BUILD_TYPE=Debug as well." OFF) + +# Put the libaries and binaries that get built into directories at the +# top of the build tree rather than in hard-to-find leaf directories. +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib) + +if (EVENT__ENABLE_VERBOSE_DEBUG) + add_definitions(-DUSE_DEBUG=1) +endif() + +# make it colorful under ninja-build +if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") + add_compiler_flags(-fdiagnostics-color=always) +endif() + +# Setup compiler flags for coverage. +if (EVENT__COVERAGE) + if (NOT "${CMAKE_BUILD_TYPE_LOWER}" STREQUAL "debug") + message(FATAL_ERROR "Coverage requires -DCMAKE_BUILD_TYPE=Debug") + endif() + + message(STATUS "Setting coverage compiler flags") + + set(CMAKE_REQUIRED_LIBRARIES "--coverage") + add_compiler_flags(-g -O0 --coverage) + set(CMAKE_REQUIRED_LIBRARIES "") +endif() + +set(GNUC 0) +set(CLANG 0) +set(MSVC 0) +if (("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang") OR + ("${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang")) + set(CLANG 1) +endif() +if (("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU") OR (${CLANG})) + set(GNUC 1) +endif() +if (("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC") OR (${CLANG})) + set(MSVC 1) +endif() + +# Detect library type +set(EVENT_LIBRARY_TYPE) +if ("${EVENT__LIBRARY_TYPE}" STREQUAL "DEFAULT") + if (${MSVC}) + set(EVENT_LIBRARY_TYPE SHARED) + else() + set(EVENT_LIBRARY_TYPE BOTH) + endif() +else() + string(TOUPPER "${EVENT__LIBRARY_TYPE}" EVENT_LIBRARY_TYPE) +endif() +if ((${MSVC}) AND ("${EVENT_LIBRARY_TYPE}" STREQUAL "BOTH")) + message(WARNING + "Building SHARED and STATIC is not supported for MSVC " + "(due to conflicts in library name" + " between STATIC library and IMPORTED library for SHARED libraries)") +endif() +set(EVENT_LIBRARY_STATIC OFF) +set(EVENT_LIBRARY_SHARED OFF) +if ("${EVENT_LIBRARY_TYPE}" STREQUAL "BOTH") + set(EVENT_LIBRARY_STATIC ON) + set(EVENT_LIBRARY_SHARED ON) +elseif ("${EVENT_LIBRARY_TYPE}" STREQUAL "STATIC") + set(EVENT_LIBRARY_STATIC ON) +elseif ("${EVENT_LIBRARY_TYPE}" STREQUAL "SHARED") + set(EVENT_LIBRARY_SHARED ON) +else() + message(FATAL_ERROR "${EVENT_LIBRARY_TYPE} is not supported") +endif() + +if (${MSVC}) + set(msvc_static_runtime OFF) + if ("${EVENT_LIBRARY_TYPE}" STREQUAL "STATIC") + set(msvc_static_runtime ON) + endif() + + # For more info: + # - https://docs.microsoft.com/en-us/cpp/build/reference/md-mt-ld-use-run-time-library?view=vs-2017 + # - https://gitlab.kitware.com/cmake/community/wikis/FAQ#how-can-i-build-my-msvc-application-with-a-static-runtime + option(EVENT__MSVC_STATIC_RUNTIME + "Link static runtime libraries" + ${msvc_static_runtime}) + + if (EVENT__MSVC_STATIC_RUNTIME) + foreach (flag_var + CMAKE_C_FLAGS + CMAKE_C_FLAGS_DEBUG + CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_MINSIZEREL + CMAKE_C_FLAGS_RELWITHDEBINFO + ) + if (${flag_var} MATCHES "/MD") + string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") + endif() + endforeach() + endif() +endif() + +# GNUC specific options. 
+if (${GNUC}) + option(EVENT__DISABLE_GCC_WARNINGS "Disable verbose warnings with GCC" OFF) + option(EVENT__ENABLE_GCC_HARDENING "Enable compiler security checks" OFF) + option(EVENT__ENABLE_GCC_FUNCTION_SECTIONS "Enable gcc function sections" OFF) + option(EVENT__ENABLE_GCC_WARNINGS "Make all GCC warnings into errors" OFF) + + set(GCC_V ${CMAKE_C_COMPILER_VERSION}) + + list(APPEND __FLAGS + -Wall -Wextra -Wno-unused-parameter -Wstrict-aliasing -Wstrict-prototypes + + -fno-strict-aliasing # gcc 2.9.5+ + -Wmissing-prototypes + + # gcc 4 + -Winit-self + -Wmissing-field-initializers + -Wdeclaration-after-statement + + # gcc 4.2 + -Waddress + -Wnormalized=id + -Woverride-init + + # gcc 4.5 + -Wlogical-op + + -Wwrite-strings + ) + + if (${CLANG}) + list(APPEND __FLAGS -Wno-unused-function) + endif() + + if (EVENT__DISABLE_GCC_WARNINGS) + list(APPEND __FLAGS -w) + endif() + + if (EVENT__ENABLE_GCC_HARDENING) + list(APPEND __FLAGS + -fstack-protector-all + -fwrapv + -fPIE + -Wstack-protector + "--param ssp-buffer-size=1") + + add_definitions(-D_FORTIFY_SOURCE=2) + endif() + + if (EVENT__ENABLE_GCC_FUNCTION_SECTIONS) + list(APPEND __FLAGS -ffunction-sections) + # TODO: Add --gc-sections support. We need some checks for NetBSD to ensure this works. + endif() + + if (EVENT__ENABLE_GCC_WARNINGS) + list(APPEND __FLAGS -Werror) + endif() + + add_compiler_flags(${__FLAGS}) +endif() + +if (APPLE) + # Get rid of deprecated warnings for OpenSSL on OSX 10.7 and greater. + add_compiler_flags( + -Wno-error=deprecated-declarations + -Qunused-arguments + ) +endif() + +# Winsock. +if(WIN32) + set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h ws2tcpip.h) + set(CMAKE_REQUIRED_LIBRARIES ws2_32.lib shell32.lib advapi32.lib) + set(CMAKE_REQUIRED_DEFINITIONS -FIwinsock2.h -FIws2tcpip.h) +endif() +if (SOLARIS) + set(CMAKE_REQUIRED_LIBRARIES socket nsl) +endif() + +# Check if _GNU_SOURCE is available. 
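+# (On glibc, _GNU_SOURCE must be defined before any system header is included
+# so that GNU extensions such as pipe2(), accept4() and splice() are declared;
+# the block below probes features.h for it, unconditionally enables it on
+# Android, and then adds -D_GNU_SOURCE=1 to the compile definitions.)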
+if (NOT DEFINED _GNU_SOURCE) + CHECK_SYMBOL_EXISTS(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) + + if (NOT _GNU_SOURCE) + unset(_GNU_SOURCE CACHE) + CHECK_SYMBOL_EXISTS(_GNU_SOURCE "features.h" _GNU_SOURCE) + endif() + + if (ANDROID) + set(_GNU_SOURCE TRUE) + endif() +endif() + +if (_GNU_SOURCE) + add_definitions(-D_GNU_SOURCE=1) +endif() + +CHECK_INCLUDE_FILE(sys/types.h EVENT__HAVE_SYS_TYPES_H) +if(EVENT__HAVE_SYS_TYPES_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/types.h) +endif() + +CHECK_INCLUDE_FILE(sys/socket.h EVENT__HAVE_SYS_SOCKET_H) +if(EVENT__HAVE_SYS_SOCKET_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/socket.h) +endif() + +CHECK_INCLUDE_FILE(netinet/in.h EVENT__HAVE_NETINET_IN_H) +if(EVENT__HAVE_NETINET_IN_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES netinet/in.h) +endif() + +CHECK_INCLUDE_FILE(sys/un.h EVENT__HAVE_SYS_UN_H) +if(EVENT__HAVE_SYS_UN_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/un.h) +endif() + +if(WIN32) + CHECK_INCLUDE_FILE(afunix.h EVENT__HAVE_AFUNIX_H) + if(EVENT__HAVE_AFUNIX_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES afunix.h) + endif() +endif() +CHECK_TYPE_SIZE("struct sockaddr_un" EVENT__HAVE_STRUCT_SOCKADDR_UN) + +CHECK_INCLUDE_FILE(netinet/in6.h EVENT__HAVE_NETINET_IN6_H) +if(EVENT__HAVE_NETINET_IN6_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES netinet/in6.h) +endif() + +CHECK_INCLUDE_FILE(unistd.h EVENT__HAVE_UNISTD_H) +CHECK_INCLUDE_FILE(netdb.h EVENT__HAVE_NETDB_H) +CHECK_INCLUDE_FILE(dlfcn.h EVENT__HAVE_DLFCN_H) +CHECK_INCLUDE_FILE(arpa/inet.h EVENT__HAVE_ARPA_INET_H) +CHECK_INCLUDE_FILE(fcntl.h EVENT__HAVE_FCNTL_H) +if(EVENT__HAVE_FCNTL_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES fcntl.h) +endif() +CHECK_INCLUDE_FILE(inttypes.h EVENT__HAVE_INTTYPES_H) +CHECK_INCLUDE_FILE(memory.h EVENT__HAVE_MEMORY_H) +CHECK_INCLUDE_FILE(poll.h EVENT__HAVE_POLL_H) +CHECK_INCLUDE_FILE(port.h EVENT__HAVE_PORT_H) +if(EVENT__HAVE_PORT_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES port.h) +endif() +CHECK_INCLUDE_FILE(signal.h EVENT__HAVE_SIGNAL_H) +CHECK_INCLUDE_FILE(stdarg.h EVENT__HAVE_STDARG_H) +CHECK_INCLUDE_FILE(stddef.h EVENT__HAVE_STDDEF_H) +CHECK_INCLUDE_FILE(stdint.h EVENT__HAVE_STDINT_H) +CHECK_INCLUDE_FILE(stdlib.h EVENT__HAVE_STDLIB_H) +CHECK_INCLUDE_FILE(strings.h EVENT__HAVE_STRINGS_H) +CHECK_INCLUDE_FILE(string.h EVENT__HAVE_STRING_H) +CHECK_INCLUDE_FILE(sys/devpoll.h EVENT__HAVE_DEVPOLL) +CHECK_INCLUDE_FILE(sys/epoll.h EVENT__HAVE_SYS_EPOLL_H) +CHECK_INCLUDE_FILE(sys/eventfd.h EVENT__HAVE_SYS_EVENTFD_H) +CHECK_INCLUDE_FILE(sys/event.h EVENT__HAVE_SYS_EVENT_H) +CHECK_INCLUDE_FILE(sys/ioctl.h EVENT__HAVE_SYS_IOCTL_H) +CHECK_INCLUDE_FILE(sys/mman.h EVENT__HAVE_SYS_MMAN_H) +CHECK_INCLUDE_FILE(sys/param.h EVENT__HAVE_SYS_PARAM_H) +CHECK_INCLUDE_FILE(sys/queue.h EVENT__HAVE_SYS_QUEUE_H) +CHECK_INCLUDE_FILE(sys/select.h EVENT__HAVE_SYS_SELECT_H) +CHECK_INCLUDE_FILE(sys/sendfile.h EVENT__HAVE_SYS_SENDFILE_H) +CHECK_INCLUDE_FILE(sys/stat.h EVENT__HAVE_SYS_STAT_H) +CHECK_INCLUDE_FILE(sys/time.h EVENT__HAVE_SYS_TIME_H) +if(EVENT__HAVE_SYS_TIME_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/time.h) +endif() +CHECK_INCLUDE_FILE(sys/uio.h EVENT__HAVE_SYS_UIO_H) +CHECK_INCLUDE_FILES("sys/types.h;ifaddrs.h" EVENT__HAVE_IFADDRS_H) +CHECK_INCLUDE_FILE(mach/mach_time.h EVENT__HAVE_MACH_MACH_TIME_H) +CHECK_INCLUDE_FILE(netinet/tcp.h EVENT__HAVE_NETINET_TCP_H) +CHECK_INCLUDE_FILE(sys/wait.h EVENT__HAVE_SYS_WAIT_H) +CHECK_INCLUDE_FILE(sys/resource.h EVENT__HAVE_SYS_RESOURCE_H) +CHECK_INCLUDE_FILE(sys/sysctl.h EVENT__HAVE_SYS_SYSCTL_H) +CHECK_INCLUDE_FILE(sys/timerfd.h 
EVENT__HAVE_SYS_TIMERFD_H) +CHECK_INCLUDE_FILE(errno.h EVENT__HAVE_ERRNO_H) + + +CHECK_FUNCTION_EXISTS_EX(epoll_create EVENT__HAVE_EPOLL) +CHECK_FUNCTION_EXISTS_EX(epoll_ctl EVENT__HAVE_EPOLL_CTL) +CHECK_FUNCTION_EXISTS_EX(eventfd EVENT__HAVE_EVENTFD) +if(NOT EVENT__DISABLE_CLOCK_GETTIME) + CHECK_FUNCTION_EXISTS_EX(clock_gettime EVENT__HAVE_CLOCK_GETTIME) +endif() +CHECK_FUNCTION_EXISTS_EX(fcntl EVENT__HAVE_FCNTL) +CHECK_FUNCTION_EXISTS_EX(getaddrinfo EVENT__HAVE_GETADDRINFO) +CHECK_FUNCTION_EXISTS_EX(getnameinfo EVENT__HAVE_GETNAMEINFO) +CHECK_FUNCTION_EXISTS_EX(gettimeofday EVENT__HAVE_GETTIMEOFDAY) +CHECK_FUNCTION_EXISTS_EX(getprotobynumber EVENT__HAVE_GETPROTOBYNUMBER) +CHECK_FUNCTION_EXISTS_EX(getservbyname EVENT__HAVE_GETSERVBYNAME) +CHECK_FUNCTION_EXISTS_EX(inet_ntop EVENT__HAVE_INET_NTOP) +CHECK_FUNCTION_EXISTS_EX(inet_pton EVENT__HAVE_INET_PTON) +CHECK_FUNCTION_EXISTS_EX(kqueue EVENT__HAVE_KQUEUE) +CHECK_FUNCTION_EXISTS_EX(mmap EVENT__HAVE_MMAP) +CHECK_FUNCTION_EXISTS_EX(pipe EVENT__HAVE_PIPE) +CHECK_FUNCTION_EXISTS_EX(pipe2 EVENT__HAVE_PIPE2) +CHECK_FUNCTION_EXISTS_EX(poll EVENT__HAVE_POLL) +CHECK_FUNCTION_EXISTS_EX(port_create EVENT__HAVE_PORT_CREATE) +CHECK_FUNCTION_EXISTS_EX(sendfile EVENT__HAVE_SENDFILE) +CHECK_FUNCTION_EXISTS_EX(sigaction EVENT__HAVE_SIGACTION) +CHECK_FUNCTION_EXISTS_EX(signal EVENT__HAVE_SIGNAL) +CHECK_FUNCTION_EXISTS_EX(splice EVENT__HAVE_SPLICE) +CHECK_FUNCTION_EXISTS_EX(strlcpy EVENT__HAVE_STRLCPY) +CHECK_FUNCTION_EXISTS_EX(strsep EVENT__HAVE_STRSEP) +CHECK_FUNCTION_EXISTS_EX(strtok_r EVENT__HAVE_STRTOK_R) +CHECK_FUNCTION_EXISTS_EX(strtoll EVENT__HAVE_STRTOLL) +CHECK_FUNCTION_EXISTS_EX(vasprintf EVENT__HAVE_VASPRINTF) +CHECK_FUNCTION_EXISTS_EX(sysctl EVENT__HAVE_SYSCTL) +CHECK_FUNCTION_EXISTS_EX(accept4 EVENT__HAVE_ACCEPT4) +CHECK_FUNCTION_EXISTS_EX(arc4random EVENT__HAVE_ARC4RANDOM) +CHECK_FUNCTION_EXISTS_EX(arc4random_buf EVENT__HAVE_ARC4RANDOM_BUF) +CHECK_FUNCTION_EXISTS_EX(arc4random_addrandom EVENT__HAVE_ARC4RANDOM_ADDRANDOM) +CHECK_FUNCTION_EXISTS_EX(epoll_create1 EVENT__HAVE_EPOLL_CREATE1) +CHECK_FUNCTION_EXISTS_EX(getegid EVENT__HAVE_GETEGID) +CHECK_FUNCTION_EXISTS_EX(geteuid EVENT__HAVE_GETEUID) +CHECK_FUNCTION_EXISTS_EX(getifaddrs EVENT__HAVE_GETIFADDRS) +CHECK_FUNCTION_EXISTS_EX(issetugid EVENT__HAVE_ISSETUGID) +CHECK_FUNCTION_EXISTS_EX(mach_absolute_time EVENT__HAVE_MACH_ABSOLUTE_TIME) +CHECK_FUNCTION_EXISTS_EX(nanosleep EVENT__HAVE_NANOSLEEP) +CHECK_FUNCTION_EXISTS_EX(usleep EVENT__HAVE_USLEEP) +CHECK_FUNCTION_EXISTS_EX(timeradd EVENT__HAVE_TIMERADD) +CHECK_FUNCTION_EXISTS_EX(timerclear EVENT__HAVE_TIMERCLEAR) +CHECK_FUNCTION_EXISTS_EX(timercmp EVENT__HAVE_TIMERCMP) +CHECK_FUNCTION_EXISTS_EX(timerfd_create EVENT__HAVE_TIMERFD_CREATE) +CHECK_FUNCTION_EXISTS_EX(timerisset EVENT__HAVE_TIMERISSET) +CHECK_FUNCTION_EXISTS_EX(putenv EVENT__HAVE_PUTENV) +CHECK_FUNCTION_EXISTS_EX(setenv EVENT__HAVE_SETENV) +CHECK_FUNCTION_EXISTS_EX(setrlimit EVENT__HAVE_SETRLIMIT) +CHECK_FUNCTION_EXISTS_EX(umask EVENT__HAVE_UMASK) +CHECK_FUNCTION_EXISTS_EX(unsetenv EVENT__HAVE_UNSETENV) + +# Get the gethostbyname_r prototype. 
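+# (Three incompatible prototypes exist in the wild and are probed below: a
+# 3-argument form using struct hostent_data (AIX/HP-UX style), a 5-argument
+# form returning struct hostent * (Solaris style), and a 6-argument form
+# returning int (glibc/Linux style).)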
+CHECK_FUNCTION_EXISTS_EX(gethostbyname_r EVENT__HAVE_GETHOSTBYNAME_R) + +if(EVENT__HAVE_GETHOSTBYNAME_R) + CHECK_PROTOTYPE_DEFINITION(gethostbyname_r + "int gethostbyname_r(const char *name, struct hostent *hp, struct hostent_data *hdata)" + "0" + "netdb.h" + EVENT__HAVE_GETHOSTBYNAME_R_3_ARG) + + CHECK_PROTOTYPE_DEFINITION(gethostbyname_r + "struct hostent *gethostbyname_r(const char *name, struct hostent *hp, char *buf, size_t buflen, int *herr)" + "NULL" + "netdb.h" + EVENT__HAVE_GETHOSTBYNAME_R_5_ARG) + + CHECK_PROTOTYPE_DEFINITION(gethostbyname_r + "int gethostbyname_r(const char *name, struct hostent *hp, char *buf, size_t buflen, struct hostent **result, int *herr)" + "0" + "netdb.h" + EVENT__HAVE_GETHOSTBYNAME_R_6_ARG) +endif() + +if(HAVE_PORT_H AND HAVE_PORT_CREATE) + set(EVENT__HAVE_EVENT_PORTS 1) +endif() + +if(NOT WIN32) + CHECK_FUNCTION_EXISTS_EX(select EVENT__HAVE_SELECT) +endif() + +CHECK_TYPE_SIZE("uint8_t" EVENT__HAVE_UINT8_T) +CHECK_TYPE_SIZE("uint16_t" EVENT__HAVE_UINT16_T) +CHECK_TYPE_SIZE("uint32_t" EVENT__HAVE_UINT32_T) +CHECK_TYPE_SIZE("uint64_t" EVENT__HAVE_UINT64_T) +CHECK_TYPE_SIZE("short" EVENT__SIZEOF_SHORT BUILTIN_TYPES_ONLY) +CHECK_TYPE_SIZE("int" EVENT__SIZEOF_INT BUILTIN_TYPES_ONLY) +CHECK_TYPE_SIZE("unsigned" EVENT__SIZEOF_UNSIGNED BUILTIN_TYPES_ONLY) +CHECK_TYPE_SIZE("unsigned int" EVENT__SIZEOF_UNSIGNED_INT BUILTIN_TYPES_ONLY) +CHECK_TYPE_SIZE("long" EVENT__SIZEOF_LONG BUILTIN_TYPES_ONLY) +CHECK_TYPE_SIZE("long long" EVENT__SIZEOF_LONG_LONG BUILTIN_TYPES_ONLY) + +if(WIN32) + # These aren't available until Windows Vista. + # But you can still link them. They just won't be found when running the exe. + set(EVENT__HAVE_INET_NTOP 0) + set(EVENT__HAVE_INET_PTON 0) +endif() + +# Check for different inline keyword versions. +check_function_keywords("inline" "__inline" "__inline__") + +if (HAVE_INLINE) + set(EVENT__inline inline) +elseif (HAVE___INLINE) + set(EVENT__inline __inline) +elseif(HAVE___INLINE__) + set(EVENT__inline __inline__) +else() + set(EVENT__inline) +endif() + +# __func__/__FUNCTION__ is not a macros in general +CHECK_SYMBOL_EXISTS("__func__" "" EVENT__HAVE___func__) +CHECK_SYMBOL_EXISTS("__FUNCTION__" "" EVENT__HAVE___FUNCTION__) + +CHECK_SYMBOL_EXISTS(TAILQ_FOREACH sys/queue.h EVENT__HAVE_TAILQFOREACH) +CHECK_CONST_EXISTS(CTL_KERN sys/sysctl.h EVENT__HAVE_DECL_CTL_KERN) +CHECK_CONST_EXISTS(KERN_ARND sys/sysctl.h EVENT__HAVE_DECL_KERN_ARND) +CHECK_CONST_EXISTS(KERN_RANDOM sys/sysctl.h EVENT__HAVE_DECL_KERN_RANDOM) +CHECK_CONST_EXISTS(RANDOM_UUID sys/sysctl.h EVENT__HAVE_DECL_RANDOM_UUID) +CHECK_SYMBOL_EXISTS(F_SETFD fcntl.h EVENT__HAVE_SETFD) + +CHECK_TYPE_SIZE(fd_mask EVENT__HAVE_FD_MASK) + +CHECK_TYPE_SIZE(size_t EVENT__SIZEOF_SIZE_T) +if(NOT EVENT__SIZEOF_SIZE_T) + set(EVENT__size_t "unsigned") + set(EVENT__SIZEOF_SIZE_T ${EVENT__SIZEOF_UNSIGNED}) +else() + set(EVENT__size_t size_t) +endif() + +CHECK_TYPE_SIZE("off_t" EVENT__SIZEOF_OFF_T LANGUAGE C) + + +# XXX we should functionalize these size and type sets. --elley + +# Winssck. 
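+# (MSVC does not provide a POSIX ssize_t; the checks below probe both the
+# lowercase name and the SSIZE_T typedef from BaseTsd.h, falling back to
+# plain int if neither is available.)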
+if (_MSC_VER) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES BaseTsd.h) +endif() +CHECK_TYPE_SIZE("ssize_t" EVENT__SIZEOF_SSIZE_T_LOWER LANGUAGE C) +CHECK_TYPE_SIZE("SSIZE_T" EVENT__SIZEOF_SSIZE_T_UPPER LANGUAGE C) + +if (EVENT__SIZEOF_SSIZE_T_LOWER) + set(EVENT__ssize_t "ssize_t") + set(EVENT__SIZEOF_SSIZE_T ${EVENT__SIZEOF_SSIZE_T_LOWER}) +elseif (EVENT__SIZEOF_SSIZE_T_UPPER) + set(EVENT__ssize_t "SSIZE_T") + set(EVENT__SIZEOF_SSIZE_T ${EVENT__SIZEOF_SSIZE_T_UPPER}) +else() + set(EVENT__ssize_t "int") + set(EVENT__SIZEOF_SSIZE_T ${EVENT__SIZEOF_INT}) +endif() + +CHECK_TYPE_SIZE(socklen_t EVENT__SIZEOF_SOCKLEN_T) +if(NOT EVENT__SIZEOF_SOCKLEN_T) + set(EVENT__socklen_t "unsigned int") + set(EVENT__SIZEOF_SOCKLEN_T ${EVENT__SIZEOF_UNSIGNED_INT}) +else() + set(EVENT__socklen_t "socklen_t") +endif() + +CHECK_TYPE_SIZE(pid_t EVENT__SIZEOF_PID_T) +if(NOT EVENT__SIZEOF_PID_T) + set(EVENT__SIZEOF_PID_T ${EVENT__SIZEOF_INT}) +else() + set(EVENT__SIZEOF_PID_T EVENT__SIZEOF_PID_T) +endif() + +if (NOT EVENT__DISABLE_THREAD_SUPPORT) + if (NOT WIN32) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES pthread.h) + endif() + CHECK_TYPE_SIZE(pthread_t EVENT__SIZEOF_PTHREAD_T) +endif() + +if(EVENT__HAVE_CLOCK_GETTIME) + set(EVENT__DNS_USE_CPU_CLOCK_FOR_ID 1) +endif() + +# we're just getting lazy now. +CHECK_TYPE_SIZE("uintptr_t" EVENT__HAVE_UINTPTR_T) +CHECK_TYPE_SIZE("void *" EVENT__SIZEOF_VOID_P) +CHECK_TYPE_SIZE("time_t" EVENT__SIZEOF_TIME_T) + +# Tests file offset bits. +# TODO: Add AIX test for if -D_LARGE_FILES is needed. + +# XXX: Why is this here? we don't even use it. Well, we don't even use it +# on top of that, why is it set in the config.h?! IT_MAKES_NO_SENSE +# I'm commenting it out for now. +# - ellzey + +#CHECK_FILE_OFFSET_BITS() + +# Verify kqueue works with pipes. +if (EVENT__HAVE_KQUEUE) + if ((CMAKE_CROSSCOMPILING OR APPLE) AND NOT EVENT__FORCE_KQUEUE_CHECK) + message(WARNING "Cannot check if kqueue works with pipes when crosscompiling, use EVENT__FORCE_KQUEUE_CHECK to be sure (this requires manually running a test program on the cross compilation target)") + set(EVENT__HAVE_WORKING_KQUEUE 1) + else() + message(STATUS "Checking if kqueue works with pipes...") + include(CheckWorkingKqueue) + endif() +endif() + +if(EVENT__HAVE_NETDB_H) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES netdb.h) + CHECK_TYPE_SIZE("struct addrinfo" EVENT__HAVE_STRUCT_ADDRINFO) +elseif(WIN32) + list(APPEND CMAKE_EXTRA_INCLUDE_FILES ws2tcpip.h) + CHECK_TYPE_SIZE("struct addrinfo" EVENT__HAVE_STRUCT_ADDRINFO) +endif() + +# Check for sockaddr structure sizes. 
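+# (The per-platform header list is assembled first; CHECK_TYPE_SIZE doubles
+# as an existence test, since it leaves its result variable empty when the
+# struct is not found, and CHECK_STRUCT_HAS_MEMBER then inspects individual
+# members using that header list.)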
+set(SOCKADDR_HEADERS) +if (WIN32) + set(CMAKE_REQUIRED_DEFINITIONS "-DWIN32_LEAN_AND_MEAN") + if (_MSC_VER LESS 1300) + set(SOCKADDR_HEADERS winsock.h) + else() + set(SOCKADDR_HEADERS winsock2.h ws2tcpip.h) + endif() +else() + if (EVENT__HAVE_NETINET_IN_H) + set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netinet/in.h) + endif() + + if (EVENT__HAVE_NETINET_IN6_H) + set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netinet/in6.h) + endif() + + if (EVENT__HAVE_SYS_SOCKET_H) + set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} sys/socket.h) + endif() + + if (EVENT__HAVE_NETDB_H) + set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netdb.h) + endif() +endif() + +CHECK_TYPE_SIZE("struct in6_addr" EVENT__HAVE_STRUCT_IN6_ADDR) +if(EVENT__HAVE_STRUCT_IN6_ADDR) + CHECK_STRUCT_HAS_MEMBER("struct in6_addr" + s6_addr16 "${SOCKADDR_HEADERS}" + EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16) + + CHECK_STRUCT_HAS_MEMBER("struct in6_addr" + s6_addr32 "${SOCKADDR_HEADERS}" + EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32) +endif() + +CHECK_TYPE_SIZE("sa_family_t" EVENT__HAVE_SA_FAMILY_T) +CHECK_TYPE_SIZE("struct sockaddr_in6" EVENT__HAVE_STRUCT_SOCKADDR_IN6) + +if(EVENT__HAVE_STRUCT_SOCKADDR_IN6) + CHECK_STRUCT_HAS_MEMBER("struct sockaddr_in6" + sin6_len "${SOCKADDR_HEADERS}" + EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN) + + CHECK_STRUCT_HAS_MEMBER("struct sockaddr_in6" + sin_len "${SOCKADDR_HEADERS}" + EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN) +endif() + +CHECK_TYPE_SIZE("struct sockaddr_storage" EVENT__HAVE_STRUCT_SOCKADDR_STORAGE) +if(EVENT__HAVE_STRUCT_SOCKADDR_STORAGE) + CHECK_STRUCT_HAS_MEMBER("struct sockaddr_storage" + ss_family "${SOCKADDR_HEADERS}" + EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY) + + CHECK_STRUCT_HAS_MEMBER("struct sockaddr_storage" + __ss_family "${SOCKADDR_HEADERS}" EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY) +endif() + +CHECK_TYPE_SIZE("struct linger" EVENT__HAVE_STRUCT_LINGER) + +# Group the source files. 
+set(HDR_PRIVATE + bufferevent-internal.h + changelist-internal.h + defer-internal.h + epolltable-internal.h + evbuffer-internal.h + event-internal.h + evmap-internal.h + evrpc-internal.h + evsignal-internal.h + evthread-internal.h + ht-internal.h + http-internal.h + iocp-internal.h + ipv6-internal.h + log-internal.h + minheap-internal.h + mm-internal.h + ratelim-internal.h + strlcpy-internal.h + util-internal.h + evconfig-private.h + compat/sys/queue.h) + +set(HDR_COMPAT + include/evdns.h + include/evrpc.h + include/event.h + include/evhttp.h + include/evutil.h) + +set(HDR_PUBLIC + include/event2/buffer.h + include/event2/bufferevent.h + include/event2/bufferevent_compat.h + include/event2/bufferevent_struct.h + include/event2/buffer_compat.h + include/event2/dns.h + include/event2/dns_compat.h + include/event2/dns_struct.h + include/event2/event.h + include/event2/event_compat.h + include/event2/event_struct.h + include/event2/http.h + include/event2/http_compat.h + include/event2/http_struct.h + include/event2/keyvalq_struct.h + include/event2/listener.h + include/event2/rpc.h + include/event2/rpc_compat.h + include/event2/rpc_struct.h + include/event2/tag.h + include/event2/tag_compat.h + include/event2/thread.h + include/event2/util.h + include/event2/visibility.h + ${PROJECT_BINARY_DIR}/include/event2/event-config.h) + +set(SRC_CORE + buffer.c + bufferevent.c + bufferevent_filter.c + bufferevent_pair.c + bufferevent_ratelim.c + bufferevent_sock.c + event.c + evmap.c + evthread.c + evutil.c + evutil_rand.c + evutil_time.c + listener.c + log.c + signal.c + strlcpy.c) + +if(EVENT__HAVE_SELECT) + list(APPEND SRC_CORE select.c) +endif() + +if(EVENT__HAVE_POLL) + list(APPEND SRC_CORE poll.c) +endif() + +if(EVENT__HAVE_KQUEUE) + list(APPEND SRC_CORE kqueue.c) +endif() + +if(EVENT__HAVE_DEVPOLL) + list(APPEND SRC_CORE devpoll.c) +endif() + +if(EVENT__HAVE_EPOLL) + list(APPEND SRC_CORE epoll.c) +endif() + +if(EVENT__HAVE_EVENT_PORTS) + list(APPEND SRC_CORE evport.c) +endif() + +if (NOT EVENT__DISABLE_OPENSSL) + find_package(OpenSSL REQUIRED) + + set(EVENT__HAVE_OPENSSL 1) + + message(STATUS "OpenSSL include: ${OPENSSL_INCLUDE_DIR}") + message(STATUS "OpenSSL lib: ${OPENSSL_LIBRARIES}") + + include_directories(${OPENSSL_INCLUDE_DIR}) + + list(APPEND SRC_OPENSSL bufferevent_openssl.c) + list(APPEND HDR_PUBLIC include/event2/bufferevent_ssl.h) + list(APPEND LIB_APPS ${OPENSSL_LIBRARIES}) +endif() + +if (NOT EVENT__DISABLE_THREAD_SUPPORT) + if (WIN32) + list(APPEND SRC_CORE evthread_win32.c) + else() + find_package(Threads REQUIRED) + if (NOT CMAKE_USE_PTHREADS_INIT) + message(FATAL_ERROR + "Failed to find Pthreads, set EVENT__DISABLE_THREAD_SUPPORT to disable") + endif() + + set(EVENT__HAVE_PTHREADS 1) + list(APPEND LIB_APPS ${CMAKE_THREAD_LIBS_INIT}) + endif() +endif() + +if (NOT EVENT__DISABLE_TESTS) + # Zlib is only used for testing. + find_package(ZLIB) + + if (ZLIB_LIBRARY AND ZLIB_INCLUDE_DIR) + include_directories(${ZLIB_INCLUDE_DIRS}) + + set(EVENT__HAVE_LIBZ 1) + list(APPEND LIB_APPS ${ZLIB_LIBRARIES}) + endif() +endif() + +set(SRC_EXTRA + event_tagging.c + http.c + evdns.c + evrpc.c) + +add_definitions(-DHAVE_CONFIG_H) + +# We use BEFORE here so we don't accidentally look in system directories +# first for some previous versions of the headers that are installed. 
+include_directories(BEFORE ${PROJECT_SOURCE_DIR} + ${PROJECT_SOURCE_DIR}/compat + ${PROJECT_SOURCE_DIR}/include) + +if(WIN32) + list(APPEND SRC_CORE + buffer_iocp.c + bufferevent_async.c + event_iocp.c + win32select.c) + + list(APPEND HDR_PRIVATE WIN32-Code/getopt.h) + + set(EVENT__DNS_USE_FTIME_FOR_ID 1) + set(LIB_PLATFORM ws2_32 shell32 advapi32) + add_definitions( + -D_CRT_SECURE_NO_WARNINGS + -D_CRT_NONSTDC_NO_DEPRECATE) + + include_directories(./WIN32-Code) +endif() + +if (SOLARIS) + list(APPEND LIB_PLATFORM socket nsl) +endif() + +source_group("Headers Private" FILES ${HDR_PRIVATE}) +source_group("Header Compat" FILES ${HDR_COMPAT}) +source_group("Headers Public" FILES ${HDR_PUBLIC}) +source_group("Source Core" FILES ${SRC_CORE}) +source_group("Source Extra" FILES ${SRC_EXTRA}) + +# Generate the configure headers. +# (Place them in the build dir so we don't polute the source tree with generated files). +include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/include) + +if (${GNUC}) + set(EVENT_SHARED_FLAGS -fvisibility=hidden) +elseif ("${CMAKE_C_COMPILER_ID}" STREQUAL "SunPro") + set(EVENT_SHARED_FLAGS -xldscope=hidden) +endif() + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/event-config.h.cmake + ${CMAKE_CURRENT_BINARY_DIR}/include/event2/event-config.h + NEWLINE_STYLE UNIX) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/evconfig-private.h.cmake + ${CMAKE_CURRENT_BINARY_DIR}/include/evconfig-private.h) + +# +# Create the libraries. +# +include(AddEventLibrary) +add_event_library(event_core SOURCES ${SRC_CORE}) +add_event_library(event_extra + LIBRARIES event_core_shared + SOURCES ${SRC_EXTRA}) + +if (NOT EVENT__DISABLE_OPENSSL) + add_event_library(event_openssl + LIBRARIES event_core_shared ${OPENSSL_LIBRARIES} + SOURCES ${SRC_OPENSSL}) +endif() + +if (CMAKE_USE_PTHREADS_INIT) + set(SRC_PTHREADS evthread_pthread.c) + add_event_library(event_pthreads + LIBRARIES event_core_shared + SOURCES ${SRC_PTHREADS}) +endif() + +# library exists for historical reasons; it contains the contents of +# both libevent_core and libevent_extra. You shouldn’t use it; it may +# go away in a future version of Libevent. +add_event_library(event SOURCES ${SRC_CORE} ${SRC_EXTRA}) + +set(WIN32_GETOPT) +if (WIN32) + list(APPEND WIN32_GETOPT + WIN32-Code/getopt.c + WIN32-Code/getopt_long.c) +endif() + +# +# Samples. 
+# +macro(add_sample_prog ssl name) + add_executable(${name} ${ARGN}) + + target_link_libraries(${name} + event_extra + event_core + ${LIB_APPS} + ${LIB_PLATFORM}) + + if (${ssl}) + target_link_libraries(${name} event_openssl) + endif() +endmacro() +if (NOT EVENT__DISABLE_SAMPLES) + set(SAMPLES + event-read-fifo + hello-world + signal-test + http-connect + time-test) + + foreach(SAMPLE ${SAMPLES}) + add_sample_prog(OFF ${SAMPLE} sample/${SAMPLE}.c) + endforeach() + + if (NOT EVENT__DISABLE_OPENSSL) + add_sample_prog(ON https-client + sample/https-client.c + sample/openssl_hostname_validation.c + sample/hostcheck.c) + add_sample_prog(ON le-proxy + sample/le-proxy.c) + endif() + + set(SAMPLES_WOPT + dns-example + http-server + ) + foreach (SAMPLE ${SAMPLES_WOPT}) + add_sample_prog(OFF ${SAMPLE} sample/${SAMPLE}.c ${WIN32_GETOPT}) + endforeach() +endif() + +# +# Benchmarks +# +macro(add_bench_prog prog) + add_executable(${prog} ${ARGN}) + target_link_libraries(${prog} + event_extra + event_core + ${LIB_APPS} + ${LIB_PLATFORM}) +endmacro() +if (NOT EVENT__DISABLE_BENCHMARK) + foreach (BENCHMARK bench_http bench_httpclient) + add_bench_prog(${BENCHMARK} test/${BENCHMARK}.c) + endforeach() + + add_bench_prog(bench test/bench.c ${WIN32_GETOPT}) + add_bench_prog(bench_cascade test/bench_cascade.c ${WIN32_GETOPT}) +endif() + +# +# Tests +# +macro(add_test_prog prog) + add_executable(${prog} test/${prog}.c) + target_link_libraries(${prog} + ${LIB_APPS} + ${LIB_PLATFORM} + event_core + event_extra + ${ARGN}) +endmacro() +if (NOT EVENT__DISABLE_TESTS) + # + # Generate Regress tests. + # + if (NOT EVENT__DISABLE_REGRESS) + # (We require python to generate the regress tests) + find_package(PythonInterp 3) + + if (PYTHONINTERP_FOUND) + set(__FOUND_USABLE_PYTHON 1) + else() + find_package(PythonInterp 2) + if (PYTHONINTERP_FOUND) + set(__FOUND_USABLE_PYTHON 1) + else() + message(ERROR "No suitable Python version found, bailing...") + endif() + endif() + + if (__FOUND_USABLE_PYTHON) + message(STATUS "Generating regress tests...") + + add_definitions(-DTINYTEST_LOCAL) + + add_custom_command( + OUTPUT + ${CMAKE_CURRENT_SOURCE_DIR}/test/regress.gen.c + ${CMAKE_CURRENT_SOURCE_DIR}/test/regress.gen.h + DEPENDS + event_rpcgen.py + test/regress.rpc + COMMAND ${PYTHON_EXECUTABLE} ../event_rpcgen.py regress.rpc + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/test) + + list(APPEND SRC_REGRESS + test/regress.c + test/regress.gen.c + test/regress.gen.h + test/regress_buffer.c + test/regress_bufferevent.c + test/regress_dns.c + test/regress_et.c + test/regress_finalize.c + test/regress_http.c + test/regress_listener.c + test/regress_main.c + test/regress_minheap.c + test/regress_rpc.c + test/regress_testutils.c + test/regress_testutils.h + test/regress_util.c + test/tinytest.c) + + if (WIN32) + list(APPEND SRC_REGRESS test/regress_iocp.c) + if (NOT EVENT__DISABLE_THREAD_SUPPORT) + list(APPEND SRC_REGRESS test/regress_thread.c) + endif() + elseif (CMAKE_USE_PTHREADS_INIT) + list(APPEND SRC_REGRESS test/regress_thread.c) + endif() + + if (ZLIB_LIBRARY AND ZLIB_INCLUDE_DIR) + list(APPEND SRC_REGRESS test/regress_zlib.c) + endif() + + if (NOT EVENT__DISABLE_OPENSSL) + list(APPEND SRC_REGRESS test/regress_ssl.c) + endif() + + add_executable(regress ${SRC_REGRESS}) + + target_link_libraries(regress + ${LIB_APPS} + ${LIB_PLATFORM} + event_core + event_extra) + if (NOT EVENT__DISABLE_OPENSSL) + target_link_libraries(regress event_openssl) + endif() + if (CMAKE_USE_PTHREADS_INIT) + target_link_libraries(regress event_pthreads) 
+ endif() + else() + message(WARNING "No suitable Python interpreter found, cannot generate regress tests!") + endif() + endif() + + # + # Test programs. + # + # all of these, including the cmakelists.txt should be moved + # into the dirctory 'tests' first. + # + # doing this, we can remove all the DISABLE_TESTS stuff, and simply + # do something like: + # + # add_custom_targets(tests) + # add_executable(... EXCLUDE_FROM_ALL ...c) + # add_dependencis(tests testa testb testc) + # add_test(....) + # + # then you can just run 'make tests' instead of them all + # auto-compile|running + # - ellzey + set(TESTPROGS test-changelist + test-eof + test-fdleak + test-init + test-time + test-weof) + + foreach (TESTPROG ${TESTPROGS} test-dumpevents) + add_test_prog(${TESTPROG}) + endforeach() + if (UNIX) + add_test_prog(test-ratelim m) + else() + add_test_prog(test-ratelim) + endif() + + set(ALL_TESTPROGS + ${TESTPROGS} + test-dumpevents + test-ratelim + ) + + # + # We run all tests with the different backends turned on one at a time. + # + + # Add event backends based on system introspection result. + set(BACKENDS "") + + if (EVENT__HAVE_EPOLL) + list(APPEND BACKENDS EPOLL) + endif() + + if (EVENT__HAVE_SELECT) + list(APPEND BACKENDS SELECT) + endif() + + if (EVENT__HAVE_POLL) + list(APPEND BACKENDS POLL) + endif() + + if (EVENT__HAVE_KQUEUE) + list(APPEND BACKENDS KQUEUE) + endif() + + if (EVENT__HAVE_EVENT_PORTS) + list(APPEND BACKENDS EVPORT) + endif() + + if (EVENT__HAVE_DEVPOLL) + list(APPEND BACKENDS DEVPOLL) + endif() + + if (WIN32) + list(APPEND BACKENDS WIN32) + endif() + + + # Default environment variables turns off all event systems, + # then we enable each one, one at a time when creating the tests. + set(DEFAULT_TEST_ENV_VARS "EVENT_SHOW_METHOD=1;") + foreach(BACKEND ${BACKENDS}) + set(BACKEND_ENV_VAR "EVENT_NO${BACKEND}=1") + list(APPEND DEFAULT_TEST_ENV_VARS "${BACKEND_ENV_VAR}") + endforeach() + + # Macro that creates the ctest test for a backend. + macro(add_backend_test BACKEND_TEST_NAME ENV_VARS) + set(TEST_NAMES "") + + foreach (TESTPROG ${TESTPROGS}) + set(TEST_NAME ${TESTPROG}__${BACKEND_TEST_NAME}) + + add_test(${TEST_NAME} + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TESTPROG}) + + list(APPEND TEST_NAMES ${TEST_NAME}) + + set_tests_properties(${TEST_NAME} + PROPERTIES ENVIRONMENT "${ENV_VARS}") + endforeach() + + # Dump events test. + if (__FOUND_USABLE_PYTHON) + set(TEST_NAME test-dumpevents__${BACKEND_TEST_NAME}) + + add_test(${TEST_NAME} + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-dumpevents | + ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test/check-dumpevents.py) + + set_tests_properties(${TEST_NAME} + PROPERTIES ENVIRONMENT "${ENV_VARS}") + else() + message(WARNING "test-dumpevents will be run without output check since python was not found!") + set(TEST_NAME test-dumpevents__${BACKEND_TEST_NAME}_no_check) + + add_test(${TEST_NAME} + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-dumpevents) + + set_tests_properties(${TEST_NAME} + PROPERTIES ENVIRONMENT "${ENV_VARS}") + endif() + + # Regress tests. 
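+    # (Two regress runs are registered per backend below: one with the plain
+    # backend environment and one with EVENT_DEBUG_MODE=1 appended, so the
+    # same suite is also exercised with debug mode enabled.)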
+ if (NOT EVENT__DISABLE_REGRESS AND __FOUND_USABLE_PYTHON) + set(TEST_NAME regress__${BACKEND_TEST_NAME}) + + add_test(${TEST_NAME} + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/regress) + + set_tests_properties(${TEST_NAME} + PROPERTIES ENVIRONMENT "${ENV_VARS}") + + add_test(${TEST_NAME}_debug + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/regress) + + set_tests_properties(${TEST_NAME}_debug + PROPERTIES ENVIRONMENT "${ENV_VARS};EVENT_DEBUG_MODE=1") + endif() + endmacro() + + # Add the tests for each backend. + foreach(BACKEND ${BACKENDS}) + # Enable this backend only. + set(BACKEND_ENV_VARS ${DEFAULT_TEST_ENV_VARS}) + list(REMOVE_ITEM BACKEND_ENV_VARS EVENT_NO${BACKEND}=1) + + # Epoll has some extra settings. + if (${BACKEND} STREQUAL "EPOLL") + add_backend_test(timerfd_${BACKEND} + "${BACKEND_ENV_VARS};EVENT_PRECISE_TIMER=1") + + add_backend_test(changelist_${BACKEND} + "${BACKEND_ENV_VARS};EVENT_EPOLL_USE_CHANGELIST=yes") + + add_backend_test(timerfd_changelist_${BACKEND} + "${BACKEND_ENV_VARS};EVENT_EPOLL_USE_CHANGELIST=yes;EVENT_PRECISE_TIMER=1") + else() + add_backend_test(${BACKEND} "${BACKEND_ENV_VARS}") + endif() + endforeach() + + # + # Rate limiter tests. + # + + # Group limits, no connection limit. + set(RL_BIN ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-ratelim) + + add_test(test-ratelim__group_lim + ${RL_BIN} + -g 30000 + -n 30 + -t 100 + --check-grouplimit 1000 + --check-stddev 100) + + # Connection limit, no group limit. + add_test(test-ratelim__con_lim + ${RL_BIN} + -c 1000 + -n 30 + -t 100 + --check-connlimit 50 + --check-stddev 50) + + # Connection limit and group limit. + add_test(test-ratelim__group_con_lim + ${RL_BIN} + -c 1000 + -g 30000 + -n 30 + -t 100 + --check-grouplimit 1000 + --check-connlimit 50 + --check-stddev 50) + + # Connection limit and group limit with independent drain. + add_test(test-ratelim__group_con_lim_drain + ${RL_BIN} + -c 1000 + -g 35000 + -n 30 + -t 100 + -G 500 + --check-grouplimit 1000 + --check-connlimit 50 + --check-stddev 50) + + # Add a "make verify" target, same as for autoconf. + # (Important! This will unset all EVENT_NO* environment variables. + # If they are set in the shell the tests are running using simply "ctest" or "make test" will fail) + if (WIN32) + # Windows doesn't have "unset". But you can use "set VAR=" instead. + # We need to guard against the possibility taht EVENT_NOWIN32 is set, and all test failing + # since no event backend being available. + file(TO_NATIVE_PATH ${CMAKE_CTEST_COMMAND} WINDOWS_CTEST_COMMAND) + + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.bat + " + set EVENT_NOWIN32= + \"${WINDOWS_CTEST_COMMAND}\" + ") + + message(STATUS "${WINDOWS_CTEST_COMMAND}") + + file(COPY ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.bat + DESTINATION ${CMAKE_CURRENT_BINARY_DIR} + FILE_PERMISSIONS + OWNER_READ + OWNER_WRITE + OWNER_EXECUTE + GROUP_READ + GROUP_EXECUTE + WORLD_READ WORLD_EXECUTE) + + file(TO_NATIVE_PATH + "${CMAKE_CURRENT_BINARY_DIR}/verify_tests.bat" VERIFY_PATH) + + add_custom_target(verify COMMAND "${VERIFY_PATH}" + DEPENDS event ${ALL_TESTPROGS}) + else() + # On some platforms doing exec(unset) as CMake does won't work, so make sure + # we run the unset command in a shell instead. + # First we write the script contents. 
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.sh + " + #!/bin/bash + unset EVENT_NOEPOLL; unset EVENT_NOPOLL; unset EVENT_NOSELECT; unset EVENT_NOWIN32; unset EVENT_NOEVPORT; unset EVENT_NOKQUEUE; unset EVENT_NODEVPOLL + ${CMAKE_CTEST_COMMAND} + ") + + # Then we copy the file (this allows us to set execute permission on it) + file(COPY ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.sh + DESTINATION ${CMAKE_CURRENT_BINARY_DIR} + FILE_PERMISSIONS + OWNER_READ + OWNER_WRITE + OWNER_EXECUTE + GROUP_READ + GROUP_EXECUTE + WORLD_READ + WORLD_EXECUTE) + + # Create the target that runs the script. + add_custom_target(verify + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/verify_tests.sh + DEPENDS event ${ALL_TESTPROGS}) + endif() + + if (NOT EVENT__DISABLE_REGRESS AND __FOUND_USABLE_PYTHON) + add_dependencies(verify regress) + endif() + + if (EVENT__COVERAGE) + include(CodeCoverage) + + setup_target_for_coverage( + verify_coverage # Coverage target name "make verify_coverage" + make # Test runner. + coverage # Output directory. + verify) # Arguments passed to test runner. "make verify" + endif() + + enable_testing() + + include(CTest) +endif() + +# +# Installation preparation. +# + +if(WIN32 AND NOT CYGWIN) + set(DEF_INSTALL_CMAKE_DIR cmake) +else() + set(DEF_INSTALL_CMAKE_DIR lib/cmake/libevent) +endif() + +set(EVENT_INSTALL_CMAKE_DIR + "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}" + CACHE PATH "Installation directory for CMake files") + +export(PACKAGE libevent) + +# Generate the config file for the build-tree. +set(EVENT__INCLUDE_DIRS + "${PROJECT_SOURCE_DIR}/include" + "${PROJECT_BINARY_DIR}/include") + +set(LIBEVENT_INCLUDE_DIRS + ${EVENT__INCLUDE_DIRS} + CACHE PATH "Libevent include directories") + +configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfigBuildTree.cmake.in + ${PROJECT_BINARY_DIR}/LibeventConfig.cmake + @ONLY) + +# Generate the config file for the installation tree. +# Calculate the relative directory from the Cmake dir. +file(RELATIVE_PATH + REL_INCLUDE_DIR + "${EVENT_INSTALL_CMAKE_DIR}" + "${CMAKE_INSTALL_PREFIX}/include") + +# Note the LIBEVENT_CMAKE_DIR is defined in LibeventConfig.cmake.in, +# we escape it here so it's evaluated when it is included instead +# so that the include dirs are given relative to where the +# config file is located. +set(EVENT_INSTALL_INCLUDE_DIR "\${LIBEVENT_CMAKE_DIR}/${REL_INCLUDE_DIR}") + +configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfig.cmake.in + ${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/LibeventConfig.cmake + @ONLY) + +# Generate version info for both build-tree and install-tree. +configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfigVersion.cmake.in + ${PROJECT_BINARY_DIR}/LibeventConfigVersion.cmake + @ONLY) + +# Install compat headers +install(FILES ${HDR_COMPAT} + DESTINATION "include" + COMPONENT dev) + +# Install the configs. +install(FILES + ${PROJECT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/LibeventConfig.cmake + ${PROJECT_BINARY_DIR}/LibeventConfigVersion.cmake + DESTINATION "${EVENT_INSTALL_CMAKE_DIR}" + COMPONENT dev) + +# Install exports for the install-tree. +install(EXPORT LibeventTargets + DESTINATION "${DEF_INSTALL_CMAKE_DIR}" + COMPONENT dev) + +# Install the scripts. +install(PROGRAMS + ${CMAKE_CURRENT_SOURCE_DIR}/event_rpcgen.py + DESTINATION "bin" + COMPONENT runtime) + +# Create documents with doxygen. 
+find_program(DOXYGEN doxygen)
+if (DOXYGEN)
+	add_custom_target(doxygen
+		COMMAND ${DOXYGEN} Doxyfile
+		WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+else()
+	message(WARNING "The doxygen target is not supported because the doxygen command was not found!")
+endif()
+
+
+# Create the uninstall target.
+# https://gitlab.kitware.com/cmake/community/wikis/FAQ#can-i-do-make-uninstall-with-cmake
+configure_file(${PROJECT_SOURCE_DIR}/cmake/Uninstall.cmake.in
+	${PROJECT_BINARY_DIR}/Uninstall.cmake
+	@ONLY)
+
+add_custom_target(uninstall
+	COMMAND ${CMAKE_COMMAND} -P ${PROJECT_BINARY_DIR}/Uninstall.cmake)
+
+
+message(STATUS "")
+message(STATUS "        ---( Libevent " ${EVENT_VERSION} " )---")
+message(STATUS "")
+message(STATUS "Available event backends: ${BACKENDS}")
+message(STATUS "CMAKE_BINARY_DIR:         ${CMAKE_BINARY_DIR}")
+message(STATUS "CMAKE_CURRENT_BINARY_DIR: ${CMAKE_CURRENT_BINARY_DIR}")
+message(STATUS "CMAKE_SOURCE_DIR:         ${CMAKE_SOURCE_DIR}")
+message(STATUS "CMAKE_CURRENT_SOURCE_DIR: ${CMAKE_CURRENT_SOURCE_DIR}")
+message(STATUS "PROJECT_BINARY_DIR:       ${PROJECT_BINARY_DIR}")
+message(STATUS "PROJECT_SOURCE_DIR:       ${PROJECT_SOURCE_DIR}")
+message(STATUS "CMAKE_MODULE_PATH:        ${CMAKE_MODULE_PATH}")
+message(STATUS "CMAKE_COMMAND:            ${CMAKE_COMMAND}")
+message(STATUS "CMAKE_ROOT:               ${CMAKE_ROOT}")
+message(STATUS "CMAKE_SYSTEM:             ${CMAKE_SYSTEM}")
+message(STATUS "CMAKE_SYSTEM_NAME:        ${CMAKE_SYSTEM_NAME}")
+message(STATUS "CMAKE_SYSTEM_VERSION:     ${CMAKE_SYSTEM_VERSION}")
+message(STATUS "CMAKE_SYSTEM_PROCESSOR:   ${CMAKE_SYSTEM_PROCESSOR}")
+message(STATUS "CMAKE_SKIP_RPATH:         ${CMAKE_SKIP_RPATH}")
+message(STATUS "CMAKE_VERBOSE_MAKEFILE:   ${CMAKE_VERBOSE_MAKEFILE}")
+message(STATUS "CMAKE_C_FLAGS:            ${CMAKE_C_FLAGS}")
+message(STATUS "CMAKE_BUILD_TYPE:         ${CMAKE_BUILD_TYPE}")
+message(STATUS "CMAKE_C_COMPILER:         ${CMAKE_C_COMPILER} (id ${CMAKE_C_COMPILER_ID}, clang ${CLANG}, GNUC ${GNUC})")
+message(STATUS "CMAKE_AR:                 ${CMAKE_AR}")
+message(STATUS "CMAKE_RANLIB:             ${CMAKE_RANLIB}")
+message(STATUS "")
+
diff --git a/probe-busybox/libevent-2.1.11-stable/CONTRIBUTING.md b/probe-busybox/libevent-2.1.11-stable/CONTRIBUTING.md
new file mode 100644
index 00000000..38756939
--- /dev/null
+++ b/probe-busybox/libevent-2.1.11-stable/CONTRIBUTING.md
@@ -0,0 +1,35 @@
+# Contributing to libevent
+
+## Coding style
+
+First and most generic rule: **just look around**.
+
+But we have a script for checking patches/files/git-refs:
+```shell
+# Check HEAD git ref
+./checkpatch.sh -r
+./checkpatch.sh -r HEAD
+
+# Check patch
+git format-patch --stdout -1 | ./checkpatch.sh -p
+git show -1 | ./checkpatch.sh -p
+
+# Or via regular files
+git format-patch --stdout -2
+./checkpatch.sh *.patch
+
+# Over a file
+./checkpatch.sh -d event.c
+./checkpatch.sh -d < event.c
+
+# And print the whole file, not only a summary
+./checkpatch.sh -f event.c
+./checkpatch.sh -f < event.c
+
+# See
+./checkpatch.sh -h
+```
+
+## Testing
+- Write a new unit test in `test/regress_{MORE_SUITABLE_FOR_YOU}.c`
+- `make verify`
diff --git a/probe-busybox/libevent-2.1.11-stable/ChangeLog b/probe-busybox/libevent-2.1.11-stable/ChangeLog
new file mode 100644
index 00000000..e89d5a81
--- /dev/null
+++ b/probe-busybox/libevent-2.1.11-stable/ChangeLog
@@ -0,0 +1,2081 @@
+Changes in version 2.1.11-stable (01 Aug 2019)
+
+ This release contains one ABI breakage fix (that had been introduced in
+ 2.1.10, and strictly speaking this release breaks ABI again to make it
+ compatible with 2.1.9 and less, please take a look at 18104973 for more
+ details).
Apart from that it contains some bug fixes, that grouped below. + + And even though the return value for evbuffer_setcb() had been changed it + should ABI compatible (anyway that function is in -compat.h header). + + There is also one patch that introduce new functionality, this is 546a366c, + to tune SO_RCVBUF/SO_SNDBUF in evdns, but one can count it as a bug-fix on + the application level, since before you cannot tune this settings and hence + you could stumble on problems. + + ABI breakage: + o Protect min_heap_push_ against integer overflow. (8c899768 Tobias Stoeckmann) + o Revert "Protect min_heap_push_ against integer overflow." (18104973 Azat Khuzhin) + + functionality: + o evdns: add new options -- so-rcvbuf/so-sndbuf (546a366c Azat Khuzhin) + + build: + o Change autoconf version to 2.62 and automake version to 1.11.2 (2a333008 yuangongji) + o cmake: install shared library only if it was requested (596855f7 Azat Khuzhin) + o Missing on win7/MinGW(MINGW32_NT-6.1)/MSYS (9559349c yuangongji) + o cmake: set library names to be the same as with autotools (305251b9 yuangongji) + o Enable _GNU_SOURCE for Android (f013fc7d Keith Smiley) + o Enable kqueue for APPLE targets (3aa68a82 Keith Smiley) + o autotools: do not install bufferevent_ssl.h under --disable-openssl (5349a07e Azat Khuzhin) + o cmake: link against shell32.lib/advapi32.lib (c9ce638c Azat Khuzhin) + o Add README.md into dist archive (3660a4cc Azat Khuzhin) + o cmake: add missing autotools targets (doxygen, uninstall, event_rpcgen.py) (2d65071c yuangongji) + o m4/libevent_openssl.m4: fix detection of openssl (d4056e59 Fabrice Fontaine) + o Fix detection of the __has_attribute() for apple clang [ci skip] (7fd7c5ef Azat Khuzhin) + + lib: + o buffer: fix possible NULL dereference in evbuffer_setcb() on ENOMEM (598f247d Azat Khuzhin) + o Warn if forked from the event loop during event_reinit() (b75922ae Azat Khuzhin) + o evutil: set the have_checked_interfaces in evutil_check_interfaces() + (ef498aa2, a09265ac jeremyerb) + + samples: + o https-client: correction error checking (a8a04565 wenyg) + + +Changes in version 2.1.10-stable (26 May 2019) + + This release contains mostly fixes (some evbuffer oddity, AF_UNIX handling in + http server, some UB fixes and others) but also some new functionality + (without ABI breakage as usual) and now dist archive can be used for building + on windows (getopt had been added into it). + + Above you will find changelog for this particular release (but with some + trivial fixes pruned out from it - to make it a little bit more informative). + + To view full changelog please use git: + git log --format=' o %s (%h %aN)' release-2.1.9-beta...release-2.1.10-stable + + dist: + o Add getopt into dist archive (7042ff24 Azat Khuzhin) + + functionality: + o evdns: add DNS_OPTION_NAMESERVERS_NO_DEFAULT/EVDNS_BASE_NAMESERVERS_NO_DEFAULT + (58e81106 Azat Khuzhin) + o Add support for EV_TIMEOUT to event_base_active_by_fd (3f893f0a John Ohl) + + fixes: + o Merge branch 'evbuffer-fixes-806-v2' (2fea04b3 Azat Khuzhin) + o Merge branch 'issue-807-accept4-getnameinfo-AF_UNIX' (7c4da937, e2790a7f + Azat Khuzhin) + o kqueue: Avoid undefined behaviour. (e70e18e9 Tobias Stoeckmann) + o Prevent integer overflow in kq_build_changes_list. (43a55a23 Tobias Stoeckmann) + o evdns: fix lock/unlock mismatch in evdns_close_server_port() (54103883 zhuizhuhaomeng) + o Merge remote-tracking branch 'official/pr/804' -- Enforce limit of NSIG + signals (87fa93a8 Tobias Stoeckmann) + o Protect min_heap_push_ against integer overflow. 
(0b46bb8c Tobias Stoeckmann) + o le-proxy: initiate use of the Winsock DLL (2a1e1530 linxiaohui) + o Fix leaks in error path of the bufferevent_init_common_() (bb0f8fe7 Azat Khuzhin) + o buffer: make evbuffer_prepend() of zero-length array no-op (61fa7b7d Azat Khuzhin) + o Merge branch 'evbuffer-empty-chain-handling' (6a3dd717 Azat Khuzhin) + o Don't loose top error in SSL (3d1a7a1d Yury Korzhetsky) + o Remove needless check for arc4_seeded_ok (6602a97d Seong-Joong Kim) + o Merge pull request #769 from sungjungk/fix-return-handling (91084140 Nathan French) + + build: + o Define `_GNU_SOURCE` properly/consistently per autoconf (00ba9fa2 Enji Cooper) + o signal: guard __cdecl definition with #ifdef (d89045a6 Azat Khuzhin) + o Link test/regress with event_core/event_extra over event (22380996 Azat Khuzhin) + + tests: + o Use kill() over raise() for raising the signal (fixes osx 10.14 with + kqueue) (3db5296b, a45f6733 Azat Khuzhin) + o tinytest: implement per-test timeout (via alarm() under !win32 only) + (b64dbfb6, 75d7e1ff Azat Khuzhin) + +Changes in version 2.1.9-beta (10 February 2019) + + This changelog will differs from other releases in the next few clauses: + - contains only highlighted changes (so now it will not contains a lot of + patches that fixes some stuff in regression tests, typos, leaks fixes in + samples and so forth) + - no authors (since merge commits breaks them anyway, but AUTHORS sections in + README will be kept up to date) + - group name trimmed from commit subjects trimmed + - it's been 2 years since the previoius release, so it is pretty huge + + And I think that this is more useful, so from now on it will always has the + same look (until there will too many objections of course). + + To view full changelog please use git: + git log --format=' o %s (%h %aN)' release-2.1.8-stable...release-2.1.9-beta + + + dist archive: + o Add cmake rules into dist archive (bf3a67cf) + o Add missing print-winsock-errors.c into dist archive (822d6462) + o Include openssl-compat.h into dist archive (08658136) + + core: + o Merge branch 'check-O_NONBLOCK-in-debug' (a39898f3, a8155c62) + o Merge branch 'event-ET-#636-v2' (ca4b6404) + o Fix visibility issues under (mostly on win32) + (349081e1g, 802be13ag, a1f28e2f) + o Define __EXT_POSIX2 for QNX (a2176f2c) + o Cleanup __func__ detection (b3af7bdd) + o Add convenience macros for user-triggered events (06ec5de6) + o Notify event base if there are no more events, so it can exit without delay (d9d1c09e) + o Fix base unlocking in event_del() if event_base_set() runned in another thread (4f0f40e3) + o If precise_time is false, we should not set EVENT_BASE_FLAG_PRECISE_TIMER (27dee54d) + o Fix race in access to ev_res from event loop with event_active() (43d92a6d) + o Return from event_del() after the last event callback termination (876c7ac7) + + http: + o Merge branch 'http-EVHTTP_CON_READ_ON_WRITE_ERROR-fixes-v2' (eb7b472b) + o Preserve socket error from listen across closesocket cleanup (2ccd00a6) + o fix connection retries when there more then one request for connection (d30e7bba) + o improve error path for bufferevent_{setfd,enable,disable}() (a8cc449e) + o Fix conceivable UAF of the bufferevent in evhttp_connection_free() (6ac2ec25) + o Merge branch 'http-request-line-parsing' (cdcfbafe) + o Fix evhttp_connection_get_addr() fox incomming http connections (4215c003) + o fix leaks in evhttp_uriencode() (123362e9) + o CONNECT method only takes an authority (7d1ffe64) + o Allow bodies for GET/DELETE/OPTIONS/CONNECT (23eb38b9) + o Do not 
crash when evhttp_send_reply_start() is called after a timeout. (826f1134) + o Fix crashing http server when callback do not reply in place (5b40744d, b2581380) + o fix handling of close_notify (ssl) in http with openssl bufferevents (7e91622b) + + evrpc: + o use *_new_with_arg() to match function prototype (a95cc9e3) + o avoid NULL dereference on request is not EVHTTP_REQ_POST (e05136c7) + + regression tests: + o Merge branch 'TT_RETRIABLE' (6ea1ec68, f9b592aa) + + bufferevent: + o Merge branch 'iocp-fixes' (6bfac964) + o Merge branch 'be-wm-overrun-v2' (3f692fff) + o bufferevent_socket_connect{,_hostname}() missing event callback and use ret code (1dde74ef) + o don't fail be_null_filter if bytes are copied (b92b0792) + o Call underlying bev ctrl GET_FD on filtered bufferevents (ebfac517) + + bufferevent_openssl/openssl: + o Merge branch 'ssl_bufferevent_wm_filter-fix' (30020a35) + o be_openssl: avoid leaking of SSL structure (e86ccfe5) + o Fix build with LibreSSL 2.7 (894ca48a) + o Add missing includes into openssl-compat.h (01bc36c1) + o Explicitly call SSL_clear when reseting the fd. (29b7a516) + o Unbreak build with LibreSSL after openssl 1.1 support added (230af9f0) + + samples: + o Merge branch 'sample-http-server' (b6309bcc) + o sample/https-client: use host SSL certificate store by default (5c0132f3) + + listener: + o ipv6only socket bind support (ba148796) + o Merge branch 'listener-immediate-close' (df2ed13f) + o Merge branch 'evconnlistener-do-not-close-client-fd' (42e851bb) + + evdns: + o evdns: handle NULL filename explicitly (0033f5cc) + o Merge branch 'evdns_getaddrinfo-race-fix' (3237d697) + o Generating evdns_base_config_windows_nameservers docs on all platforms (3bd2ce43) + + utils: + o Merge branch 'evutil_found_ifaddr-dev' (b07e43e6) + o Avoid possible SEGVs in select() (in unit tests) (8818c86c) + o Port `event_rpcgen.py` and `test/check-dumpevents.py` to Python 3. (532a8cc3) + + buffer: + o Fix assert() condition in evbuffer_drain() for IOCP (d6326104) + o fix incorrect unlock of the buffer mutex (for deferred callbacks) (2b4d127d) + o Fix wrong assert in evbuffer_drain() (9f4d0dce) + + cmake: + o fix checking of devpoll backend (like in autotools, by devpoll.h existence) (7f161902) + o support static runtime (MSVC) (c8b3ec17, 61fb055a) + o do not build both (SHARED and STATIC) for MSVC/win32 (bc7f2fd9) + o introduce EVENT__LIBRARY_TYPE option (eb10a738) + o ensure windows dll's are installed as well as lib files (29590718) + o Fix generation of LibeventConfig.cmake for the installation tree (7fa08c4b) + o fix pkgconfig generation (copy-paste typo) (cc554d87) + o Merge branch 'cmake-missing-bits' (9806b126) + o Fix detection of timerfd_create() in CMake. 
(e50af331) + o Merge branch 'cmake-configure-fixes-v2' (a0bfe2c4) + o Do not add epoll_sub (syscall wrappers) for epoll in cmake (cea61de6) + o Fix RPATH for APPLE (45b1f379) + + autotools: + o include win32 specific headers for socklen_t detection on win32/mingw (d7579fb9) + o Ignore evconfig-private.h for autotools (37423849) + o config.h can't be prefixed unconditionally (63a054f8) + o Merge branch 'pull-628' (7e56c8b2) + o Provide Makefile variables LIBEVENT_{CFLAGS,CPPFLAGS,LDFLAGS} (2f060c5f) + o confirm openssl is working before using (b39ccf8e) + o pass $(OPENSSL_INCS) for samples (FTBFS macOS) (c2495265) + o Add configure check for midipix (d433201e) + o Fix tests with detached builds (c46ff439) + + build: + o Fix arc4random_addrandom() detecting and fallback (regression) (303d6d77) + o Merge branch 'win32-fixes' (ebd12e6d) + o Merge branch 'fix-openssl-linking' (e7bd9e03) + o Merge branch 'fix-struct-linger' (8567f2f5) + + CI: + o travis-ci/appveyor now uses fast_finish+allow_failures + (5e97b6e6, dd472e7d, dfb5fc167) + o Merge branch 'travis-ci-osx-fixes' (9f02b39c) + o Merge branch 'win64-fixes' (aee0fcd5) + + +Changes in version 2.1.8-stable (22 January 2017) + + Libevent 2.1.8-stable, it contains openssl fixes for resetting fd and using + bufferevent_openssl_filter_new(). vagrant fixes, some build fixes, increased + timeout for some tests (to reduce number of failures due to timing issues), + date in RFC1123 format and running tests in parallel. + + There are highlighted changes above. + + Build fixes: + o Fix _FILE_OFFSET_BITS redinition (solaris/autotools) (336f3b11 Azat Khuzhin) + o util-internal: fix __func__ redefinition (netbsd) (253e7fa9 Azat Khuzhin) + o Fix signedness differ for iov_base (solaris) (2c62062e Azat Khuzhin) + o evutil_time: include when there is only sleep()/usleep() (3e75194c Azat Khuzhin) + o http: fix formatter for pritnf for req->ntoread (osx) (1cbf26f6 Azat Khuzhin) + Testing environment: + o Merge branch 'automake-tests-parallel-v4' (*includes ci bits also*) (59e217df Azat Khuzhin) + Vagrant env fixes: + o vagrant/netbsd: missing libtool (9c9be399 Azat Khuzhin) + o vagrant/netbsd: more reliable way of installing packages (36da6877 Azat Khuzhin) + o vagrant/osx: use make instead of gmake (there is no gmake) (f7c70aef Azat Khuzhin) + o vagrant: add centos box (ca591c5b Azat Khuzhin) + Tests: + o test/dns: replace servname since solaris does not have "http" (d6bafbbe Azat Khuzhin) + o test/thread: netbsd is too slow, increase timeout for conditions_simple (3c7422fc Azat Khuzhin) + o test/dns: run async resolving after sync one (to avoid timeouts) (07862531 Azat Khuzhin) + o test/http: turn off some tests that based on backlog filling (falky) (26f416c1 Azat Khuzhin) + Bugfixes: + o Merge branch 'openssl-filter-fixes-v4' (83e0f43b Azat Khuzhin) + o Merge branch 'date-rfc1123' (68def435,4798de6c,4545807d Azat Khuzhin) + o Merge branch 'be-openssl-fd-reset-fix-v2' (86fa0070,32adf434 Azat Khuzhin) + o Merge branch 'openssl-1.1-init-fixes-v2' (18a161f0 Azat Khuzhin) + o Fix incorrect MIME type (23f9a20e johnsonlee) + Trivial fixes: + Documentation updates: + o Update README.md (3821cca1 Breaker) + + +Changes in version 2.1.7-rc (2 Novemer 2016) + + Libevent 2.1.7-rc contains openssl 1.1 support, build fixes, CI improvements + and plus Vagrantfile for testing under multiple OS'es. 
+ + + Continious Integration: + o Use coveralls.io via travis (9ac000c Azat Khuzhin) + o travis-ci: use container-based infrastructure (7e12e96 Azat Khuzhin) + o travis-ci/osx: fix compiling/linking openssl libraries (9d2f8d4 Azat Khuzhin) + o travis-ci: use gcc-5 (fixes osx|gcc failures) (d7ceae5 Azat Khuzhin) + o Testing with vagrant for 6 OS and cmake+autoconf (9585338 Azat Khuzhin) + o travis-ci/osx: install lcov (e4e099b Azat Khuzhin) + + Build Improvements/Fixes: + o Fix cmake -DEVENT__COVERAGE=ON (40fbffc Azat Khuzhin) + o autogen.sh: learn about gmake (9376ac4 Azat Khuzhin) + o autogen.sh: remove all autoconf/automake caches, if any (69cce25 Azat Khuzhin) + o cmake: fix finding python2, and check that it is really 2 (3453c08 Azat Khuzhin) + o cmake: fix CheckFunctionExistsEx/CheckPrototypeDefinition (CMP0054) (43b69b2 Azat Khuzhin) + o cmake: cleanup (dc624ad Zonr Chang) + o cmake/win32: fix running regress, but fixing finding python2 interpreter (bcb990a Azat Khuzhin) + o cmake: use PYTHON_EXECUTABLE to find python2 (a4d044c Azat Khuzhin) + o Merge branch 'force-disable-clockgettime' (83c7cdf Azat Khuzhin) + + Code Improvements (core) + o use ev_uint16_t instead of unsigned short for port (e983712 Thomas Bernard) + o Merge branch 'contrib-guide-v2' (b9c5077 Azat Khuzhin) + o poll: Prevent libevent from spinning if POLLNVAL occurs (675974c Tim Hentenaar) + + Testing: + o test/regress: cover a polling of invalid fd (cb0df5c Tim Hentenaar) + + Code Improvements (bufferevent_openssl) + o Make it build using OpenSSL 1.1.0 (3e9e0a0 Kurt Roeckx) + o Don't call BIO_number_{read|written} on NULL BIOs. (6702da1 Adam Langley) + o Switch from a 512 to 2048-bit RSA key. (f9803a6 Adam Langley) + + Trivial fixes: + o Ignore temporary configure files (8fb08ae Azat Khuzhin) + o README.md: fix typo: ar -> are (2361616 Simone Basso) + o be: just a simple mistake, reinclude the (7521664 Seven) + +Changes in version 2.1.6-beta (4 July 2016) + + Libevent 2.1.6-beta contains mostly bug fixes (evbuffers, evthread, evdns, + bufferevents, core, http, samples), improvements but mostly to fix some + possible issues (EVHTTP_CON_LINGERING_CLOSE), a lot of new unit tests and new + appveyor integration. + + Security Fixes (utils) + o evutil_parse_sockaddr_port(): fix buffer overflow (329acc1 Azat Khuzhin) + + Security Fixes (evdns) + o evdns: name_parse(): fix remote stack overread (96f64a0 Azat Khuzhin) + o evdns: fix searching empty hostnames (ec65c42 Azat Khuzhin) + + New APIs (evdns) + o New function to get address for nameserver. 
(537177d Nick Mathewson) + + New APIs (bufferevents) + o expose bufferevent_incref/decref (with fewer modifications) (1ed6718 Mark Ellzey) + + New APIs (internal) + o evdns: export cancel via callbacks in util (like async lib core/extra issues) (8cbe65d Azat Khuzhin) + + New APIs/Improvements (http) + o http: take EVHTTP_CON_LINGERING_CLOSE into account for "Expect: 100-Continue" (ac448a7 Azat Khuzhin) + o http: lingering close (like nginx have) for entity-too-large (9fde518 Azat Khuzhin) + o http: read server response even after server closed the connection (680742e Azat Khuzhin) + o http: export evhttp_connection_set_family() (714fc70 Azat Khuzhin) + o http: reuse connected address only with EVHTTP_CON_REUSE_CONNECTED_ADDR (a50f5f0 Azat Khuzhin) + o http: use IP address that we got before (if any) during retrying (54c887d Azat Khuzhin) + + Bugfixes (core) + o Fix getaddrinfo under solaris (for multiprotocol case) (40730ae Azat Khuzhin) + o Check for Mac OS X 10.4 kqueue bug properly (df6f99e Mark Mentovai) + o event_reinit: make signals works after fork() without evsig_add() (88640aa Nicholas Marriott) + o event_reinit: always re-init signal's socketpair (ad0c237 Nicholas Marriott) + o Free event queues even for recursive finalizers (7c8d015 Azat Khuzhin) + o Fix checking for make_base_notifiable() (f337296 Azat Khuzhin) + o Set correct socklen for PF_INET6 sockaddr len (3499ad9 Mark Ellzey) + o Fix garbage value in socketpair util function, stdint? (043ae74 Mark Ellzey) + o fix the return value of event_deferred_cb_schedule_ (38cef64 Greg Hazel) + o event_free_debug_globals_locks(): disable lock debugging (e5c87d1 Azat Khuzhin) + o event: call event_disable_debug_mode() in libevent_global_shutdown() (941faae Azat Khuzhin) + o ht-internal: don't reset hth_table_length explicitly in name_##HT_CLEAR (597c7b2 Azat Khuzhin) + + Bugfixes (evthread) + o evthread: fix evthread_setup_global_lock_() for debug-lock with a real-lock case (e4556fc Azat Khuzhin) + o evthread: evthreadimpl_disable_lock_debugging_() for libevent_global_shutdown() (ccc5593 Azat Khuzhin) + + Bugfixes (evdns) + o evdns: avoid double-free in evdns_base_free() for probing requests (4db15e0 Azat Khuzhin) + o evdns: evdns_base_free(): fix UAF of evdns_base with @fail_requests (00313c5 Azat Khuzhin) + o evdns: evdns_base_free(): free requests before namservers (14f84bb Azat Khuzhin) + o evdns: fix randomize-case by make case-insensitive as required (9c238de Azat Khuzhin) + + Bugfixes (bufferevents) + o be_sock: handle readv() returns ECONNREFUSED (freebsd 9.2) (3189eb0 Azat Khuzhin) + o be_filter: avoid data stuck under active watermarks (b627ad8 Eduardo Panisset) + o Fix bufferevent_pair to properly set BEV_EVENT_{READING,WRITING} on flush. 
(2851889 David Paschich) + o be_openssl: clear all pending errors before SSL_*() calls (38e0f4a Azat Khuzhin) + o be_sock: cancel in-progress dns requests (86dfd2c Azat Khuzhin) + o be_sock: unfreeze buffers on fd changing (255525d Azat Khuzhin) + o be_sock: bufferevent_socket_connect_hostname(): make it thread-safe (809bb39 Azat Khuzhin) + o be_openssl: don't call do_write() directly from outbuf_cb (da52933 Azat Khuzhin) + o be_openssl: use bufferevent_enable() instead of bufferevent_add_event_() (0c66d32 Azat Khuzhin) + o be_openssl: don't add events during bev creation (like be_sock) (f4b6284 Azat Khuzhin) + o Fix lock leak in be_pair_flush() if flush type is BEV_NORMAL (f45d39d Bill Vaughan) + o be_openssl: don't use *_auto() in do_handshake() we can't have fd == -1 there (877280d Azat Khuzhin) + o be_openssl: don't call set_open_callbacks() if fd == -1 (e8a2da9 Azat Khuzhin) + o be_openssl: get rid off hackish "fd_is_set", to fix some corner cases (40b0379 Azat Khuzhin) + o be: we don't need to use getpeername() we we have conn_address (2c271e2 Azat Khuzhin) + o Call underlying bev ctrl SET_FD on filtered bufferevents (c2aa7dc Mark Ellzey) + o be_pair: release shared lock with the latest of bufferevent_pair (92a359e Azat Khuzhin) + + Bugfixes (http) + o [Issue #313] set method to ASCII "NULL" if evhttp_method() returns NULL (17cc636 Mark Ellzey) + o evhttp_have_expect(): fix -Wlogical-not-parentheses (24b5214 Azat Khuzhin) + o http: set fd to -1 unconditioally, to avoid leaking of DNS requests (7a4b472 Azat Khuzhin) + o http: avoid leaking of fd in evhttp_connection_free() (f0e1341 Azat Khuzhin) + o http: get fd from be layer during connection reset (4a53c54 Azat Khuzhin) + o http: fix EVHTTP_CON_READ_ON_WRITE_ERROR when it doesn't supported by OS (2ff164a Azat Khuzhin) + o http: do not do function calls under EVUTIL_ASSERT() to fix NDEBUG builds (7c89999 Azat Khuzhin) + o http: fix leaking of response_code_line (8f18a62 Azat Khuzhin) + o http: fix "Expect: 100-continue" client side (0b46b39 Azat Khuzhin) + o http: fix conflicts EVHTTP_CON_AUTOFREE and EVHTTP_CON_REUSE_CONNECTED_ADDR (4dc0979 Azat Khuzhin) + o http: avoid epoll_ctl() on already closed fd (triggers by http/chunk_out) (ab3bc69 Azat Khuzhin) + o http: install timeout for read too during connect for ssl (040000d Azat Khuzhin) + o http: fix evhttp_request_own() by checking EVHTTP_USER_OWNED in more cases (b0d3964 Azat Khuzhin) + o http: fix detecting EOF without write (7ed02ac Azat Khuzhin) + o evhttp: Fix failure to send all output data for POST/PUT requests (24eea0d John Ohl) + o Fix evhttp_uriencode() regression. (c6b1ec1 Mark Ellzey) + o removed unused vars (e94250c Mark Ellzey) + o pointer overflow checks for evhttp_uriencode (72afe4c Zonr Chang) + + Bugfixes (evbuffers) + o buffer: fix overflow check in evbuffer_expand_singlechain() (a3f4ccd Azat Khuzhin) + o buffer: evbuffer_add_buffer(): clean empty chains from destination buffer (26fd932 Azat Khuzhin) + o Fix n_add_for_cb in evbuffer_prepend() in case of new buffer required (0abd039 Azat Khuzhin) + o be_filter: actually disable output_filter during processing output (c031215 Simon Perreault) + o evbuffer_add: Use last_with_datap if set, not last. 
(a8769ef Marcus Sundberg) + o EVBUFFER_PTR_SET -> EVBUFFER_PTR_ADD (8674e4f jer-gentoo) + + Bugfixes (evconnlistener) + o listener: unlock lev on error in listener_read_cb() (2a71b33 Azat Khuzhin) + o Fix potential fd leak in listener_read_cb() (a695a72 Mark Ellzey) + + Testing + o tests: use waitpid(..., WNOWAIT) to fix failing of main/fork under solaris (43eb56c Azat Khuzhin) + o test: replace sleeping with syncing pair in main/fork (16d220c Azat Khuzhin) + o test/http: do not run tests that based on backlog filling (freebsd) (500b6b7 Azat Khuzhin) + o test/bufferevent/iocp: fix test name for "bufferevent_connect_fail_eventcb" (4410e9d Azat Khuzhin) + o test/ssl: use send()/recv()/EVUTIL_ERR_RW_RETRIABLE()/EVUTIL_SOCKET_ERROR() to fix win32 (a9e8cd6 Azat Khuzhin) + o test/https_basic: increase timeout for complete write (fixes win32) (d5a2f2f Azat Khuzhin) + o test: fix building with --disable-thread-support under win32 (a487706 Azat Khuzhin) + o test/buffer: evbuffer_add_buffer() with empty chains (a272bc4 Azat Khuzhin) + o test/buffer: evbuffer_remove_buffer() with empty chains (prepend) (f0cfa14 Azat Khuzhin) + o test/buffer: evbuffer_remove_buffer() with empty chains (evbuffer_add_buffer()) (2880ce6 Azat Khuzhin) + o test/buffer: cover evbuffer_expand() for overflow (48dab7a Azat Khuzhin) + o test/be_filter: creating test case for data stuck with active watermarks (766194b Eduardo Panisset) + o test/http: avoid using conditionals with omitted operands (fixes VS2015) (2a4bf29 Azat Khuzhin) + o test/http: don't mix declarations and code (fixes -Wdeclaration-after-statement) (aabf1c2 Azat Khuzhin) + o test/buffer: fix leak in test_evbuffer_prepend() (c08d90b Azat Khuzhin) + o test/buffer: avoid errors with --no-fork (reinitialize static vars) (e7d1e39 Azat Khuzhin) + o test/buffer: cover n_add_for_cb when evbuffer_prepend() need to allocate buffer (e77ff41 Azat Khuzhin) + o test/tinytest_macros: add new one tt_nstr_op() (bd19a28 Azat Khuzhin) + o test/bufferevent: check that output_filter disabled during processing output (ae28812 Azat Khuzhin) + o test/listener: regression for missing unlock in listener_read_cb() (7d85651 Azat Khuzhin) + o test/regress: add tests for evbuffer_add() breakage on empty last chain (d5ee739 Marcus Sundberg) + o test/http: fix running some tests sequential (with --no-fork) (bddad71 Azat Khuzhin) + o test/http: localize evhttp server structure (cbc3209 Azat Khuzhin) + o test/dns: regression for empty hostname (d7348ba Azat Khuzhin) + o test/http: fix SERVER_TIMEOUT tests under win32 (d49a658 Azat Khuzhin) + o test/http: add a helper for creating timedout/failed request (376f107 Azat Khuzhin) + o test/http: adopt for C90 (mixed code and declarations) (d02a285 Azat Khuzhin) + o test/http: cover NS timed out during request cancellations separatelly (0c343af Azat Khuzhin) + o test/http: request cancellation with resolving/{conn,write}-timeouts in progress (334340d Azat Khuzhin) + o test/http: exit from the loop in the errorcb to wait cancellation (927ab33 Azat Khuzhin) + o regress_clean_dnsserver(): reset global port vars (351207f Azat Khuzhin) + o test/http: read_on_write_error: fix it for win32 (3b58169 Azat Khuzhin) + o test/http: separate coverage for EVHTTP_CON_READ_ON_WRITE_ERROR (5c2b4c1 Azat Khuzhin) + o test/http: cover "Expect: 100-continue" client-server interaction (31d8116 Azat Khuzhin) + o test/http: *lingering tests shouldn't have "Expect: 100-continue" (ed469ab Azat Khuzhin) + o test: use EVUTIL_SHUT_WR (04fc82f Azat Khuzhin) + o test/http: avoid 
huge stack allocations to fix win32 builds (3166765 Azat Khuzhin) + o test: http/lingering_close: cover EVHTTP_SERVER_LINGERING_CLOSE (e122ca1 Azat Khuzhin) + o test: http/non_lingering_close: cover ~EVHTTP_SERVER_LINGERING_CLOSE (f41e1b0 Azat Khuzhin) + o test: http/*: update expected HTTP codes for body exceeds `max_body_size` (addf2b9 Azat Khuzhin) + o test: http/data_length_constrains: set EVHTTP_CON_READ_ON_WRITE_ERROR (d38a723 Azat Khuzhin) + o test: increase buffer size for http/data_length_constraints to trigger EPIPE (0792e1e Azat Khuzhin) + o test/tinytest_demo: include for win32 to fix tdm-gcc (f062bbe Azat Khuzhin) + o test/regress: cover event_del() waiting mechanism (5b58b70 Azat Khuzhin) + o test/regress: cover existing signal callbacks and fork() + event_reinit() (ceddc60 Azat Khuzhin) + o test/regress: cover signals after fork() + event_reinit() (b075b81 Azat Khuzhin) + o test/regress: main/fork: rewrite assertions by just removing event in callback (088d8b3 Azat Khuzhin) + o test/dns: check exit code of evdns_getaddrinfo() (0b9d432 Azat Khuzhin) + o test/dns: cover evdns_getaddrinfo() and evdns_base_free() with @fail_requests (4ad3483 Azat Khuzhin) + o test/dns: cover @fail_requests for evdns_base_free() (d6c6fb4 Azat Khuzhin) + o test/dns: more graceful coverage of @fail_requests (123d372 Azat Khuzhin) + o test/ssl: cover busy-loop (i.e. {read,write}-blocked-on-{write,read} stuff) (da0ea7a Azat Khuzhin) + o test/http: write_during_read for https (23c77b6 Azat Khuzhin) + o test/http: connection_fail for https (7ea26f7 Azat Khuzhin) + o test/http: stream_out for https (ac04968 Azat Khuzhin) + o test/http: chunk_out for https (a71ffb9 Azat Khuzhin) + o test/regress: fix ssl-less builds (need to make this prettier) (3160716 Azat Khuzhin) + o test/http: allow dirty shutdown for ssl to fix https_incomplete (1ede326 Azat Khuzhin) + o test/http: https basic (59714b4 Azat Khuzhin) + o test/http: incomplete{,_timeout} for https (615490d Azat Khuzhin) + o test/http: add simplest test for http/https/https_dirty_shutdown (93b19dc Azat Khuzhin) + o test/http: https: retry coverage (7c2d24a Azat Khuzhin) + o test/http: https server support (plus some helpers) (a7088ad Azat Khuzhin) + o test/http: more sanity checks (a27c53c Azat Khuzhin) + o test/ssl: export getkey()/getcert()/get_ssl_ctx()/init_ssl() for https (0c4c387 Azat Khuzhin) + o test/regress_be: basic coverage bufferevent_flush() for pair/sock layers (ad52602 Azat Khuzhin) + o test/regress_be: socket_filter_inactive: check bufferevent after creation (f8081af Azat Khuzhin) + o test/regress_be: cover finalizers from inactive to active queue (337684b Azat Khuzhin) + o test/regress_buffer: fix clang compilation warnings (d8fd4c0 Azat Khuzhin) + o test/regress_http: fix compilation warnings (-Wmissing-field-initializers) (cd422e0 Azat Khuzhin) + o test/regress_dns: fix compilation warnings (-Wmissing-field-initializers/for) (f55db98 Azat Khuzhin) + o tests/regress_dns: cover that randomize-case works case-insensitive (1e8bfbc Azat Khuzhin) + o test: fix bufferevent/bufferevent_pair_release_lock in debug mode (3f749e9 Azat Khuzhin) + o test: fix bufferevent/bufferevent_pair_release_lock for freebsd (79f9ace Azat Khuzhin) + o test/regress_be: bufferevent_enable() shouldn't call eventcb by it's own (a0f308d Azat Khuzhin) + o test/regress_be: introduce fake_listener_create() (37dc9e0 Azat Khuzhin) + o test/regress_http: cover evhttp_request_own() (6f6fa0d Azat Khuzhin) + o test/regress_http: cover write during read (3d15aeb Azat 
Khuzhin) + o test/regress_http: verify that closecb will be called without multiple write (4be6c70 Azat Khuzhin) + o test/regress: fix bufferevent_pair_release_lock with EVENT_DEBUG_MODE (6ea6655 Azat Khuzhin) + o test/regress_ssl: check events fd/pending after timeout triggered (cdafdf0 Azat Khuzhin) + o test/regress_ssl: cover case when server didn't up (failed with timeout) (74845f1 Azat Khuzhin) + o test/regress_ssl: covert that we can't change fd with underlying (df507af Azat Khuzhin) + o test/regress_ssl: cover that events (read/write) at finish not pending (762edb4 Azat Khuzhin) + o test/regress_ssl: cover fd manipulations (b78a829 Azat Khuzhin) + o test/regress_ssl: convert open_ssl_bufevs() to mask (46bba73 Azat Khuzhin) + o test/regress_ssl: convert client/server to mask too (3455991 Azat Khuzhin) + o test/regress_ssl: cover "allow_dirty_shutdown" (0430327 Azat Khuzhin) + o test/regress_ssl: convert regress_bufferevent_openssl() to bitmask (342e116 Azat Khuzhin) + o tests/regress_ssl: drop duplicated assert (25e56fd Azat Khuzhin) + o test/regress_http: initialize "dns_base" to avoid reading trash (9f0bff3 Azat Khuzhin) + o test/http: cover retrying with saved conn_address by shutting down dns server (f4874d8 Azat Khuzhin) + o be_pair/regress: cover use of shared lock (lock/unlock/free) (a558fcd Azat Khuzhin) + o regress_dns: drop hack for event_debug_map_HT_GROW in leak tests (3540a19 Azat Khuzhin) + + Sample code + o Fix memory leak in signal-test.c (666db91 basavesh.as) + o sample/hello-world: exAmple, not eXMple (2d3cd35 kirillDanshin) + o dns-example: allow to set ns from args (df19a97 Azat Khuzhin) + o dns-example: convert to getopt() (32f8592 Azat Khuzhin) + o http-connect: make it win32 compilable (1bf7595 Azat Khuzhin) + o sample/https-client: allow to change path to ca-certificates (fdf713a Azat Khuzhin) + o sample/https-client: check for ERR_remove_thread_state() existence (c4e9d9b Azat Khuzhin) + o sample/https-client: replace ERR_remove_state() by ERR_remove_thread_state() (77ad68a Azat Khuzhin) + o sample/https-client: add -timeout option (4637aa8 Azat Khuzhin) + o sample/https-client: don't try to free uninitialized SSL (f3d7ff5 Azat Khuzhin) + o sample/https-client: graceful exit with freeing memory (to make valgrind happy) (24a1f25 Azat Khuzhin) + o https-client: correctly handle URLs with no path (like "https://host:port") (29a0482 Andrey Skriabin) + o sample/http-connect: don't use assert() to make it work with NDEBUG (6dc71e7 Azat Khuzhin) + o sample/http-connect: made it compatible with C90 (f976d43 Azat Khuzhin) + o sample: add HTTP CONNECT tunnelling example using libevent http layer (1d34498 Azat Khuzhin) + o Update dns-example. 
(620ff24 Mark Ellzey) + + Documentation + o Update README.md (b8ec70c Mark Ellzey) + o Update README.md (80faee9 Mark Ellzey) + o Update README.md (ad4a897 Mark Ellzey) + o Update README.md (a2b2e1e Mark Ellzey) + o Update README.md (0dfa5dc Mark Ellzey) + + Code Improvements (evthread) + o evthread: add evthread_get_{lock,condition}_callbacks() helpers (c0b34f6 Azat Khuzhin) + + Code Improvements (core) + o util: make @sa const for evutil_socket_connect_() (a8d32c2 Azat Khuzhin) + + Code Improvements (http) + o http: assert's that evbuffer_drain() success on connection reset (2185e63 Azat Khuzhin) + o http: introduce evhttp_request_free_() helper (22061ac Azat Khuzhin) + o http: introduce evhttp_is_request_connection_close() helper (6540da3 Azat Khuzhin) + + Code Improvements (bufferevents) + o be_sock: bufferevent_socket_set_conn_address(): assert instead of silent no-op (0ab88c2 Azat Khuzhin) + o be_sock: sanity check in bufferevent_socket_set_conn_address() (eedbeff Azat Khuzhin) + o be: replace sockaddr_storage with sockaddr_in6 for conn_address (3889612 Azat Khuzhin) + o be: replace conn_address by full struct instead of pointer (e5615aa Azat Khuzhin) + o bufferevent: move conn_address out from http into bufferevent (8bb3842 Azat Khuzhin) + o be: make @sa const for bufferevent_socket_connect() (dc33c78 Azat Khuzhin) + + Cleanups (core) + o Refactoring conditional directives that break parts of statements. (4b41eeb lzmths) + o epoll: introduce PRINT_CHANGES() macro to avoid copy-pasting (a1b142b Azat Khuzhin) + o tab (6e7a580 Greg Hazel) + + Cleanups (evbuffers) + o buffer_compat: fix comment -- we have EVBUFFER_EOL_ANY not EOL_STYLE_ANY (575ff67 Azat Khuzhin) + + Cleanups (bufferevents) + o be_sock: evutil_getaddrinfo_async_() always return 0 (dbff101 Azat Khuzhin) + o be_sock: drop be_sock_add() macro (useless and debug unfriendly) (fad5fe2 Azat Khuzhin) + o be: introduce bufferevent_generic_adj_existing_timeouts_() (3c1f58f Azat Khuzhin) + o be: add_event: use evutil_timerisset() (a96b73b Azat Khuzhin) + o be_openssl: introduce be_openssl_auto_fd() helper (2a8a711 Azat Khuzhin) + o be_openssl: introduce set_open_callbacks_auto() (510da71 Azat Khuzhin) + + Cleanups (http) + o http: make fallback for EVHTTP_CON_READ_ON_WRITE_ERROR more cleaner (d405492 Azat Khuzhin) + o http: coding style issue (365f181 Azat Khuzhin) + + Cleanups (evdns) + o evnds: inline TEST_NAME macro to make debuggin easier (0c615f4 Azat Khuzhin) + + Portability Fixes + o [#372] check for errno.h (3031617 Mark Ellzey) + o Fixed Unicode issue in error messages. (e8b7895 Mattes D) + o Assume that ke_udata is an integer type on CloudABI. (5602e45 Ed Schouten) + o Add missing include of . (b2c68bc Ed Schouten) + o Include , and optionally. (c1404b5 Ed Schouten) + o Test against SO_REUSEADDR (along with _WIN32). (ce1776c Ed Schouten) + o Always define missing TAILQ functions from sys/queue.h (2828bdb Christopher Wiley) + o Don't use BSD u_* types. (fd36647 Ed Schouten) + o Remove BSD-ism: TIMEVAL_TO_TIMESPEC(). 
(193c7de Ed Schouten) + o be: include all variations of headers for sockaddr_in6 struct (c212291 Azat Khuzhin) + o be: fix sockaddr_in6 type definition for win32 (c42bc6b Azat Khuzhin) + + Continious Integration: + o travis: split long lines, and make it cleaner (685a6a1 Azat Khuzhin) + o travis: fix autotools on osx by reinstalling libtool (088ea5e Azat Khuzhin) + o appveyor/autotools: link with openssl by passing LDFLAGS/CFLAGS (6fcfa25 Azat Khuzhin) + o appveyor: image already had openssl installed (4634b85 Azat Khuzhin) + o appveyor: check -DUNICODE -D_UNICODE according to ReleaseChecklist (cmake only) (e9acc44 Azat Khuzhin) + o appveyor: ignore failure of mingw-get (1810857 Azat Khuzhin) + o appveyor: drop shallow_clone, since we use tags for detecting version in cmake (ac90133 Azat Khuzhin) + o appveyor: support cmake & autotools using build matrix (like travis-ci has) (8f95015 Azat Khuzhin) + o travis-ci/osx: relink gcc/g++ instead of clang (481481d Azat Khuzhin) + o travis-ci: enable multi-os mode (osx, linux) (79917e4 Azat Khuzhin) + o travis-ci: increase matrix (--disable-foo) (59649f7 Azat Khuzhin) + o travis-ci: adjust alignment (c8be339 Azat Khuzhin) + o travis: add builds without debug mode into matrix (3e56da2 Azat Khuzhin) + o test: run regress with EVENT_DEBUG_MODE=1 and without (cf2cf2a Azat Khuzhin) + o Update travis config for status updates (37453ab Mark Ellzey) + o Use autotools for appveyor until cmake is fixed. (1cc2e29 Mark Ellzey) + o Fix the link for appveyor OpenSSL installer (WIN32) (107d565 Mark Ellzey) + o Forgot to install OpenSSL for appveyor (26164a5 Joakim Söderberg) + o Add support for appveyor.com windows CI (5f89c37 Joakim Söderberg) + + Build Improvements/Fixes: + o evutil: mark ai_find_protocol() static (prototype-less) (5a157c8 Azat Khuzhin) + o cmake/solaris: set CMAKE_REQUIRED_LIBRARIES to fix functions detections (dc95823 Azat Khuzhin) + o cmake/solaris: fix building (link with socket,nsl) (050bfc7 Azat Khuzhin) + o cmake: check for ZLIB_INCLUDE_DIR, since we can have only library without headers (c4dfb93 Azat Khuzhin) + o autotools/win32: fix searching ssl library (671a24f Azat Khuzhin) + o cmake/win32: do not compile regress_thread on -DEVENT__DISABLE_THREAD_SUPPORT=ON (de0c196 Azat Khuzhin) + o cmake/win32: do not compile evthread_win32 on -DEVENT__DISABLE_THREAD_SUPPORT=ON (ecb0ec8 Azat Khuzhin) + o cmake: fix -DEVENT__ENABLE_VERBOSE_DEBUG (typo on -DUSE_DEBUG) (e35f224 Azat Khuzhin) + o cmake: do not use stderr for notifications/version-info (38716c6 Azat Khuzhin) + o autoconf: fix --disable-thread-support build under win32 (bb09535 Azat Khuzhin) + o buffer: don't mix code and declarations (8892f4c Azat Khuzhin) + o Update gitignore file to ignore cscope gen'ed files (0aaa4fb Neeraj Badlani) + o For non GCC/clang on OSX the -Wno-deprecated-declarations may not be valid (b5ca365 Rainer Keller) + o automake: define serial-tests only if automake have this option (61179de Azat Khuzhin) + o test/automake: don't use paralell test harness (since automake 1.12) (44d755e Azat Khuzhin) + o Ignore all pkgconfig generated stuff (ce38993 Azat Khuzhin) + o libevent_core and libevent_extra also deserve a pkgconfig file (b8d7c62 Jan Heylen) + o Ignore verify_tests.bat (win32 version) (0f2de10 Azat Khuzhin) + o cmake: require 3.1 only for win32 to make it work under ubunty precise (87f7238 Azat Khuzhin) + o cmake: require at least 3.1 for target_sources() (c46ead5 Azat Khuzhin) + o cmake: fix adding of compiler flags, and now it will (36588e1 Azat Khuzhin) + 
o Replace -Wswitch-enum with -Wswitch, and add it into cmake rules too (f29f59e Azat Khuzhin) + o test/regress_ssl: Fix compile problems for win32 (73d0360 Trond Norbye) + o util: fix "%zu" format on TDM-gcc/MinGW-w64 (79b69d8 Azat Khuzhin) + o cmake: don't define EVENT__NEED_DLLIMPORT always (fixes VS2013 static build) (49bd790 Azat Khuzhin) + o Add missing return statement to del_wait_thread so libevent can build. (4f778ab Nick Mathewson) + o cmake: fix building dns-example under win32 (missing getopt) (a1609a8 Azat Khuzhin) + o visibility: align it to make it more readable (bb6b53d Azat Khuzhin) + o cmake: Fix detection of ssize_t/SSIZE_T (7707f6b Azat Khuzhin) + o Ignore more configure stuff (configure.lineno) (8d34302 Azat Khuzhin) + o Fixed issue with cmake version generation (d56efd9 Mark Ellzey) + o Cmake is now officially working. (7f9646d Mark Ellzey) + o More cmake updates, lot's of missing definitions (49a5381 Mark Ellzey) + o CMake syntax fixes fo .in files (6aad23d Mark Ellzey) + o Revert "The Windows socket type is defined as SOCKET." (a264da8 Mark Ellzey) + o CMAKE CMAKE CMAKE CLEANUPS (a9db46a Mark Ellzey) + o Lot's of cmake updates (8b228e2 Mark Ellzey) + o Provide a mechanism for building the library on Windows with different compiler flags. Add a batch file that builds it for the M[DT][d] options and performs a hunt and gather of the different output libraries. (ded8086 billsegall) + o The Windows socket type is defined as SOCKET. (c9e6c3d billsegall) + o autotools: fix getservbyname() detection (959a4c2 Azat Khuzhin) + o Add missing for openssl_hostname_validation module (3316a21 Azat Khuzhin) + o make test/regress_ssl.c compile without warnings (9f02a44 Thomas Bernard) + o test/regress_be: drop debug __asm__(int3) to fix arm build (8240379 Azat Khuzhin) + o event_debug_created_threadable_ctx_: fix compilation without debug mode (a068f2e Azat Khuzhin) + o Add a prototype for event_disable_debug_mode() (bfcedee Sebastian Hahn) + o http: eliminate warning about "socklen" in evhttp_connection_connect_() (dfad1a4 Azat Khuzhin) + o Updated gitignore (1dbb55d Mark Ellzey) + o Update bench_httpclient.c (cb96931 Seungmo Koo) + o *fix: bench_httpclient to support win32 (4e9325e zeliard) + o Commented out a WIN32 threading / timing test for now (e84e269 Mark Ellzey) + o Fix mixed declarations and code (forbidden by ISO C90) (0c7f217 Thomas Bernard) + o Fix "function declaration isn’t a prototype" (746d2c5 Thomas Bernard) + o This fixes a bug introduced in 27bd9faf498b91923296cc91643e03ec4055c230 (19ba454 Joakim Söderberg) + o changed strtotimeval signature as per #211 (bdbc823 Xiao Bao Clark) + o Added cmake-generated files to ignore list. (6c12bfe Matyas Dolak) + o Ignore `make dist` generated files (8a2c6c7 Azat Khuzhin) + + Debugging + o Debug mode option to error on evthread init AFTER other event calls. (dcfb19a Mark Ellzey) + + + +Changes in version 2.1.5-beta (5 January 2015) + + Security Fixes (evbuffers) + o Avoid integer overflow bugs in evbuffer_add() and related functions. See CVE-2014-6272 advisory for more information. (d49bc0e88b81a5812116074dc007f1db0ca1eecd) + + New APIs (evconnlistener) + o Provide support for SO_REUSEPORT through LEV_OPT_REUSABLE_PORT (b625361 Maciej Soltysiak) + + Bugfixes (core) + o Fix use-after-free error in EV_CLOSURE_EVENT callback (3cc0eac John Ohl) + o Fix race caused by event_active (3c7d6fc vjpai) + + Bugfixes (evbuffer) + o Fix evbuffer_peek() with len==-1 and start_at non-NULL. 
(ba59923) + o Consistently check for failure from evbuffer_pullup() (60f8f72) + o Fix evbuffer_peek() with len==-1 and start_at non-NULL. (fb7e76a) + + Bugfixes (windows, IOCP) + o be async: avoid double close() (f133b86 Azat Khuzhin) + + Bugfixes (bufferevents) + o Fix issue #127, double free for filterevents that use BEV_OPT_CLOSE_ON_FREE (2c82aa0 John Ohl) + o make bufferevent_getwatermark api more robust (a21e510 ufo2243) + o [Bugfix] fix bufferevent setwatermark suspend_read (b34e4ac ufo2243) + o bufferevent_openssl: reset fd_is_set when setfd with -1 is called (3da84c2 Azat Khuzhin) + o Fix compilation for older OpenSSL versions. (5c7282f Joakim Soderberg) + + New APIs (evhttp) + o Add evhttp_connection_set_family() to set addrinfo->family for DNS requests (12c29b0 Azat Khuzhin) + o Implement interface that provides the ability to have an outbound evhttp_connection free itself once all requests have completed (2b9ec4c,10fe4f John Ohl) + + New APIs (core) + o Implement new/free for struct evutil_monotonic_timer and export monotonic time functions (f2645f8 Andrea Shepard) + + Bugfixes (evdns) + o Load hosts file on Windows. (a0b247c Vilmos Nebehaj) + o Don't truncate hosts file path on Windows. (d0dc861 Vilmos Nebehaj) + o Fix a crash in evdns related to shutting down evdns (9f39c88,e8fe749) + o evdns: avoid read-after-free in evdns_request_timeout_callback() (61262a0 Azat Khuzhin) + o Correctly handle allocation failures in evdns_getaddrinfo (6a53d15) + o evdns: fix EVDNS_BASE_DISABLE_WHEN_INACTIVE in case retransmit/retry (74d0eee Azat Khuzhin) + o evdns: add retry/reissue tests for EVDNS_BASE_DISABLE_WHEN_INACTIVE (3ca9d43 Azat Khuzhin) + o evdns: fail ns after we are failing/retrasmitting request (97c750d Azat Khuzhin) + + Bugfixes (evhttp) + o http: reset connection before installing retry timer (fix http retries handling) (bc79cc5 Azat Khuzhin) + + + Testing + o regress_dns: fix leaks in getaddrinfo_async{,_cancel_stress} tests (2fdc5f2 Azat Khuzhin) + o test: add family argument for http_connection_test_() (177b8a7 Azat Khuzhin) + o test: add regress for evhttp_connection_set_family() with AF_INET and AF_UNSPEC (42aefeb Azat Khuzhin) + o test/http: add regress test for set family to AF_INET6 (3fbf3cc Azat Khuzhin) + o Update to a more recent tinytest_macros. (8da5a18) + o test/regress: add simplestsignal: to track reorder bugs separately (b897bef Azat Khuzhin) + o test/evbuffer_peek: add regress in case we have first buffer greater (e2d139d Azat Khuzhin) + o More evbuffer_peek() test cases (154006a) + o use correct tt macro for pointer compare (08c88ea) + o regress_buffer: fix 'memcmp' compare size (79800df Maks Naumov) + o Fix a use-after-free in unit tests. CID 752027 (3739057) + o Fix a dead-code warning in unit tests. CID 1193548 (c119f24) + o Use evutil_weakrand() in unit tests. (a677b72, 364c110) + o Use a more precise calculation for max in time-ratelim.c (ca5b5c7) + o Make a buffer larger in the tests to avoid a scary evbuffer_copyout_from() (fb57b8b) + o Fix several memory leaks in the unit tests. 
(89c1a3b) + o Add test for evhttp_connection_free_on_completion (b0e9924 John Ohl) + o Fix annoying heisenbug in test-time.c (cb73704) + + Sample code + o Make http-server.c output into good html5 (6d72bdc) + o Use FindClose for handle from FindFirstFile in http-server.c (6466e88) + o https-client: add -retries argument, for connection retries (d9da844 Azat Khuzhin) + + Bugfixes (build) + o Add missing headerfile for cmake (15d90cc Trond Norbye) + o ignore one more test binary (b6593aa Michael Richardson) + o ignore config.cache/test-driver files (c83f333 Mike Frysinger) + o add a --disable-samples configure flag (0c492b3 Mike Frysinger) + o Add a few files created by "make verify" to .gitignore. (1a8295a Pierre Phaneuf) + o updates in cmake build (27bd9fa Sergey Nikulov) + o Fix cmake error when the Module path has more than one entry. (befbd13 Acer Yang) + o Fix CMake shared library build (e69d910 Nobuaki Sukegawa) + o Fix warnings when compiling with clang 3.5 (f5b4765 John Ohl) + o Fix mixed declarations and code (forbidden by ISO C90) (8afbdbc Thomas Bernard) + + Bugfixes (miscellaneous) + o tree.h: drop duplicated content of tree.h (6193187 Azat Khuzhin) + o evdns: disable probing with EVDNS_BASE_DISABLE_WHEN_INACTIVE (610410b,ad0493e,fea86a6,d83b337,5ca9e97 Azat Khuzhin) + o [Bugfix] fix grammer error (3a4d249 ufo2243) + o Change return type of evutil_load_windows_system_library_ to HMODULE (f691389) + o Fix a c90 warning (76643dd) + o Fix a typo in a doxygen comment. Reported by 亦得. (be1aeff) + o remove trailing comma from enum (b361b8a Jean-Philippe Ouellet) + + Bugfixes (FreeBSD) + o Handle ENOTCAPABLE from FreeBSD - this is returned if an event in the changelist is for an FD that has been closed. (6fd7394 Adrian Chadd) + + + +Changes in version 2.1.4-alpha (21 Mar 2014) + + Libevent 2.1.4-alpha adds a number of new miscellaneous APIs to make + Libevent more useful, including support for early close detection with + epoll via EPOLLRDHUP, triggering bufferevent callbacks, adding more + evhttp callbacks, and more. There are also numerous bugfixes, including + a number for finalize-related issues from 2.1.3-alpha; and an + alternative (non-primary!) cmake-based build mechanism. + + New APIs (core) + o Added event_base_get_num_events() (0fa107d Mobai Zhang) + o New event_base_active_by_fd API (865a142 Greg Hazel, 5c9da9a, 87fa2b0) + o Add event_base_active_by_signal by analogy (4865943) + o Add access to max event count stats (5173bef, efbd3dc, 26230a2 + Andrew Sweeney) + o Implemented EV_CLOSED event for epoll backend + (EPOLLRDHUP). (b1b69ac Diego Giagio, 53d2793, 43ffcf6, dfe1e52 + Marcin Juszkiewicz, ff26633 Joakim Soderberg, 3908a5e) + + New APIs (evutil_secure_rng) + o Add evutil_secure_rng_set_urandom_device_file (2bbb5d7) + + New APIs (bufferevents) + o Add function to fetch underlying ratelimit cfg (4b3d5af Mark Ellzey) + o Pass and return const for bufferevent_get_token_bucket_cfg (1c77fbb + Mark Ellzey) + o Add watermark introspection (4ce242b Ondřej Kuzník) + o Add an option to trigger bufferevent I/O callbacks (61ee18b Ondřej Kuzník) + o Add an option to trigger bufferevent event callbacks (a7384c7 + Ondřej Kuzník) + o Clarifications in response to merge req. comments (bd41947 Ondřej + Kuzník) + o Minor optimizations on bufferevent_trigger options (a3172a4) + + New APIs (evhttp) + o Add evhttp_connection_get_server(). (a7f82a3 Maxime Henrion) + o add a http default content type option (5a5acd9 Nicolas Martyanoff) + o http: implement new evhttp_connection_get_addr() api. 
(0c7f040 Azat + Khuzhin) + o Add a variant of evhttp_send_reply_chunk() with a callback on + evhttp_write_buffer() (8d8decf Julien BLACHE) + o Allow registering callback for parsing HTTP headers (b0bd7fe Balint Reczey) + o Provide on request complete callback facility (b083ca0 Andrew Sweeney) + o evhttp_request_set_on_complete_cb to be more specific about what + the function actually does and usage (da86dda Andrew Sweeney) + o Update unit test to make sure that the callback happens after the + output data is written (b85f398 Andrew Sweeney) + + Features (evdns) + o bug fix for issues #293 evdns_base_load_hosts doesn't remove + outdated addresses (954d2f9, f03d353, 45eba6f Kuldeep Gupta) + + Features: (cmake build support) + o Initial CMake commit. (e415196 Joakim Soderberg) + o Add all tests and benchmarks to CMake project. (e9fc014 Joakim Soderberg) + o More work on adding tests to CMake project (99c1dc3 Joakim Soderberg) + o Generate a dummy evconfig-private.h so things build + properly. (ce14def Joakim Soderberg) + o Link libm on unix platforms. (58fcd42 Joakim Soderberg) + o Added some GCC specific options. (19222e5 Joakim Soderberg) + o Use evutil_closesocket instead. (dbf2b51 Joakim Soderberg) + o Add copyright and licensing files for CMake modules. (c259d53 + Joakim Soderberg) + o Only include WIN32 getopt where it is used. (9bbce0b Joakim Soderberg) + o Fix bench_cascade program on Windows. (78da644 Joakim Soderberg) + o Don't segfault on no found event backend. (8f2af50 Joakim Soderberg) + o Only test the event backends available on the system. (7ea4159 + Joakim Soderberg) + o Added a "make verify" target. (e053c4f Joakim Soderberg) + o Fix the make "verify" target on Windows. (67e5d74 Joakim Soderberg) + o Get rid of deprecation warnings for OpenSSL on OSX 10.7+ (69c3516 + Joakim Söderberg) + o Fix kqueue support. (a831f2f Joakim Söderberg) + o Added a test for testing if kqueue works with pipes. (2799b35 + Joakim Söderberg) + o Change the BSD license from 4 to 3-clause. (86df3ed Joakim Soderberg) + o Minimum required python version is 2.4. (968e97b Joakim Soderberg) + o Get rid of unknown pragma warnings. (0ef1d04 Joakim Soderberg) + o Add a "make verify_coverage" target generation coverage + info. (f2483f8 Joakim Soderberg) + o Fix the "make verify" target on NetBSD (4ac086a Joakim Soderberg) + o Only look for ZLib when it is used (if tests are + included). (f780593 Joakim Soderberg) + o Added EVENT__ENABLE_GCC_WARNINGS, turns all warnings into + errors. (dd413bd Joakim Soderberg) + o Add CMake config and install targets. (f3446ed Joakim Soderberg) + o Fix typo (4b754df Joakim Soderberg) + o Some work on making it possible to simply do add_subdirectory() on + the project. (49ab363 Joakim Soderberg) + o Set USE_DEBUG=1 on EVENT__ENABLE_VERBOSE_DEBUG (fd42e70 Joakim Soderberg) + o Fix so that old nmake project still builds. (24d6466 Joakim + Soderberg) + o Rename README to README.md and use markdown to format. (d2bc39a + Joakim Soderberg) + o Update README with CMake build instructions. (604b8cc Joakim Soderberg) + o Clean up the README some. (8d4cb35 JoakimSoderberg) + o Forgotten headers for old nmake project compatability. (8697b99 + Joakim Soderberg) + o Change all uses of WIN32 to _WIN32 (4e14395 Joakim Söderberg) + o Fix include bug. (2024467 Joakim Söderberg) + o Check if we're on OSX before disabling deprecation in le-proxy + (8b40a5b Joakim Söderberg) + o Fix broken autotools build. 
(ae1bd82 Joakim Söderberg) + o Disclaimerize cmake a little in the README (d03b5bf) + o Fix CMake compile when OpenSSL is disabled. (e423d42 Joakim + Söderberg) + o CMake: Get rid of python not found warning when regress tests + turned off. (d38d798 Joakim Söderberg) + o Fix https-client compilation on Windows. (d7be788 Joakim Soderberg) + o Guard against EVENT_NOWIN32 being set during testing. (f1715b4 + Joakim Soderberg) + o Check for OSX when checking for clang. (e212c54 Joakim Soderberg) + o Added a Travis-CI configuration file. (8c0f0a9 Joakim Soderberg) + o Added -Qunused-arguments for clang on macosx (ed99d92 Trond Norbye) + o Rename event_extras to event_extra (a0dd5df Trond Norbye) + o Add option to build shared library (4545fa9 Trond Norbye) + o Add -Qunused-arguments for clang on macos (b56611d Trond Norbye) + o Add cmake-related files to .gitignore (e061321 Trond Norbye) + o Export event_extra not event_extras. (2b41bcf Joakim Söderberg) + + Bugfixes (core) + o If evsel->del() fails, don't leave the evmap in an inconsistent + state (9b5a527 Maxime Henrion) + o Move event_debug_note_teardown_ before mm_free. (69b5c64) + o Check CLOCK_MONOTONIC_* at runtime if needed. (911abf3) + o Fix reinit of fds with EV_WRITE but not EV_READ. (ebfd8a8 maksqwe) + o Tweaked callbacks to prevent race condition + (https://github.com/libevent/libevent/issues/104) (40830f1, 2ea15ed + John Ohl) + o Move assert(ev) to before we use ev in EV_CLOSURE_EVENT_FINALIZE + case (9805972) + + Bugfixes (evhttp) + o Fix a double close() bug in evhttp when the underlying bufferevent uses + BEV_OPT_CLOSE_ON_FREE. (31db8a0 Maxime Henrion) + o Fix an unlikely but possible error case for http connections (f22049e) + o Avoid racy bufferevent activation (5eb1788 Nate Rosenblum) + + Bugfixes on 2.0 (Windows) + o Use windows vsnprintf fixup logic on all windows environments (e826f19) + o libevent/win32_dealloc() : fix sizeof(pointer) vs sizeof(*pointer) + (b8f5980 Frank Denis) + + Bugfixes (evutil_secure_rng) + o When we seed from /proc/sys/kernel/random/uuid, count it as success + (e35b540) + o We should return after arc4random_buf() (1ea1f26 Makoto Kato) + o Avoid other RNG initialization FS reads when urandom file is + specified (9695e9c) + o Really remove RNG seeds from the stack (f5ced88) + o Fix another arc4random_buf-related warning (e64a2b0) + + Bugfixes (bufferevents) + o Initialize async bufferevent timeout CBs unconditionally (af9b2a7) + + Bugfixes (evdns) + o Checking request nameserver for NULL, before using it. (5c710c0 + Belobrov Andrey) + o Fix SEGFAULT after evdns_base_resume if no nameservers + installed. (14971a8 Azat Khuzhin) + o Actually use the log facility for reporting evdns problems. (e1766a1) + o Fix SEGFAULT after evdns_base_resume if no nameservers + installed. (f8d7df8 Azat Khuzhin) + o fix for ServFail from RIPE Atlas release (62f596b Antony Antony) + + Bugfixes (compilation) + o Fix test compilation with nmake: add the gdi.lib dependency (5ba8ab7) + o Whoops. It is gdi.lib, not gdi32.lib. (github issue #61) (8ab612e) + o Don't use return since return type is void and build error occurs + using clang (838161d Makoto Kato) + o Use void casts to suppress some "unchecked return value" warns (7080d55) + o rpcgen: Generate regress.gen.[c,h] in build rather than src dir + (243386c Ross Lagerwall) + o Fix a compiler warning when checking for arc4random_buf linker + breakage. 
(5cb3865) + o Fix 'make distcheck' by adding regress.gen.[ch] to DISTCLEANFILES + (239d834) + + o Fix a c90 warning (c207682) + o Fix consts in WIN32-Code/getopt*.[ch] (57abb35) + + Bugfixes (locks, synchronization) + o Missed lock acquire/release in event_base_cancel_single_callback_() + (d3d999a Azat Khuzhin) + o Fix locking in bufferevent_get_options_(). (dbc9cd4 Maxime Henrion) + + Bugfixes (leaks) + o Avoid leaking segment mappings when offset is not a page multiple (d409514) + + Testing + o Add tests for evdns_base_resume(). (1cd9ff5 Azat Khuzhin) + o Fix dns/leak_resume_send_err test. (7e876df Azat Khuzhin) + o Add checks for evhttp_connection_get_server() in unit + tests. (fbc323b Maxime Henrion) + o Fix a (failure-only) null dereference in the unit tests (1104d0b) + o Fix a logic error in test_evbuffer_freeze (7765884) + o Add missing check to test_evbuffer_file_segment_add_cleanup_cb (eba4506) + o Fix some crash-on-fail cases in DNS regression tests (87cd6f0) + o DNS tests: add a missing check (f314900) + o Finalize tests: add a missing check (82b6956) + o test_evutil_rtrim: add another missing check. (e193c95) + o regress_main: logging all if env EVENT_DEBUG_LOGGING_ALL isset + (611e28b Azat Khuzhin) + o regress_http: add tests for evhttp_connection_get_addr() (4dd500c + Azat Khuzhin) + o Update to the latest version of tinytest (7a80476) + o Heap-allocate zlib data structure in regress_zlib tests (4947c18) + + Performance tweaks (core) + o Avoid redundant syscall to make a nonblocking socket nonblocking + (42c03da Maxime Henrion) + o Avoid redundant syscall if making a socket cloexec twice (1f29b18) + o Avoid redundant invocations of init_extension_functions for IOCP (3b77d62) + + Documentation + o Document that arc4random is not a great cryptographic PRNG. (6e49696) + o Small doxygen tweaks (6e67b51) + o Try another doxygen tweak (ccf432b) + o Clarify event_base_loop exit conditions (031a803) + o Fix a typo (be7bf2c Ondřej Kuzník) + o Document deferred eventcb behaviour (13a9a02 Ondřej Kuzník) + o Typo fixes from Linus Nordberg (cec62cb, 8cd695b) + o Fix duplicate paragraph in evbuffer_ptr documentation (58408ee) + + Code Improvements (coverity) + o Fix a pile of coverity warnings in the unit tests (867f401) + o Fix coverity warnings in benchmark tools. (ff7f739) + o Whoops; fix compilation in bench.c (544cf88) + o Remove spurious checks in evrpc.c error cases (coverity) (991b362) + o Fix a couple of compilation warnings in regress_http.c (860767e) + o Fix even more coverity warnings. (d240328) + o Stop checking for inet_aton; we don't use it. (f665d5c) + o Add an include to evrpc-internal to fix openbsd compilation warning + (5e161c6) + + Cleanups + o Remove an unreachable return statement in minheap-internal.h (e639a9e) + o Refactor evmap_{io,signal}_active_() to tolerate bad inputs (974c60e) + o Fix needless bufferevent includes in evdns.c (254c04e) + o Fix a couple of "#ifdef WIN32" instances (88ecda3) + o Remove unneeded declaration in bufferevent-internal.h (4c8ebcd) + + Sample code + o le-proxy: Fail more gracefully if opening listener fails (44b2491) + o http-server: drop uri_root from base_url in http-server. 
(6171e1c Azat Khuzhin) + o https-client: POST supported, args supported (c5887f7 Alexey Ozeritsky) + o https-client: code cleanup (29af65e Alexey Ozeritsky) + o https-client: Small tweaks to https-client.c (90786eb) + o https-client: Set hostname for SNI extension (by f69m) (d1976f8) + o https-client: add a cast to https-client.c (462e6b6) + + + +Changes in version 2.1.3-alpha (1 May 2013) + + Libevent 2.1.3-alpha fixes various bugs, adds new unit tests, and cleans + up the code in a couple of places. It has a new callback in evhttp for + reporting errors during a request, a new feature for allowing evdns to + not keep the event_base looping when there are no requests inflight, and + example code for writing an https client. + + Libevent 2.1.3-alpha also has an important new (experimental) event + finalization feature to allow safe event teardown in multithreaded + programs. This ought to fix the longstanding bug with deadlocks in + multithreaded use of SSL-based bufferevents that some people have been + experiencing since Libevent 2.0. + + + Core (event finalization) + o Implement event_finalize() and related functions to avoid certain + deadlocks (8eedeab) + o Use finalization feature so bufferevents can avoid deadlocks (02fbf68) + o Always run pending finalizers when event_base_free() is called (e9ebef8) + o Remove bufferevent_del_generic_timeout_cbs as now unused (4ea4c6a) + o More documentation for finalization feature (a800b91) + o Make the event_finalize* functions return an error code (5d11f4f) + o Mark the finalize stuff as experiemental in case it needs to + change (23e2e29) + + Evdns + o evdns: New flag to make evdns not prevent the event loop from + exiting (6b7fa62 Azat Khuzhin) + + Bugfixes (Core) + o Make event_remove_timer behave correctly with persistent timers (5623e80) + o Unit test for event_remove_timer with EV_PERSIST. (96150dd) + o Double-check next timeout when adding events (9443868 Nate Rosenblum) + o event_base_update_cache_time should be a no-op if the loop isn't + running (5e6fa2a) + + Bugfixes (evhttp, crash fix, from 2.0) + o fix #73 and fix http_connection_fail_test to catch it (b618204 Greg Hazel) + + Bugfixes (compilation and portability, from 2.0) + o Fix compilation with WIN32_HAVE_CONDITION_VARIABLES enabled (7e45739) + o Fix missing AC_PROG_SED on older Autoconfs (9ab2b3f Tay Ray Chuan) + o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5) + (74d4c44 Kevin Bowling) + o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake + 1.13 compat (817ea36) + o Rename configure.in to configure.ac to appease newer autoconfs (0c79787) + o Avoid using top_srcdir in TESTS: new automakes do not like this (a55514e) + + Bugfixes (resource leaks/lock errors on error, from 2.0) + o Avoid leaking fds on evconnlistener with no callback set (69db261) + o Avoid double-close on getsockname error in evutil_ersatz_socketpair + (0a822a6) + o Fix a locking error in bufferevent_socket_get_dns_error. (0a5eb2e) + + Documentation Fixes (from 2.0) + o Fix a mistake in evbuffer_remove() arguments in example http server code + (c322c20 Gyepi Sam) + o Fix a typo in a comment in buffer.h. 
Spotted by Alt_F4 (773b0a5) + + Documentation Fixes + o minor documentation typos (809586a Patrick Pelletier) + o Fix cut-and-paste err in whatsnew-2.1 (49905ac) + o Fix comment to refer to sample/include.am correctly (9e8cdf3 Sebastian + Hahn) + o Fix typo : Dispatching instead of Dispaching (0c2bacc Volker Lendecke) + o fix some hinky indentation in evhttp_make_request (80e220e Patrick + Pelletier) + o "buffer" spelling (a452811 Patrick Pelletier) + o Specify return behavior in header for evbuffer_pullup() in corner case + (cf8d1cd Dan Petro) + o Clarify an important point about event_base_foreach_event() (920a5e6) + + Compilation Fixes/Tool Support + o avoid valgrind false positive by zeroing epoll_event (1258614 Patrick + Pelletier) + o Fix harmless clang enum warning (b452a43 Sebastian Hahn) + o remove all exes on "make clean", not just regress.exe (974bfa0 Patrick + Pelletier) + o Make --disable-libevent-regress work again (787fd74) + o Do not build strlcpy.c when it will have no code. (4914620) + + Portability Fixes + o When EWOULDBLOCK is not EAGAIN, treat it as equivalent to it (bf7a0ff) + o Preliminary changes for Minix3. (0dda56a Nicholas Heath) + o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake 1.13 + compat (bf278b) + o Avoid using $(top_srcdir) in TESTS. (2863c83) + o build test/test-script.sh on systems with a less-featureful $< (f935e21) + o Implement EVUTIL_ERR_IS_EAGAIN on windows. (42aaf4d) + + Evhttp changes: + o Fix ipv6 support for http. When URL contain domain, not IP + address. (71e709c Azat Khuzhin) + o uri decode: fix for warning "use of uninitialised value" (64b6ece Azat + Khuzhin) + o uri decode: changed the test for the existence of the next character + (e1903e3 Azat Khuzhin) + o Move prototype of evhttp_decode_uri_internal() to http-internal.h + (de8101a Azat Khuzhin) + o Test: decoding just part of string with evhttp_decode_uri_internal() + (1367653 Azat Khuzhin) + o Add new error_cb for actual reporting of HTTP request errors. (7b07719 + Azat Khuzhin) + o Add test for EVREQ_HTTP_REQUEST_CANCEL into http_cancel_test() (862c217 + Azat Khuzhin) + o Drop extra header http_struct.h from regress_http.c (54cc800 Azat Khuzhin) + + Testing + o Add regress test ipv6_for_domain. (9ec88bd Azat Khuzhin) + o Add an environment variable (EVENT_DEBUG_MODE) to run unit tests in debug + mode (2fad0f3) + o Add a test with an active_later event at event_base_free time. (1c3147f) + o Make all tests pass under EVENT_DEBUG_MODE=1 (b1b054f) + o Add some verbose notes to bufferevent unit tests (9d893c9) + o New test for active_later->active transition on event_active (a153874) + o New tests for event_base_foreach_event() (0b096ef) + o Unit tests for event_base_gettimeofday_cached() and + event_base_update_cache_time() (30ea291) + o A test for event_get_assignment() (f09629e) + o More unit tests for initializing common timeouts. (d596739) + o Fix a bug in the new main/event_foreach test (702c9aa) + + Windows: + o use FormatMessage for winsock errors (0c6ec5d, 2078e9b, 4ccdd53, c9ad3af + Patrick Pelletier) + o a program to print out the error strings for winsock errors (7296512 + Patrick Pelletier) + o Fix a warning introduced in 0c6ec5d8 (eeb700c) + o Fix another warning introduced in 0c6ec5d8 (ed26561) + + Examples (http) + o Add sample/https-client.c, an example of stacking evhttp as a client on + top of bufferevent_ssl. 
(be46c99 Catalin Patulea) + o use ${OPENSSL_LIBS} instead of -lssl -lcrypto (bf31fa5 Patrick Pelletier) + o https-client was putting newlines at 256-byte boundaries (42d7441 Patrick + Pelletier) + o better handling of OpenSSL errors (5754d96 Patrick Pelletier) + o use Debian's default root certificate location (aacd674 Patrick Pelletier) + o use iSECPartners code to validate hostname in certificate (64d9f16 + Patrick Pelletier) + o avoid sign mismatch warning in openssl_hostname_validation.c (6021cb5 + Patrick Pelletier) + o pull in wildcard matching code from cURL (4db9da6 Patrick Pelletier) + o Another tweak to https-client.c (95acdaa) + o Remove http_struct.h usage in sample/https-client.c (8a90a85) + + + +Changes in version 2.1.2-alpha (18 Nov 2012) + + Libevent 2.1.2-alpha includes more portable support for monotonic timers, + refactors much of Libevent's internal and external infrastructure, + closes some longstanding gaps in the interface, and makes other + improvements. The log below tries to organize features by rough area of + effect. It omits a few commits which were pure bugfixes on other commits + listed below. For more detail, see the git changelogs. For more + insight, see the "whatsnew-2.1.txt" document included in the Libevent + 2.1.2-alpha distribution. + + Libevent 2.1.2-alpha also includes all changes made in 2.0.19-stable + through 2.0.21-stable inclusive. + + Performance (core): + o Replace pipe-based notification with EVFILT_USER where possible. This + should make multithreaded programs on OSX and *BSD alert the main thread a + little faster. (53a07fe) + o Make th_base_lock nonrecursive. (9cd5acb) + + New/Changed API Functions: + o New event_get_priority() function to return an event's priority (f90e255) + o Add a bufferevent_get_priority() function (bd39554) + o Add an event_base_loopcontinue() to tell Libevent to rescan for more + events right away (7d6aa5e) + o Add a new callback to get called on evbuffer_file_segment free + (e9f8feb yangacer, 64051b9) + o Expose event_base_foreach_event() as a public API. (84fd6d7 Roman + Puls, 232055e, ffe1643) + o Add an event_remove_timer() to remove timer on an event without + deleting it (e3b2e08) + o Make bufferevent_set_timeouts(bev, NULL, NULL) have plausible + semantics (9dee36b) + o Rename event_enable_lock_debuging() to ..._debugging(). (The old name + should still work.) (07e132e) + o Add missing implementation for event_enable_debug_logging (3b3e21d) + + PORTABLE MONOTONIC TIMERS: + + Libevent 2.1.2 includes internal support for monotonic timers on + (nearly) all supported platforms, including Windows, and OSX. Libevent + applications should now be more resilient to jumps forwards or backwards + in the system clock. Also, on Linux systems with epoll, we now + optionally support microsecond-level timeouts (whereas epoll only + supports millisecond-precision timeouts). + + o Use mach_absolute_time() for monotonic clock support on OSX.
(b8fd6f9) + o Do not track use_monotonic field when is no monotonic clock (cb653a0) + o EVENT_BASE_FLAG_PRECISE_TIMER indicates we want fine timer precision + (ddd69d3) + o On Linux, use CLOCK_MONOTONIC_COARSE by default (55780a7) + o Implement a GetTickCount-based monotonic timer for Windows (d5e1d5a) + o Refactor monotonic timer handling into a new type and set of + functions; add a gettimeofday-based ratcheting implementation (f5e4eb0) + o Add EVENT_PRECISE_TIMER environment var for selecting precise-but-slow + timer (a2598ec) + o Implement fast/precise monotonic clocks on Windows (2c47045) + o Simple unit tests for monotonic timers (630f077) + o Improve the monotonic-time unit test: make it check the step size (7428c78) + o When PRECISE_TIMERS is set with epoll, use timerfd for microsecond + precision (26c7582) + o Split out time-related evutil functions into a new evutil_time.c (c419485) + o Split out time-related prototypes into time-internal.h (71bca50) + o Add evutil_time.obj to Makefile.nmake (0ba0683) + o Avoid giving a spurious warning when timerfd support is unavailable + (1aaf9f0 Dave Hart) + o Make test_evutil_monotonic a little more tolerant (def3b83) + o Avoid unused-var warning on systems with clock_gettime but without + CLOCK_MONOTONIC_COARSE (9be5468) + +EVENT_BASE_ONCE LEAKS: + If a callback added by event_base_once() is never invoked, Libevent no + longer leaks internal memory. + + o Free dangling event_once objects on event_base_free() (c17dd59) + o Add a unit test in which an event is created with event_base_once() + but never fires (4343edf) + +TESTING SUPPORT, FIXES AND IMPROVEMENTS: + + Libevent now disables by default its unit tests that would touch the + network, or that tend to fail on heavily-loaded systems. To re-enable + them, invoke the ./test/regress program with the @all alias. + + o Simplify test.sh code significantly. (9b856fd Ross Lagerwall) + o Make all tests that hit the network disabled by default (f2cea87) + o Avoid a resource leak on error in http client benchmark (ea92fba) + o Update to latest tinytest (911b4f0349377) (ef7c4f7) + o Avoid (unlikely) overflow in bench_httpclient.c (5671033) + o Shave 700 msec off the persistent_timeout_jump test (21205b8) + o Check return value of write() in regress.c (c8009d2) + o Make load-dependent monotonic timer tests off-by-default (2b6fe8b) + o Add deferred_cb_skew to list of timing-dependent tests (34c8f31) + o Avoid test -e; older shs don't have one. (f1bd938) + o Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086) + o Fix a couple of compile warnings in the unit tests (5a9a014) + +MISC: + o Change evutil_weakrand_() to avoid platform random() (e86af4b Nicholas + Marriott, 3aa4415) + +INFRASTRUCTURE (Active-later events): + As a simplification and optimization to Libevent's "deferred callback" + logic (introduced in 2.0 to avoid callback recursion), Libevent now + treats all of its deferrable callback types using the same logic it uses + for active events. Now deferred events no longer cause priority + inversion, no longer require special code to cancel them, and so on. + + o Refactor the callback part of an event into its own event_callback + type (cba59e5) + o Add "active later" event_callbacks to supersede deferred (745a63d) + o event_base_assert_ok: check value of event_active_count for + correctness (fec8bae) + o Replace deferred_cbs with event_callback-based implementation. 
(ae2b84b) + o Replace more deferred_cb names with event_callback (a4079aa) + o Give event_base_process_active a single exit path (581b5be) + o Restore our priority-inversion-prevention code with deferreds (c0e425a) + o Refactor event_persist_closure: raise and extract some common logic + (bec22b4) + o Remove the unused bits from EVLIST_ALL (9889a3d) +||||||| merged common ancestors +Changes in version 2.0.22-stable (?? Dec 2013) + + (As of 3b77d62829c4393bda6f9105a5d3b73b48a64b71.) + +BUGFIXES (evhttp) + o fix #73 and fix http_connection_fail_test to catch it (crash fix) (b618204 Greg Hazel) + o Avoid racy bufferevent activation (5eb1788 Nate Rosenblum) + +BUGFIXES (compilation and portability) + o Fix compilation with WIN32_HAVE_CONDITION_VARIABLES enabled (7e45739) + o Fix missing AC_PROG_SED on older Autoconfs (9ab2b3f Tay Ray Chuan) + o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5) (74d4c44 Kevin Bowling) + o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake 1.13 compat (817ea36) + o Rename configure.in to configure.ac to appease newer autoconfs (0c79787) + o Avoid using top_srcdir in TESTS: new automakes do not like this (a55514e) + o Use windows vsnprintf fixup logic on all windows environments (e826f19) + o Fix a compiler warning when checking for arc4random_buf linker breakage. (5cb3865) + o Fix another arc4random_buf-related warning (e64a2b0) + +BUGFIXES (resource leaks/lock errors on error) + o Avoid leaking fds on evconnlistener with no callback set (69db261) + o Avoid double-close on getsockname error in evutil_ersatz_socketpair (0a822a6) + o Fix a locking error in bufferevent_socket_get_dns_error. (0a5eb2e) + o libevent/win32_dealloc() : fix sizeof(pointer) vs sizeof(*pointer) (b8f5980 Frank Denis) + +BUGFIXES (miscellaneous) + o Avoid other RNG initialization FS reads when urandom file is specified (9695e9c, bb52471) + o Avoid redundant invocations of init_extension_functions for IOCP (3b77d62) + +BUFGIXES (evdns) + o Checking request nameserver for NULL, before using it. (5c710c0 Belobrov Andrey) + o Fix SEGFAULT after evdns_base_resume if no nameservers installed. (f8d7df8 Azat Khuzhin) + +BUGFIXES (evutil_secure_random) + o When we seed from /proc/sys/kernel/random/uuid, count it as success (e35b540) + o Document that arc4random is not a great cryptographic PRNG. (6e49696) + o Add evutil_secure_rng_set_urandom_device_file (2bbb5d7) + o Really remove RNG seeds from the stack (f5ced88) + + +DOCUMENTATION FIXES + o Fix a mistake in evbuffer_remove() arguments in example http server code (c322c20 Gyepi Sam) + o Fix a typo in a comment in buffer.h. Spotted by Alt_F4 (773b0a5) + + + +Changes in version 2.0.21-stable (18 Nov 2012) +BUGFIXES: + o ssl: Don't discard SSL read event when timeout and read come close together (576b29f) + o ssl: Stop looping in "consider_reading" if reading is suspended. (f719b8a Joachim Bauch) + o ssl: No need to reserve space if reading is suspended. (1acf2eb Joachim Bauch) + o dns: Avoid a memory-leak on OOM in evdns. 
(73e85dd, f2bff75 George Danchev) + o build: Use python2 rather than python (0eb0109 Ross Lagerwall) + o build: Compile without warnings on mingw64 (94866c2) + o build: Fix compilation on mingw64 with -DUSE_DEBUG (62bd2c4) + o build: Make rpcgen_wrapper.sh work on systems without a "python2" binary (f3009e4) + o iocp: Close IOCP listener socket on free when LEV_OPT_CLOSE_ON_FREE is set (cb853ea Juan Pablo Fernandez) + o core: Avoid crash when event_pending() called with no event_base set on event (e3cccf3) + o misc: remove stray 'x' so print_err will compile when uncommented (ac35650 Patrick Pelletier) + o tests: Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086) + o tests: Warn when openssl version in unit test mismatches compiled version. (ac009f9) + + +Changes in version 2.0.20-stable (23 Aug 2012) +BUGFIXES: + o core: Make event_pending() threadsafe. (be7a95c Simon Liu) + o win32: avoid crash when waiting forever on zero fds. (160e58b) + o evhttp: Fix a memory leak on error in evhttp_uriencode (11c8b31) + o evbuffer: Avoid possible needless call to writev. Found by coverity. (6a4ec5c) + o evdns: memset sockaddr_in before using it. Found by coverity. (a1a0e67) + o evhttp: Check more setsockopt return values when binding sockets. Found by coverity (a0912e3) + o evdns: Avoid segfault on weird timeout during name lookup. (dc32077 Greg Hazel) + o bufferevent_ssl: Correctly invoke callbacks when a SSL bufferevent reads some and then blocks. (606ac43) + + +PORTABILITY FIXES: + o check for arc4random_buf at runtime, on OS X (bff5f94 Greg Hazel) + o Correctly check for arc4random_buf (fcec3e8 Sebastian Hahn) + o Add explicit AC_PROG_SED to configure.in so all autoconfs will expose $(SED) (ca80ea6) + +BUILD FIXES: + o Add GCC annotations so that the vsprintf functions get checked properly (117e327) + o Fix an unused variable warning on *BSD. (c0720c1) + +UNIT TEST FIXES: + o Fix a couple of memory leaks (found with Valgrind). (3b2529a Ross Lagerwall) + o Remove deadcode in http regression tests. Found by coverity. (5553346) + o Fix possible uninitialized read in dns regression tests. Found by coverity. (2259777) + o Set umask before calling mkstemp in unit tests. Found by coverity (f1ce15d) + o Fix various check-after-dereference issues in unit tests: found by coverity (4f3732d) + o Fix resource leaks in the unit tests; found by coverity (270f279) + o Add some missing null checks to unit tests; found by coverity (f021c3d) + o Avoid more crashes/bad calls in unit tests; found by coverity (3cde5bf) + o Remove unused variable; spotted by coverity (6355b2a) + o Add checks to various return values in unit tests. Found by coverity (b9e7329) + o Move assignment outside tt_assert in ssl unit tests. Appeases coverity. (a2006c0) + + + +Changes in version 2.0.19-stable (3 May 2012) +BUGFIXES (CORE): + o Refactor event_persist_closure: raise and extract some common logic (bec22b4) + o If time has jumped so we'd reschedule a periodic event in the past, schedule it for the future instead (dfd808c) + o If a higher-priority event becomes active, don't continue running events of the current priority. (2bfda40) + +BUGFIXES (SSL): + o Fixed potential double-readcb execution with openssl bufferevents. (4e62cd1 Mark Ellzey) + +BUGFIXES (DNS): + o Cancel a probe request when the server is freed, and ignore cancelled probe callbacks (94d2336 Greg Hazel) + o Remove redundant DNS_ERR_CANCEL check, move comment (46b8060 Greg Hazel) + o When retransmitting a timed-out DNS request, pick a fresh nameserver. 
(3d9e52a) + +DOCUMENTATION FIXES: + o Fix a typo in the bufferevent documentation (98e9119) + o Add missing ) to changelog; spotted by rransom (4c7ee6b) + o Fix the website URL in the readme (f775521) + +COMPILATION FIXES: + o Fix a compilation error with MSVC 2005 due to use of mode_t (336dcae) + o Configure with gcc older than 2.95 (4a6fd43 Sebastian Hahn) + o Generate event-config.h with a single sed script (30b6f88 Zack Weinberg) + +FORWARD-COMPATIBILITY: + o Backport: provide EVENT_LOG_* names, and deprecate _EVENT_LOG_* (d1a03b2) + +TESTING/DEBUGGING SUPPORT: + o dns-example.c can now take a resolv.conf file on the commandline (6610fa5) + o Make some evdns.c debug logs more verbose (d873d67) + o Work-around a stupid gcov-breaking bug in OSX 10.6 (b3887cd) + + + +Changes in version 2.0.18-stable (22 Mar 2012) +BUGFIXES (core): + o Make uses of open() close-on-exec safe by introducing an internal evutil_open_closeonexec. (d2b5f72 Ross Lagerwall, 03dce42) + +BUGFIXES (kqueue): + o Properly zero the kevent in kq_setup_kevent() (c2c7b39 Sebastian Hahn) + +BUILD FIXES: + o Added OPENSSL_LDFLAGS env variable which is appended to SSL checks. (9278196 Mark Ellzey) + o Changed OPENSSL_LDFLAGS to OPENSSL_LIBADD (2d67b63 Mark Ellzey) + o Don't do clang version detection when disabling some flags (083296b Sebastian Hahn) + +BUGFIXES (dns): + o Stop crashing in evdns when nameserver probes give a weird error (bec5068) + + +Changes in version 2.0.17-stable (10 Feb 2012) + +BUGFIXES (core): + o Be absolutely sure to clear pncalls before leaving event_signal_closure (11f36a5) + o check for sysctl before we use it (358c745 Mike Frysinger) + o Remove bogus casts of socket to int before calling ev_callback (f032516) + o Make evconnlistener work around bug in older Linux when getting nmapped (ecfc720) + o Fix a list corruption bug when using event_reinit() with signals present (6e41cdc) + o Fix a fd leak in event_reinit() (3f18ad1) + o Do a memberwise comparison of threading function tables (c94a5f2 Nate R) + o Use C-style comments in C source files (for compatibility with compilers such as xlc on AIX). (d84d917 Greg Hewgill) + o Avoid crash when freeing event_iocp and using event_set_mem_functions (19715a6) + o In the kqueue backend, do not report EBADF as an EV_READ (5d7bfa1 Nicholas Marriott) + +BUGFIXES (evbuffer and bufferevents): + o Fix behavior of evbuffer_peek(buf,-1,NULL,NULL,0) (c986f23 Zack Weinberg) + o Loop on filtering SSL reads until we are blocked or exhausted. (5b4b812) + +BUGFIXES (evhttp): + o Force strict validation of HTTP version in response. (790f6b3 Catalin Patulea) + +BUGFIXES (evdns): + o evdns: fix a bug in circular-queue implementation (d6094b1) + +BUILD FIXES: + o Fix a silly compilation error with the sun compiler (1927776 Colin Watt) + o Suppress a gcc warning from ignoring fwrite return in http-sample.c (7206e8c) + +DOCUMENTATION FIXES: + o Slightly clarify evbuffer_peek documentation (7bbf6ca) + o Update copyright notices to 2012 (e49e289) + +NEW APIS: + o Backport evhttp_connection_get_bufferevent to Libevent 2.0 (da70fa7 Arno Bakker) + +TESTS AND TEST FIXES: + o Fix a race condition in the dns/bufferevent_connect_hostname test. 
(cba48c7) + o Add function to check referential integrity of an event_base (27737d5) + o Check event_base correctness at end of each unit test (3312b02) + o Workaround in the unit tests for an apparent epoll bug in Linux 3.2 (dab9187) + o Better workaround for Linux 3.2 edge-triggered epoll bug (9f9e259) + +Changes in version 2.0.16-stable (18 Nov 2011) +BUGFIXES (core): + o More detailed message in case of libevent self-debugging failure. (9e6a4ef Leonid Evdokimov) + o epoll: close fd on alloc fail at initialization (1aee718 Jamie Iles) + o Fix compile warning from saying event2/*.h inside a comment (447b0ba) + o Warn when unable to construct base because of failing make_base_notifiable (4e797f3) + o Don't try to make notifiable event_base when no threading fns are configured (e787413) + +BUGFIXES (evbuffer): + o unit test for remove_buffer bug (90bd620 Greg Hazel) + o Fix an evbuffer crash in evbuffer_remove_buffer() (c37069c) + +BUGFIXES (bufferevent_openssl): + o Refactor amount-to-read calculations in buffervent_ssl consider_reading() (a186e73 Mark Ellzey) + o Move SSL rate-limit enforcement into bytes_to_read() (96c562f) + o Avoid spinning on OpenSSL reads (2aa036f Mark Ellzey) + +BUGFIXES (dns) + o Empty DNS reply with OK status is another way to say NODATA. (21a08d6 Leonid Evdokimov) + +TESTING: + o Tests for 94fba5b and f72e8f6 (d58c15e Leonid Evdokimov) + o Test for commit aff6ba1 (f7841bf Leonid Evdokimov) + o Style and comment tweaks for dns/leak* tests (5e42202) + o improve test to remove at least one buffer from src (7eb52eb Greg Hazel) + +DOCUMENTATION: + o Add note about evhttp_send_reply_end to its doxygen (724bfb5) + o Update copyright dates to 2011. (3c824bd) + o Fix typo in whatsnew-2.0.txt (674bc6a Mansour Moufid) + o Improve win32 behavior of dns-sample.c code (a3f320e Gisle Vanem) + + + +Changes in version 2.0.15-stable (12 Oct 2011) +BUGFIXES (DNS): + o DNS: add ttl for negative answers using RFC 2308 idea. (f72e8f6 Leonid Evdokimov) + o Add DNS_ERR_NODATA error code to handle empty replies. (94fba5b Leonid Evdokimov) + +BUFGIXES (bufferevents and evbuffers): + o Make evbuffer callbacks get the right n_added value after evbuffer_add (1ef1f68 Alex) + o Prefer mmap to sendfile unless a DRAINS_TO_FD flag is set. Allows add_file to work with SSL. (0ba0af9) + +BUGFIXES (event loop): + o When a signal callback is activated to run multiple times, allow event_base_loopbreak to work even before they all have run. (4e8eb6a) + +DOCUMENTATION FIXES: + o Fix docstring in dns.h (2b6eae5 Leonid Evdokimov) + o refer to non-deprecated evdns functions in comments (ba5c27d Greg Hazel) + +BUILD AND TESTING FIXES: + o le-proxy and regress depend on openssl directly (9ae061a Sergey Avseyev) + o Use _SOURCES, not _sources, in sample/Makefile.am (7f82382) + o Fixed compiler warnings for unchecked read/write calls. (c3b62fd Mark Ellzey) + o Make write-checking fixes use tt_fail_perror (2b76847) + o Fix some "value never used" warnings with gcc 4.6.1 (39c0cf7) + + + +Changes in version 2.0.14-stable (31 Aug 2011) +BUGFIXES (bufferevents and evbuffers): + o Propagate errors on the underlying bufferevent to the user. 
(4a34394 Joachim Bauch) + o Ignore OpenSSL deprecation warnings on OS X (5d1b255 Sebastian Hahn) + o Fix handling of group rate limits under 64 bytes of burst (6d5440e) + o Solaris sendfile: correctly detect amount of data sent (643922e Michael Herf) + o Make rate limiting work with common_timeout logic (5b18f13) + o clear read watermark on underlying bufferevent when creating filtering bev to fix potentially failing fragmented ssl handshakes (54f7e61 Joachim Bauch) + +BUGFIXES (IOCP): + o IOCP: don't launch reads or writes on an unconnected socket (495c227) + o Make IOCP rate-limiting group support stricter and less surprising. (a98da7b) + o Have test-ratelim.c support IOCP (0ff2c5a) + o Make overlapped reads result in evbuffer callbacks getting invoked (6acfbdd) + o Correctly terminate IO on an async bufferevent on bufferevent_free (e6af35d) + +BUGFIXES (other): + o Fix evsig_dealloc memory leak with debugging turned on. (9b724b2 Leonid Evdokimov) + o Fix request_finished memory leak with debugging turned on. (aff6ba1 Leonid Evdokimov) + +BUILD AND TESTING FIXES: + o Allow OS-neutral builds for platforms where some versions have arc4random_buf (b442302 Mitchell Livingston) + o Try to fix 'make distcheck' errors when building out-of-tree (04656ea Dave Hart) + o Clean up some problems identified by Coverity. (7c11e51 Harlan Stenn) + + +Changes in version 2.0.13-stable (18 Jul 2011) +BUGFIXES + o Avoid race-condition when initializing global locks (b683cae) + o Fix bug in SSL bufferevents backed by a bev with a write high-watermarks (e050703 Joachim Bauch) + o Speed up invoke_callbacks on evbuffers when there are no callbacks (f87f568 Mark Ellzey) + o Avoid a segfault when all methods are disabled or broken (27ce38b) + o Fix incorrect results from evbuffer_search_eol(EOL_LF) (4461f1a) + o Add some missing checks for mm_calloc failures (89d5e09) + o Replace an assertion for event_base_free(NULL) with a check-and-warn (09fe97d) + o Report kqueue ebadf, epipe, and eperm as EV_READ events (1fd34ab) + o Check if the `evhttp_new_object' function in `http.c' returns NULL. (446cc7a Mansour Moufid) + o Use the correct printf args when formatting size_t (3203f88) + o Complain if the caller tries to change threading cbs after setting them (cb6ecee) + +DOCUMENTATION FIXES AND IMPROVEMENTS + o Revise the event/evbuffer/bufferevent doxygen for clarity and accuracy (2888fac) + o Update Doxyfile to produce more useful output (aea0555) + +TEST FIXES + o Fix up test_evutil_snprintf (caf695a) + o Fix tinytest invocation from windows shell (57def34 Ed Day) + +BUILD FIXES + o Use AM_CPPFLAGS in sample/Makefile.am, not AM_CFLAGS (4a5c82d) + o Fix select.c compilation on systems with no NFDBITS (49d1136) + o Fix a few warnings on OpenBSD (8ee9f9c Nicholas Marriott) + o Don't break when building tests from git without python installed (b031adf) + o Don't install event_rpcgen.py when --disable-libevent-install is used (e23cda3 Harlan Stenn) + o Fix AIX build issue with TAILQ_FOREACH definition (e934096) + + +Changes in version 2.0.12-stable (4 Jun 2011) +BUGFIXES + o Fix a warn-and-fail bug in kqueue by providing kevent() room to report errors (28317a0) + o Fix an assert-inducing fencepost bug in the select backend (d90149d) + o Fix failing http assertion introducd in commit 0d6622e (0848814 Kevin Ko) + o Fix a bug that prevented us from configuring IPv6 nameservers. (74760f1) + o Prevent size_t overflow in evhttp_htmlescape. 
(06c51cd Mansour Moufid) + o Added several checks for under/overflow conditions in evhttp_handle_chunked_read (a279272 Mark Ellzey) + o Added overflow checks in evhttp_read_body and evhttp_get_body (84560fc Mark Ellzey) + +DOCUMENTATION: + o Add missing words to EVLOOP_NONBLOCK documentation (9556a7d) + +BUILD FIXES + o libssl depends on libcrypto, not the other way around. (274dd03 Peter Rosin) + o Libtool brings in the dependencies of libevent_openssl.la automatically (7b819f2 Peter Rosin) + o Use OPENSSL_LIBS in Makefile.am (292092e Sebastian Hahn) + o Move the win32 detection in configure.in (ceb03b9 Sebastian Hahn) + o Correctly detect openssl on windows (6619385 Sebastian Hahn) + o Fix a compile warning with zlib 1.2.4 and 1.2.5 (5786b91 Sebastian Hahn) + o Fix compilation with GCC 2, which had no __builtin_expect (09d39a1 Dave Hart) + o Fix new warnings from GCC 4.6 (06a714f) + o Link with -lshell32 and -ladvapi32 on Win32. (86090ee Peter Rosin) + o Make the tests build when OpenSSL is not available. (07c41be Peter Rosin) + o Bring in the compile script from automake, if needed. (f3c7a4c Peter Rosin) + o MSVC does not provide S_ISDIR, so provide it manually. (70be7d1 Peter Rosin) + o unistd.h and sys/time.h might not exist. (fe93022 Peter Rosin) + o Make sure TINYTEST_LOCAL is defined when building tinytest.c (8fa030c Peter Rosin) + o Fix winsock2.h #include issues with MSVC (3d768dc Peter Rosin) + o Use evutil_gettimeofday instead of relying on the system gettimeofday. (0de87fe Peter Rosin) + o Always use evutil_snprintf, even if OS provides it (d1b2d11 Sebastian Hahn) + o InitializeCriticalSectionAndSpinCount requires _WIN32_WINNT >= 0x0403. (816115a Peter Rosin) + o cygwin: make it possible to build DLLs (d54d3fc) + + + +Changes in version 2.0.11-stable (27 Apr 2011) + [Autogenerated from the Git log, sorted and cleaned by hand.] +BUGFIXES: + o Fix evport handling of POLLHUP and POLLERR (b42ce4b) + o Fix compilation on Windows with NDEBUG (cb8059d) + o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (0144886 Trond Norbye) + o Detect and handle more allocation failures. (666b096 Jardel Weyrich) + o Use event_err() only if the failure is truly unrecoverable. (3f8d22a Jardel Weyrich) + o Handle resize failures in the select backend better. (83e805a) + o Correctly free selectop fields when select_resize fails in select_init (0c0ec0b) + o Make --enable-gcc-warnings a no-op if not using gcc (3267703) + o Fix a type error in our (unused) arc4random_stir() (f736198) + o Correctly detect and stop non-chunked http requests when the body is too long (63a715e) + o Have event_base_gettimeofday_cached() always return wall-clock time (a459ef7) + o Workaround for http crash bug 3078187 (5dc5662 Tomash Brechko) + o Fix incorrect assertions and possible use-after-free in evrpc_free() (4b8f02f Christophe Fillot) + o Reset outgoing http connection when read data in idle state. (272823f Tomash Brechko) + o Fix subtle recursion in evhttp_connection_cb_cleanup(). (218cf19 Tomash Brechko) + o Fix the case when failed evhttp_make_request() leaved request in the queue. (0d6622e Tomash Brechko) + o Fix a crash bug in evdns server circular list code (00e91b3) + o Handle calloc failure in evdns. (Found by Dave Hart) (364291e) + o Fix a memory leak on win32 socket->event map. 
(b4f89f0) + o Add a forgotten NULL check to evhttp_parse_headers (12311ff Sebastian Hahn) + o Fix possible NULL-deref in evdns_cancel_request (5208544 Sebastian Hahn) + +PORTABILITY: + o Fall back to sscanf if we have no other way to implement strtoll (453317b) + o Build correctly on platforms without sockaddr_storage (9184563) + o Try to build correctly on platforms with no IPv6 support (713c254) + o Build on systems without AI_PASSIVE (cb92113) + o Fix http unit test on non-windows platforms without getaddrinfo (6092f12) + o Do not check for gethostbyname_r versions if we have getaddrinfo (c1260b0) + o Include arpa/inet.h as needed on HPUX (10c834c Harlan Stenn) + o Include util-internal.h as needed to build on platforms with no sockaddr_storage (bbf5515 Harlan Stenn) + o Check for getservbyname even if not on win32. (af08a94 Harlan Stenn) + o Add -D_OSF_SOURCE to fix hpux builds (0b33479 Harlan Stenn) + o Check for allocation failures in apply_socktype_protocol_hack (637d17a) + o Fix the check for multicast or broadcast addresses in evutil_check_interfaces (1a21d7b) + o Avoid a free(NULL) if out-of-memory in evdns_getaddrinfo. Found by Dave Hart (3417f68) + +DEFENSIVE PROGRAMMING: + o Add compile-time check for AF_UNSPEC==PF_UNSPEC (3c8f4e7) + +BUGS IN TESTS: + o Fix test.sh output on solaris (b4f89b6 Dave Hart) + o Make test-eof fail with a timeout if we never get an eof. (05a2c22 Harlan Stenn) + o Use %s with printf in test.sh (039b9bd) + o Add an assert to appease clang's static analyzer (b0ff7eb Sebastian Hahn) + o Add a forgotten return value check in the unit tests (3819b62 Sebastian Hahn) + o Actually send NULL request in http_bad_request_test (b693c32 Sebastian Hahn) + o add some (void) casts for unused variables (65707d7 Sebastian Hahn) + o Refactor test_getaddrinfo_async_cancel_stress() (48c44a6 Sebastian Hahn) + o Be nice and "handle" error return values in sample code (4bac793 Sebastian Hahn) + o Check return value of evbuffer_add_cb in tests (93a1abb Sebastian Hahn) + o Remote some dead code from dns-example.c (744c745 Sebastian Hahn) + o Zero a struct sockaddr_in before using it (646f9fe Sebastian Hahn) + +BUILD FIXES: + o Fix warnings about AC_LANG_PROGRAM usage (f663112 Sebastian Hahn) + o Skip check for zlib if we have no zlib.h (a317c06 Harlan Stenn) + o Fix autoconf bracket issues; make check for getaddrinfo include netdb.h (833e5e9 Harlan Stenn) + o Correct an AM_CFLAGS to an AM_CPPFLAGS in test/Makefile.am (9c469db Dave Hart) + o Fix make distcheck & installation of libevent 1 headers (b5a1f9f Dave Hart) + o Fix compilation under LLVM/clang with --enable-gcc-warnings (ad9ff58 Sebastian Hahn) + +FEATURES: + o Make URI parser able to tolerate nonconformant URIs. (95060b5) + +DOCUMENTATION: + o Clarify event_set_mem_functions doc (926f816) + o Correct evhttp_del_accept_socket documentation on whether socket is closed (f665924) + o fix spelling mistake in whatsnew-2.0.txt (deb2f73) + o Fix sample/http-server ipv6 fixes (eb692be) + o Comment internal headers used in sample code. (4eb281c) + o Be explicit about how long event loops run in event.h documentation (f95bafb) + o Add comment to configure.in to explain gc-sections test logic (c621359) + o Fix a couple of memory leaks in samples/http-server.c. Found by Dave Hart. (2e9f665) + + + +BUILD IMPROVEMENTS: + Libevent 2.1.2-alpha modernizes Libevent's use of autotools, and makes + numerous other build system. Parallel builds should be faster, and all + builds should be quieter. 
+ + o Split long lists in Makefile.am into one-item-per-line (2711cda) + o Remove unnecessary code in configure.in. (e65914f Ross Lagerwall) + o attempt to support OpenSSL in Makefile.nmake (eba0eb2 Patrick Pelletier) + o Use newer syntax for autoconf/automake init (7d60ba8) + o Enable silent build rules by default. Override with V=1 (7b18e5c) + o Switch to non-recursive makefiles (7092f3b) + o Rename subordinate Makefile.ams to include.am (6cdfeeb) + o Make quiet build even quieter (371a123) + o New --quiet option for event_rpcgen.py (aa59c1e) + o Be quiet when making regress.gen.[ch] (607a8ff) + o Fix handling of no-python case for nonrecursive make (1e3123d) + o We now require automake 1.9 or later. Modernize! (b7f6e89) + o Rename configure.in to configure.ac. (b3fea67 Ross Lagerwall) + o Use correct openssl libs and includes in pkgconfig file (d70af27) + o Use the same CFLAGS for openssl when building unit tests as with + libevent (1d9d511) + +DOCUMENTATION + o Note that make_base_notifiable should not be necessary (26ee5f9) + o Be more clear that LEV_OPT_DEFERRED_ACCEPT has tricky prereqs (371efeb) + o Add caveat to docs about bufferevent_free() with data in outbuf (6fab9ee) + o Make it more clear that NOLOCK means "I promise, no multithreading" + (9444524) + o Fix a comment in test-fdleak after 077c7e949. (3881d8f Ross Lagerwall) + o Make the Makefile.nmake warning slightly less dire (e7bf4c8) + o Fix typo : events instead of evets (05f1aca Azat Khuzhin) + o Additional comments about OPENSSL_DIR variable, prompted by Dave Hart + (6bde2ef Patrick Pelletier) + +EVHTTP: + o ignore LWS after field-content in headers (370a2c0 Artem Germanov) + o Clean up rtrim implementation (aa59d80) + o Remove trailing tabs in HTTP headers as well. (ac42519) + o Remove internal ws from multiline http headers correctly (c6ff381) + o Move evutil_rtrim_lws_ to evutil.c where it belongs (61b93af) + o add evhttp_request_get_response_code_line (4f4d0c9 Jay R. Wren) + o Use EVUTIL_SOCKET_ERROR() wrapper to save/restore errno in + evhttp_connection_fail_ (7afbd60) + o preserve errno in evhttp_connection_fail_ for inspection by the + callback (36d0ee5 Patrick Pelletier) + +BUGFIXES: + o Correctly handle running on a system where accept4 doesn't work. (9fbfe9b) + o Avoid double-free on error in evbuffer_add_file. Found by + coverity. (6a81b1f) + o Fix another possible uninitialized read in dns regression tests. Found + by coverity. (13525c5) + o Add checks for functions in test-ratelim.c; found by Coverity (aa501e1) + o Avoid memory leak in test_event_calloc unit test; found by coverity + (92817a1) + o Fix a shadowed variable in addfile_test_readcb; found by coverity + (225344c) + o Check return value when using LEV_OPT_DEFERRED_ACCEPT. Found by + coverity (6487f63) + o Prevent reference leak of bufferevent if getaddrinfo fails. (b757786 + Joachim Bauch) + o Make event_base_getnpriorities work with old "implicit base" code + (c46cb9c) + o Simplify and correct evutil_open_closeonexec_ (0de587f) + o Fix event_dlist definition when sys/queue not included (81b6209 + Derrick Pallas) + + + +Changes in version 2.1.1-alpha (4 Apr 2012) + + Libevent 2.1.1-alpha includes a number of new features and performance + improvements. The log below tries to organize them by rough area of + effect. It omits some commits which were pure bugfixes on other commits + listed below. For more detail, see the git changelogs. For more + insight, see the "whatsnew-2.1.txt" document included in the Libevent + 2.1.1-alpha distribution. 
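
  For orientation, a minimal sketch of how two of the 2.1 core additions
  catalogued below, event_self_cbarg() and EVLOOP_NO_EXIT_ON_EMPTY, fit
  together. This is an illustrative example only (it is not taken from the
  Libevent tree) and assumes the libevent 2.1 headers are installed:

      #include <event2/event.h>
      #include <sys/time.h>
      #include <stdio.h>

      /* Fires once per second and re-arms itself; the event pointer arrives
       * as the callback argument because the event was created with
       * event_self_cbarg() as its callback argument. */
      static void tick_cb(evutil_socket_t fd, short what, void *arg)
      {
          struct event *self = arg;
          struct timeval one_sec = { 1, 0 };
          (void)fd; (void)what;
          puts("tick");
          event_add(self, &one_sec);
      }

      int main(void)
      {
          struct event_base *base = event_base_new();
          struct event *ev = event_new(base, -1, 0, tick_cb, event_self_cbarg());
          struct timeval one_sec = { 1, 0 };

          event_add(ev, &one_sec);
          /* EVLOOP_NO_EXIT_ON_EMPTY keeps the loop running even when no
           * events remain pending; this sketch therefore never returns. */
          event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);

          event_free(ev);
          event_base_free(base);
          return 0;
      }
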
+ + Performance: Core + o Replace several TAILQ users with LIST. LIST can be a little faster than + TAILQ for cases where we don't need queue-like behavior. (f9db33d, + 6494772, d313c29, 974d004) + o Disabled code to optimize the case where we reinsert an existing + timeout (e47042f, 09cbc3d) + o Remove a needless base-notify when rescheduling the first timeout (77a96fd) + o Save a needless comparison when removing/adjusting timeouts (dd5189b) + o Possible optimization: split event_queue_insert/remove into + separate functions. needs testing (efc4dc5) + o Make event_count maintenance branchless at the expense of an + extra shift. Needs benchmarking (d1cee3b) + o In the 2.1 branch, let's try out lazy gettimeofday/clock_gettime + comparison (2a83ecc) + o Optimization in event_process_active(): ignore maxcb & endtime + for highest priority events. (a9866aa Alexander Drozdov) + o Bypass event_add when using event_base_once() for a 0-sec timeout (35c5c95) + o Remove the eventqueue list and the ev_next pointers. (604569b 066775e) + + Performance: Evbuffers + o Roughly 20% speed increase when line-draining a buffer using + EVBUFFER_EOL_CRLF (5dde0f0 Mina Naguib) + o Try to squeeze a little more speed out of EVBUFFER_EOL_CRLF (7b9d139) + o Fix a bug in the improved EOL_CRLF code (d927965) + o Remove a needless branch in evbuffer_drain() (d19a326) + + Performance: Linux + o Infrastructure for using faster/fewer syscalls when creating + sockets (a1c042b) + o Minimize syscalls during socket creation in listener.c (7e9e289) + o Use a wrapper function to create the notification + pipe/socketpair/eventfd (ca76cd9) + o Use pipes for telling signals to main thread when possible (a35f396) + o Save syscalls when constructing listener sockets for evhttp (af6c9d8) + o Save some syscalls when creating evdns sockets (713e570) + o Save some syscalls when constructing a socket for a bufferevent (33fca62) + o Prefer epoll_create1 on Linuxen that have it (bac906c) + + Performance: Epoll backend + o Use current event set rather than current pending change when + deciding whether to no-op a del (04ba27e Mike Smellie) + o Replace big chain of if/thens in epoll.c with a table lookup (8c83eb6) + o Clean up error handling in epoll_apply_one_change() a little (2d55a19) + + Performance: Evport backend + o evport: use evmap_io to track fdinfo status. Should save time and + RAM. (4687ce4) + o evport: Remove a linear search over recent events when + reactivating them (0f77efe) + o evport: Use portev_user to remember fdinfo struct (276ec0e) + o evport: don't scan more events in ed_pending than needed (849a5cf) + o evport: Remove artificial low limit on max events per getn call (c04d927) + o Reenable main/many_events_slow_add for evport in 2.1 (e903db3) + + Performance: Windows + o Use GetSystemTimeAsFileTime to implement gettimeofday on + win32. It's faster and more accurate than our old + approach. (b8b8aa5) + + New functions and features: debugging + o Add event_enable_debug_logging() to control use of debug logs (e30a82f) + + New functions and features: core + o Add event_config function to limit time/callbacks between calls + to dispatch (fd4de1e, 9fa56bd, a37a0c0, 3c63edd) + o New EVLOOP_NO_EXIT_ON_EMPTY option to keep looping even when no + events are pending (084e68f) + o Add event_base_get_npriorities() function. 
(ee3a4ee Alexander Drozdov) + o Make evbase_priority_init() and evbase_get_npriorities() + threadsafe (3c55b5e) + o New event_base_update_cache_time() to set cached_tv to current + time (212533e Abel Mathew) + o Add event_self_cbarg() to be used in conjunction with + event_new(). (ed36e6a Ross Lagerwall, fa931bb, 09a1906, 1338e6c, + 33e43ef) + o Add a new libevent_global_shutdown() to free all globals before + exiting. (041ca00 Mark Ellzey, f98c158, 15296d0, 55e991b) + o Use getifaddrs to detect our interfaces if possible (7085a45) + o Add event_base_get_running_event() to get the event* whose cb we + are in (c5732fd, 13dad99) + + New functions and features: building + o Implement --enable-gcc-hardening configure option (7550267 Sebastian Hahn) + + New functions and features: evbuffers + o Add evbuffer_add_file_segment() so one fd can be used efficiently + in more than one evbuffer_add_file at a time (e72afae, c2d9884, + 3f405d2, 0aad014) + o Fix windows file segment mappings (8254de7) + o Allow evbuffer_ptr_set to yield a point just after the end of the + buffer. (e6fe1da) + o Allow evbuffer_ptr to point to position 0 in an empty evbuffer + (7aeb2fd Nir Soffer) + o Set the special "not found" evbuffer_ptr consistently. (e3e97ae Nir Soffer) + o support adding buffers to other buffers non-destructively + (9d7368a Joachim Bauch) + o prevent nested multicast references, reworked locking (26041a8 + Joachim Bauch) + o New EVBUFFER_EOL_NUL to read NUL-terminated strings from an + evbuffer (d7a8b36 Andrea Montefusco, 54142c9) + o Make evbuffer_file_segment_types adaptable (c6bbbf1) + o Added evbuffer_add_iovec and unit tests. (aaec5ac Mark Ellzey, 27b5398) + o Add evbuffer_copyout_from to copy data from the middle of a + buffer (27e2225) + + New functions and features: bufferevents + o Allow users to set allow_dirty_shutdown (099d27d Catalin Patulea) + o Tweak allow_dirty_shutdown documentation (a44cd2b) + o Fix two issues in the allow_dirty_shutdown code. (f3b89de) + o Add a bufferevent_getcb() to find a bufferevent's current + callbacks (a650394) + o bufferevent: Add functions to set/get max_single_read/write + values. (998c813 Alexander Drozdov) + o bev_ssl: Be more specific in event callbacks. evhttp in particular gets + confused without at least one of BEV_EVENT_{READING|WRITING}. (f7eb69a + Catalin Patulea) + + New functions and features: evconnlisteners + o Support TCP_DEFER_ACCEPT sockopts for listeners (5880e4a Mark Ellzey, + a270728) + o Add another caveat to the TCP_DEFER_ACCEPT documentation (a270728) + o Allow evconnlistener to be created in disabled state. (9593a33 + Alexander Drozdov) + o The LEV_OPT_CLOSE_ON_EXEC flag now applies to accepted listener + sockets too (4970329) + + Evhttp: + o Add new evhttp_{connection_}set_timeout_tv() functions to set + finger-grained http timeouts (6350e6c Constantine Verutin) + o Performance tweak to evhttp_parse_request_line. (aee1a97 Mark Ellzey) + o Add missing break to evhttp_parse_request_line (0fcc536) + o Add evhttp callback for bufferevent creation; this lets evhttp + support SSL. (8d3a850) + o Remove calls to deprecated bufferevent functions from evhttp.c (4d63758) + o evhttp: Add evhttp_foreach_bound_socket. (a2c48e3 Samy Al Bahra) + + Build improvements: + o Add AC_USE_SYSTEM_EXTENSIONS to configure.in. Requires follow on + patches for correctness and robustness. 
(1fa7dbe Kevin Bowling) + o Filter '# define' statements from autoconf and generate + event-private.h (321b558 Kevin Bowling) + o Remove internal usage of _GNU_SOURCE (3b26541 Kevin Bowling) + o Eliminate a couple more manual internal _GNU_SOURCE defines (c51ef93 + Kevin Bowling) + o Add AC_GNU_SOURCE to the fallback case. (ea8fa4c Kevin Bowling) + o Use a Configuration Header Template for evconfig-private.h (868f888 + Kevin Bowling) + o Fix a comment warning and add evconfig-private.h to .gitignore + (f6d66bc Kevin Bowling) + o Include evconfig-private.h in internal files for great good. (0915ca0 + Kevin Bowling) + o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5) + (ad03952 Kevin Bowling) + o Prefer the ./configure evconfig-private.h in MinGW, just in + case. (f964b72 Kevin Bowling) + o Shell hack for weird mkdir -p commands (fd7b5a8 Kevin Bowling) + o Add evconfig-private to remaining files (ded0a09 Kevin Bowling) + o Allow use of --enable-silent-rules for quieter compilation with + automake 1.11 (f1f8514 Dave Hart) + o Use "_WIN32", not WIN32: it's standard and we don't need to fake it + (9f560b) + o In configure, test for _WIN32 not WIN32. (85078b1 Peter Rosin) + o Do not define WIN32 in Makefile.nmake (d41f3ea Peter Rosin) + o Provide the autoconf m4 macros for the new OpenSSL via pkg-config + stuff. (674dc3d Harlan Stenn) + o Use pkg-config (if available) to handle OpenSSL. (1c63860 Harlan Stenn) + o We need AM_CPPFLAGS when compiling bufferevent_openssl.c (6d2613b + Harlan Stenn) + o Fix OSX build: $(OPENSSL_INCS) needs to be after + $(AM_CPPFLAGS). (46f1769 Zack Weinberg) + o Make gcc warnings on by default, and --enable-gcc-warnings only add + -Werror (d46517e Sebastian Hahn) + o Split up extra-long AC_CHECK_FUNCS/HEADERS lines in configure.in (88a30ad) + o Move libevent 1.x headers to include/, to put all public headers in + one place. (bbea8d6) + o Put #ifdef around some files to support alternate build + systems. (76d4c92 Ross Lagerwall) + o Also make win32select.c conditional for IDE users (bf2c5a7) + + Debugging: + o Add a magic number to debug_locks to better catch lock-coding + errors. (b4a29c0 Dave Hart) + o munge the debug_lock signature before freeing it: it might help us + catch use-after-free (f28084d) + o Added --enable-event-debugging in configure (bc7b4e4, a9c2c9a Mark Ellzey) + o Debug addition for printing usec on TIMEOUT debugging. (ac43ce0 Mark Ellzey) + o Added usec debug in another area for debug (3baab0d Mark Ellzey) + o added timeout debug logs to include event ptr. (4b7d298 Mark Ellzey) + o more event dbg updates (6727543 Mark Ellzey) + o Clarify event_enable_debug_logging a little (6207826) + o Make --enable-verbose-debug option match its help text (10c3450) + o Add argument checks to some memory functions in `event.c'. (c8953d1 + Mansour Moufid) + + Testing: + o More abstraction in test.sh (cd74c4e) + o Add failing test for evbuffer_search_range. (8e26154 Nir Soffer) + o Tweaks to return types with end-of-buf ptrs (9ab8ab8) + o Add an (internal) usleep function for use by unit tests (f25d9d3) + o Synchronize with upstream tinytest (6c81be7) + o Make test-changelist faster (7622d26) + o Reduce the timeout in the main/fork test. (ab14f7c) + o New evhttp function to adjust initial retry timeout (350a3c4) + o Make regression tests run over 3x faster. 
(67a1763) + o Use test_timeval_diff_eq more consistently (b77b43f) + o Allow more slop in deferred_cb_skew test; freebsd needs it (b9f7e5f) + o When including an -internal.h header outside the main tree, do so + early (95e2455) + o Add a new test: test-fdleak which tests for fd leaks by creating many + sockets. (2ef9278 Ross Lagerwall, f7af194, 1c4288f, etc) + o Add a unit test for event_base_dump_events() (7afe48a, 8d08cce) + o Test more bufferevent_ratelim features (c24f91a) + + Documentation: + o Improve evbuffer_ptr documentation (261ba63) + o added comments to describe refcounting of multicast chains (ba24f61 + Joachim Bauch) + o Add doxygen for event_base_dump_events (cad5753) + + OSX: + o Use "unlimited select" on OSX so that we can have more than + FD_SETSIZE fds (1fb5cc6) + + KQueue: + o Use SIG_IGN instead of a do-nothing handler for signal events with + kqueue (148458e Zack Weinberg) + + evprc: + o event_rpcgen.py now prints status information to stdout and errors to + stderr. (ffb0ba0 Ross Lagerwall) + + Code improvement and refactoring: + o Make event_reinit() more robust and maintainable (272033e) + o Restore fast-path event_reinit() for slower backends (2c4b5de) + o Check changelist as part of checking representational integrity (39b3f38) + o Fix a compile warning in event_reinit (e4a56ed Sebastian Hahn) + o Refactor the functions that run over every event. (c89b4e6) + o Remove the last vestiges of _EVENT_USE_EVENTLIST (a3cec90) + o Make event-config.h depend on Makefile.am (2958a5c) + + Build fixes: + o Don't do clang version detection when disabling some flags (083296b + Sebastian Hahn) + + C standards conformance: + o Check for NULL return on win32 mm_calloc, and set ENOMEM. (af7ba69) + o Convert event-config.h macros to avoid reserved identifiers (68120d9) + o Generate event-config.h using the correct macros. (f82c57e) + o Convert include-guard macro convention to avoid reserved identifiers + (3f8c7cd) + o Make event_rpcgen.py output conform to identifier conventions (372bff1) + o Stop referring to an obsolete include guard in bench_http.h (5c0f7e0) + o Make the generated event-config.h use correct include guards (639383a) + o Fix all identifiers with names beginning with underscore. (cb9da0b) + o Make event_rpcgen.py output conform to identifier conventions, more + (bcefd24) + o Fix some problems introduced by automated identifier cleanup script + (c963534) + o Have all visible internal function names end with an underscore. (8ac3c4c) + o Apply the naming convention to our EVUTIL_IS* functions (c7848fa) + o Clean up lingering _identifiers. (946b584) + o Fix doxygen to use new macro conventions (da455e9) + + Bugfixes: + o Do not use system EAI/AI values if we are not using the system + getaddrinfo. (7bcac07) + + Sample Code: + o Fix up sample/event-test.c to use newer interfaces and make it + actually work. (19bab4f Ross Lagerwall) + o On Unix, remove event.fifo left by sample/event-test.c. (c0dacd2 Ross + Lagerwall) + o Rename event-test.c to event-read-fifo.c. (a5b370a Ross Lagerwall) + o event-read-fifo: Use EV_PERSIST appropriately (24dab0b) + + + + diff --git a/probe-busybox/libevent-2.1.11-stable/ChangeLog-1.4 b/probe-busybox/libevent-2.1.11-stable/ChangeLog-1.4 new file mode 100644 index 00000000..d7f6517b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/ChangeLog-1.4 @@ -0,0 +1,231 @@ +Changes in 1.4.14b-stable + o Set the VERSION_INFO correctly for 1.4.14 + +Changes in 1.4.14-stable + o Add a .gitignore file for the 1.4 branch. 
(d014edb) + o Backport evbuffer_readln(). (b04cc60 Nicholas Marriott) + o Make the evbuffer_readln backport follow the current API (c545485) + o Valgrind fix: Clear struct kevent before checking for OSX bug. (5713d5d William Ahern) + o Fix a crash when reading badly formatted resolve.conf (5b10d00 Yasuoka Masahiko) + o Fix memory-leak of signal handler array with kqueue. [backport] (01f3775) + o Update sample/signal-test.c to use newer APIs and not leak. (891765c Evan Jones) + o Correct all versions in 1.4 branch (ac0d213) + o Make evutil_make_socket_nonblocking() leave any other flags alone. (81c26ba Jardel Weyrich) + o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (5f2e250 Jardel Weyrich) + o Correct a debug message in evhttp_parse_request_line (35df59e) + o Merge branch 'readln-backport' into patches-1.4 (8771d5b) + o Do not send an HTTP error when we've already closed or responded. (4fd2dd9 Pavel Plesov) + o Re-add event_siglcb; some old code _was_ still using it. :( (bd03d06) + o Make Libevent 1.4 build on win32 with Unicode enabled. (bce58d6 Brodie Thiesfield) + o Distribute nmake makefile for 1.4 (20d706d) + o do not fail while sending on http connections the client closed. (5c8b446) + o make evhttp_send() safe against terminated connections, too (01ea0c5) + o Fix a free(NULL) in min_heap.h (2458934) + o Fix memory leak when setting up priorities; reported by Alexander Drozdov (cb1a722) + o Clean up properly when adding a signal handler fails. (ae6ece0 Gilad Benjamini) + o Do not abort HTTP requests missing a reason string. (29d7b32 Pierre Phaneuf) + o Fix compile warning in http.c (906d573) + o Define _REENTRANT as needed on Solaris, elsewhere (6cbea13) + + +Changes in 1.4.13-stable: + o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov. + o Do not allocate the maximum event queue and fd array for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 512K per epoll-based event_base. Resolves bug 2839240. + o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h + o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec + o Rename our replacement compat/sys/_time.h header to avoid build a conflict on HPUX; reported by Kathryn Hogg. + o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian. + o Fix a problem with excessive memory allocation when using multiple event priorities. + o When running set[ug]id, don't check the environment. Based on a patch from OpenBSD. + + +Changes in 1.4.12-stable: + o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair. + o Fix an obscure timing-dependent, allocator-dependent crash in the evdns code. + o Use __VA_ARGS__ syntax for varargs macros in event_rpcgen when compiler is not GCC. + o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32). + o Fix another pair of fencepost bugs in epoll.c. [Patch from Adam Langley.] + o Do not break evdns connections to nameservers when our IP changes. + o Set truncated flag correctly in evdns server replies. + o Disable strict aliasing with GCC: our code is not compliant with it. 
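
  The 1.4.14 entries above note that the evbuffer_readln() backport follows
  the current (2.x-style) calling convention. As an illustration only, not
  part of this ChangeLog, a minimal sketch of that usage written against the
  event2 headers:

      #include <event2/buffer.h>
      #include <stdio.h>
      #include <stdlib.h>

      int main(void)
      {
          struct evbuffer *buf = evbuffer_new();
          size_t len = 0;
          char *line;

          /* Feed in some CRLF-terminated data, as an HTTP peer might send. */
          evbuffer_add(buf, "first line\r\nsecond line\r\n", 25);

          /* evbuffer_readln() drains one line from the buffer and returns it
           * as a newly allocated NUL-terminated string; the caller frees it. */
          while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
              printf("got %zu bytes: %s\n", len, line);
              free(line);
          }

          evbuffer_free(buf);
          return 0;
      }
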
+ +Changes in 1.4.11-stable: + o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen] + o Remove the limit on size of HTTP headers by removing static buffers. + o Fix a nasty dangling pointer bug in epoll.c that could occur after epoll_recalc(). [Patch from Kevin Springborn] + o Distribute Win32-Code/event-config.h, not ./event-config.h + +Changes in 1.4.10-stable: + o clean up buffered http connection data on reset; reported by Brian O'Kelley + o bug fix and potential race condition in signal handling; from Alexander Drozdov + o rename the Solaris event ports backend to evport + o support compilation on Haiku + o fix signal processing when a signal callback delivers a signal; from Alexander Drozdov + o const-ify some arguments to evdns functions. + o off-by-one error in epoll_recalc; reported by Victor Goya + o include Doxyfile in tar ball; from Jeff Garzik + o correctly parse queries with encoded \r, \n or + characters + +Changes in 1.4.9-stable: + o event_add would not return error for some backends; from Dean McNamee + o Clear the timer cache on entering the event loop; reported by Victor Chang + o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez + o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones. + o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn + o Fix a typo in setting the global event base; reported by lance. + o Fix a memory leak when reading multi-line headers + o Fix a memory leak by not running explicit close detection for server connections + +Changes in 1.4.8-stable: + o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov. + o Fix a merge problem in which name_from_addr returned pointers to the stack; found by Jiang Hong. + o Do not remove Accept-Encoding header + +Changes in 1.4.7-stable: + o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me. + +Changes in 1.4.6-stable: + o evutil.h now includes <stdarg.h> directly + o switch all uses of [v]snprintf over to evutil + o Correct handling of trailing headers in chunked replies; from Scott Lamb. + o Support multi-line HTTP headers; based on a patch from Moshe Litvin + o Reject negative Content-Length headers; anonymous bug report + o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report + o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail + o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov. + o Deal with evbuffer_read() returning -1 on EINTR|EAGAIN; from Adam Langley. + o Fix a bug in which the DNS server would incorrectly set the type of a cname reply to a. + o Fix a bug where setting the timeout on a bufferevent would take not effect if the event was already pending. + o Fix a memory leak when using signals for some event bases; reported by Alexander Drozdov. + o Add libevent.vcproj file to distribution to help with Windows build. + o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov. + o Fix off-by-one errors in devpoll; from Ian Bell + o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Do not warn on accept when errno is either EAGAIN or EINTR + +Changes in 1.4.5-stable: + o Fix connection keep-alive behavior for HTTP/1.0 + o Fix use of freed memory in event_reinit; pointed out by Peter Postma + o Constify struct timeval * where possible; pointed out by Forest Wilkinson + o allow min_heap_erase to be called on removed members; from liusifan. + o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on on-win32 platforms for backwards compatibility. + o Do not use SO_REUSEADDR when connecting + o Fix Windows build + o Fix a bug in event_rpcgen when generated fixed-sized entries + +Changes in 1.4.4-stable: + o Correct the documentation on buffer printf functions. + o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select. + o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed. + o Fix a potential stack corruption bug in tagging on 64-bit CPUs. + o expose bufferevent_setwatermark via header files and fix high watermark on read + o fix a bug in bufferevent read water marks and add a test for them + o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents + o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy. + o reduce system calls for getting current time by caching it. + o fix evhttp_bind_socket() so that multiple sockets can be bound by the same http server. + o Build test directory correctly with CPPFLAGS set. + o Fix build under Visual C++ 2005. + o Expose evhttp_accept_socket() API. + o Merge windows gettimeofday() replacement into a new evutil_gettimeofday() function. + o Fix autoconf script behavior on IRIX. + o Make sure winsock2.h include always comes before windows.h include. + +Changes in 1.4.3-stable: + o include Content-Length in reply for HTTP/1.0 requests with keep-alive + o Patch from Tani Hosokawa: make some functions in http.c threadsafe. + o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin + o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks. + o make event methods static so that they are not exported; from Andrei Nigmatulin + o make RPC replies use application/octet-stream as mime type + o do not delete uninitialized timeout event in evdns + +Changes in 1.4.2-rc: + o remove pending timeouts on event_base_free() + o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards + o devpoll and evport need reinit; tested by W.C.A Wijngaards + o event_base_get_method; from Springande Ulv + o Send CRLF after each chunk in HTTP output, for compliance with RFC2626. Patch from "propanbutan". Fixes bug 1894184. + o Add a int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values. + o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb. + o Allow regression code to build even without Python installed + o remove NDEBUG ifdefs from evdns.c + o update documentation of event_loop and event_base_loop; from Tani Hosokawa. + o detect integer types properly on platforms without stdint.h + o Remove "AM_MAINTAINER_MODE" declaration in configure.in: now makefiles and configure should get re-generated automatically when Makefile.am or configure.in chanes. 
+ o do not insert event into list when evsel->add fails + +Changes in 1.4.1-beta: + o free minheap on event_base_free(); from Christopher Layne + o debug cleanups in signal.c; from Christopher Layne + o provide event_base_new() that does not set the current_base global + o bufferevent_write now uses a const source argument; report from Charles Kerr + o better documentation for event_base_loopexit; from Scott Lamb. + o Make kqueue have the same behavior as other backends when a signal is caught between event_add() and event_loop(). Previously, it would catch and ignore such signals. + o Make kqueue restore signal handlers correctly when event_del() is called. + o provide event_reinit() to reintialize an event_base after fork + o small improvements to evhttp documentation + o always generate Date and Content-Length headers for HTTP/1.1 replies + o set the correct event base for HTTP close events + o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb + o Removed obsoleted recalc code + o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures. + o fix a bug with event_rpcgen for integers + o move EV_PERSIST handling out of the event backends + o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly. + o prefix {encode,decode}_tag functions with evtag to avoid collisions + o Correctly handle DNS replies with no answers set (Fixes bug 1846282) + o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.) + o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions. + o removed linger from http server socket; reported by Ilya Martynov + o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr. + o demote most http warnings to debug messages + o Fix Solaris compilation; from Magne Mahre + o Add a "Date" header to HTTP responses, as required by HTTP 1.1. + o Support specifying the local address of an evhttp_connection using set_local_address + o Fix a memory leak in which failed HTTP connections would not free the request object + o Make adding of array members in event_rpcgen more efficient, but doubling memory allocation + o Fix a memory leak in the DNS server + o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled + o Fix buffer size and string generation in evdns_resolve_reverse_ipv6(). + o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them. + o In DNS responses, the CD flag should be preserved, not the TC flag. + o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne + o Handle NULL timeouts correctly on Solaris; from Trond Norbye + o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye + o Add Doxygen documentation to header files; from Mark Heily + o Add a evdns_set_transaction_id_fn() function to override the default + transaction ID generation code. + o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too. + o Fix signals implementation on windows. 
+ o Fix http module on windows to close sockets properly. + o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Hagne Mahre and then Hannah Schroeter.) + o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye) + o Make test subdirectory compile under mingw. + o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile). + o Make the test.sh script run unit tests for the evpoll method. + o Make the entire evdns.h header enclosed in "extern C" as appropriate. + o Fix implementation of strsep on platforms that lack it + o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov. + o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa + o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication. + o use a min heap instead of a red-black tree for timeouts; as a result finding the min is a O(1) operation now; from Maxim Yegorushkin + o associate an event base with an rpc pool + o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent. libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support + o Begin using libtool's library versioning support correctly. If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility. + o Fix evhttp.h compilation when TAILQ_ENTRY is not defined. + o Small code cleanups in epoll_dispatch(). + o Increase the maximum number of addresses read from a packet in evdns to 32. + o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much. Let us know if there's a good reason to put it back in. + o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++. Use a macro so that old code won't break. Mark the macro as deprecated. + o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests. + o Make event_base_free() succeed even if there are pending non-internal events on a base. This may still leak memory and fds, but at least it no longer crashes. + o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files. + o Remove the long-deprecated acconfig.h file. + o Do not require #include <sys/types.h> before #include <event.h>. + o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions. + o Fix many build issues when using the Microsoft C compiler. + o Remove a bash-ism in autogen.sh + o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne. + o Make the logic for active events work better with internal events; patch from Christopher Layne. + o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.
diff --git a/probe-busybox/libevent-2.1.11-stable/ChangeLog-2.0 b/probe-busybox/libevent-2.1.11-stable/ChangeLog-2.0 new file mode 100644 index 00000000..16f36008 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/ChangeLog-2.0 @@ -0,0 +1,1280 @@ +Changes in version 2.0.21-stable (18 Nov 2012) +BUGFIXES: + o ssl: Don't discard SSL read event when timeout and read come close together (576b29f) + o ssl: Stop looping in "consider_reading" if reading is suspended. (f719b8a Joachim Bauch) + o ssl: No need to reserve space if reading is suspended. (1acf2eb Joachim Bauch) + o dns: Avoid a memory-leak on OOM in evdns. (73e85dd, f2bff75 George Danchev) + o build: Use python2 rather than python (0eb0109 Ross Lagerwall) + o build: Compile without warnings on mingw64 (94866c2) + o build: Fix compilation on mingw64 with -DUSE_DEBUG (62bd2c4) + o build: Make rpcgen_wrapper.sh work on systems without a "python2" binary (f3009e4) + o iocp: Close IOCP listener socket on free when LEV_OPT_CLOSE_ON_FREE is set (cb853ea Juan Pablo Fernandez) + o core: Avoid crash when event_pending() called with no event_base set on event (e3cccf3) + o misc: remove stray 'x' so print_err will compile when uncommented (ac35650 Patrick Pelletier) + o tests: Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086) + o tests: Warn when openssl version in unit test mismatches compiled version. (ac009f9) + + +Changes in version 2.0.20-stable (23 Aug 2012) +BUGFIXES: + o core: Make event_pending() threadsafe. (be7a95c Simon Liu) + o win32: avoid crash when waiting forever on zero fds. (160e58b) + o evhttp: Fix a memory leak on error in evhttp_uriencode (11c8b31) + o evbuffer: Avoid possible needless call to writev. Found by coverity. (6a4ec5c) + o evdns: memset sockaddr_in before using it. Found by coverity. (a1a0e67) + o evhttp: Check more setsockopt return values when binding sockets. Found by coverity (a0912e3) + o evdns: Avoid segfault on weird timeout during name lookup. (dc32077 Greg Hazel) + o bufferevent_ssl: Correctly invoke callbacks when a SSL bufferevent reads some and then blocks. (606ac43) + + +PORTABILITY FIXES: + o check for arc4random_buf at runtime, on OS X (bff5f94 Greg Hazel) + o Correctly check for arc4random_buf (fcec3e8 Sebastian Hahn) + o Add explicit AC_PROG_SED to configure.in so all autoconfs will expose $(SED) (ca80ea6) + +BUILD FIXES: + o Add GCC annotations so that the vsprintf functions get checked properly (117e327) + o Fix an unused variable warning on *BSD. (c0720c1) + +UNIT TEST FIXES: + o Fix a couple of memory leaks (found with Valgrind). (3b2529a Ross Lagerwall) + o Remove deadcode in http regression tests. Found by coverity. (5553346) + o Fix possible uninitialized read in dns regression tests. Found by coverity. (2259777) + o Set umask before calling mkstemp in unit tests. Found by coverity (f1ce15d) + o Fix various check-after-dereference issues in unit tests: found by coverity (4f3732d) + o Fix resource leaks in the unit tests; found by coverity (270f279) + o Add some missing null checks to unit tests; found by coverity (f021c3d) + o Avoid more crashes/bad calls in unit tests; found by coverity (3cde5bf) + o Remove unused variable; spotted by coverity (6355b2a) + o Add checks to various return values in unit tests. Found by coverity (b9e7329) + o Move assignment outside tt_assert in ssl unit tests. Appeases coverity. 
(a2006c0) + + + +Changes in version 2.0.19-stable (3 May 2012) +BUGFIXES (CORE): + o Refactor event_persist_closure: raise and extract some common logic (bec22b4) + o If time has jumped so we'd reschedule a periodic event in the past, schedule it for the future instead (dfd808c) + o If a higher-priority event becomes active, don't continue running events of the current priority. (2bfda40) + +BUGFIXES (SSL): + o Fixed potential double-readcb execution with openssl bufferevents. (4e62cd1 Mark Ellzey) + +BUGFIXES (DNS): + o Cancel a probe request when the server is freed, and ignore cancelled probe callbacks (94d2336 Greg Hazel) + o Remove redundant DNS_ERR_CANCEL check, move comment (46b8060 Greg Hazel) + o When retransmitting a timed-out DNS request, pick a fresh nameserver. (3d9e52a) + +DOCUMENTATION FIXES: + o Fix a typo in the bufferevent documentation (98e9119) + o Add missing ) to changelog; spotted by rransom (4c7ee6b) + o Fix the website URL in the readme (f775521) + +COMPILATION FIXES: + o Fix a compilation error with MSVC 2005 due to use of mode_t (336dcae) + o Configure with gcc older than 2.95 (4a6fd43 Sebastian Hahn) + o Generate event-config.h with a single sed script (30b6f88 Zack Weinberg) + +FORWARD-COMPATIBILITY: + o Backport: provide EVENT_LOG_* names, and deprecate _EVENT_LOG_* (d1a03b2) + +TESTING/DEBUGGING SUPPORT: + o dns-example.c can now take a resolv.conf file on the commandline (6610fa5) + o Make some evdns.c debug logs more verbose (d873d67) + o Work-around a stupid gcov-breaking bug in OSX 10.6 (b3887cd) + + + +Changes in version 2.0.18-stable (22 Mar 2012) +BUGFIXES (core): + o Make uses of open() close-on-exec safe by introducing an internal evutil_open_closeonexec. (d2b5f72 Ross Lagerwall, 03dce42) + +BUGFIXES (kqueue): + o Properly zero the kevent in kq_setup_kevent() (c2c7b39 Sebastian Hahn) + +BUILD FIXES: + o Added OPENSSL_LDFLAGS env variable which is appended to SSL checks. (9278196 Mark Ellzey) + o Changed OPENSSL_LDFLAGS to OPENSSL_LIBADD (2d67b63 Mark Ellzey) + o Don't do clang version detection when disabling some flags (083296b Sebastian Hahn) + +BUGFIXES (dns): + o Stop crashing in evdns when nameserver probes give a weird error (bec5068) + + +Changes in version 2.0.17-stable (10 Feb 2012) + +BUGFIXES (core): + o Be absolutely sure to clear pncalls before leaving event_signal_closure (11f36a5) + o check for sysctl before we use it (358c745 Mike Frysinger) + o Remove bogus casts of socket to int before calling ev_callback (f032516) + o Make evconnlistener work around bug in older Linux when getting nmapped (ecfc720) + o Fix a list corruption bug when using event_reinit() with signals present (6e41cdc) + o Fix a fd leak in event_reinit() (3f18ad1) + o Do a memberwise comparison of threading function tables (c94a5f2 Nate R) + o Use C-style comments in C source files (for compatibility with compilers such as xlc on AIX). (d84d917 Greg Hewgill) + o Avoid crash when freeing event_iocp and using event_set_mem_functions (19715a6) + o In the kqueue backend, do not report EBADF as an EV_READ (5d7bfa1 Nicholas Marriott) + +BUGFIXES (evbuffer and bufferevents): + o Fix behavior of evbuffer_peek(buf,-1,NULL,NULL,0) (c986f23 Zack Weinberg) + o Loop on filtering SSL reads until we are blocked or exhausted. (5b4b812) + +BUGFIXES (evhttp): + o Force strict validation of HTTP version in response. 
(790f6b3 Catalin Patulea) + +BUGFIXES (evdns): + o evdns: fix a bug in circular-queue implementation (d6094b1) + +BUILD FIXES: + o Fix a silly compilation error with the sun compiler (1927776 Colin Watt) + o Suppress a gcc warning from ignoring fwrite return in http-sample.c (7206e8c) + +DOCUMENTATION FIXES: + o Slightly clarify evbuffer_peek documentation (7bbf6ca) + o Update copyright notices to 2012 (e49e289) + +NEW APIS: + o Backport evhttp_connection_get_bufferevent to Libevent 2.0 (da70fa7 Arno Bakker) + +TESTS AND TEST FIXES: + o Fix a race condition in the dns/bufferevent_connect_hostname test. (cba48c7) + o Add function to check referential integrity of an event_base (27737d5) + o Check event_base correctness at end of each unit test (3312b02) + o Workaround in the unit tests for an apparent epoll bug in Linux 3.2 (dab9187) + o Better workaround for Linux 3.2 edge-triggered epoll bug (9f9e259) + +Changes in version 2.0.16-stable (18 Nov 2011) +BUGFIXES (core): + o More detailed message in case of libevent self-debugging failure. (9e6a4ef Leonid Evdokimov) + o epoll: close fd on alloc fail at initialization (1aee718 Jamie Iles) + o Fix compile warning from saying event2/*.h inside a comment (447b0ba) + o Warn when unable to construct base because of failing make_base_notifiable (4e797f3) + o Don't try to make notifiable event_base when no threading fns are configured (e787413) + +BUGFIXES (evbuffer): + o unit test for remove_buffer bug (90bd620 Greg Hazel) + o Fix an evbuffer crash in evbuffer_remove_buffer() (c37069c) + +BUGFIXES (bufferevent_openssl): + o Refactor amount-to-read calculations in buffervent_ssl consider_reading() (a186e73 Mark Ellzey) + o Move SSL rate-limit enforcement into bytes_to_read() (96c562f) + o Avoid spinning on OpenSSL reads (2aa036f Mark Ellzey) + +BUGFIXES (dns) + o Empty DNS reply with OK status is another way to say NODATA. (21a08d6 Leonid Evdokimov) + +TESTING: + o Tests for 94fba5b and f72e8f6 (d58c15e Leonid Evdokimov) + o Test for commit aff6ba1 (f7841bf Leonid Evdokimov) + o Style and comment tweaks for dns/leak* tests (5e42202) + o improve test to remove at least one buffer from src (7eb52eb Greg Hazel) + +DOCUMENTATION: + o Add note about evhttp_send_reply_end to its doxygen (724bfb5) + o Update copyright dates to 2011. (3c824bd) + o Fix typo in whatsnew-2.0.txt (674bc6a Mansour Moufid) + o Improve win32 behavior of dns-sample.c code (a3f320e Gisle Vanem) + + + +Changes in version 2.0.15-stable (12 Oct 2011) +BUGFIXES (DNS): + o DNS: add ttl for negative answers using RFC 2308 idea. (f72e8f6 Leonid Evdokimov) + o Add DNS_ERR_NODATA error code to handle empty replies. (94fba5b Leonid Evdokimov) + +BUFGIXES (bufferevents and evbuffers): + o Make evbuffer callbacks get the right n_added value after evbuffer_add (1ef1f68 Alex) + o Prefer mmap to sendfile unless a DRAINS_TO_FD flag is set. Allows add_file to work with SSL. (0ba0af9) + +BUGFIXES (event loop): + o When a signal callback is activated to run multiple times, allow event_base_loopbreak to work even before they all have run. (4e8eb6a) + +DOCUMENTATION FIXES: + o Fix docstring in dns.h (2b6eae5 Leonid Evdokimov) + o refer to non-deprecated evdns functions in comments (ba5c27d Greg Hazel) + +BUILD AND TESTING FIXES: + o le-proxy and regress depend on openssl directly (9ae061a Sergey Avseyev) + o Use _SOURCES, not _sources, in sample/Makefile.am (7f82382) + o Fixed compiler warnings for unchecked read/write calls. 
(c3b62fd Mark Ellzey) + o Make write-checking fixes use tt_fail_perror (2b76847) + o Fix some "value never used" warnings with gcc 4.6.1 (39c0cf7) + + + +Changes in version 2.0.14-stable (31 Aug 2011) +BUGFIXES (bufferevents and evbuffers): + o Propagate errors on the underlying bufferevent to the user. (4a34394 Joachim Bauch) + o Ignore OpenSSL deprecation warnings on OS X (5d1b255 Sebastian Hahn) + o Fix handling of group rate limits under 64 bytes of burst (6d5440e) + o Solaris sendfile: correctly detect amount of data sent (643922e Michael Herf) + o Make rate limiting work with common_timeout logic (5b18f13) + o clear read watermark on underlying bufferevent when creating filtering bev to fix potentially failing fragmented ssl handshakes (54f7e61 Joachim Bauch) + +BUGFIXES (IOCP): + o IOCP: don't launch reads or writes on an unconnected socket (495c227) + o Make IOCP rate-limiting group support stricter and less surprising. (a98da7b) + o Have test-ratelim.c support IOCP (0ff2c5a) + o Make overlapped reads result in evbuffer callbacks getting invoked (6acfbdd) + o Correctly terminate IO on an async bufferevent on bufferevent_free (e6af35d) + +BUGFIXES (other): + o Fix evsig_dealloc memory leak with debugging turned on. (9b724b2 Leonid Evdokimov) + o Fix request_finished memory leak with debugging turned on. (aff6ba1 Leonid Evdokimov) + +BUILD AND TESTING FIXES: + o Allow OS-neutral builds for platforms where some versions have arc4random_buf (b442302 Mitchell Livingston) + o Try to fix 'make distcheck' errors when building out-of-tree (04656ea Dave Hart) + o Clean up some problems identified by Coverity. (7c11e51 Harlan Stenn) + + +Changes in version 2.0.13-stable (18 Jul 2011) +BUGFIXES + o Avoid race-condition when initializing global locks (b683cae) + o Fix bug in SSL bufferevents backed by a bev with a write high-watermarks (e050703 Joachim Bauch) + o Speed up invoke_callbacks on evbuffers when there are no callbacks (f87f568 Mark Ellzey) + o Avoid a segfault when all methods are disabled or broken (27ce38b) + o Fix incorrect results from evbuffer_search_eol(EOL_LF) (4461f1a) + o Add some missing checks for mm_calloc failures (89d5e09) + o Replace an assertion for event_base_free(NULL) with a check-and-warn (09fe97d) + o Report kqueue ebadf, epipe, and eperm as EV_READ events (1fd34ab) + o Check if the `evhttp_new_object' function in `http.c' returns NULL. 
(446cc7a Mansour Moufid) + o Use the correct printf args when formatting size_t (3203f88) + o Complain if the caller tries to change threading cbs after setting them (cb6ecee) + +DOCUMENTATION FIXES AND IMPROVEMENTS + o Revise the event/evbuffer/bufferevent doxygen for clarity and accuracy (2888fac) + o Update Doxyfile to produce more useful output (aea0555) + +TEST FIXES + o Fix up test_evutil_snprintf (caf695a) + o Fix tinytest invocation from windows shell (57def34 Ed Day) + +BUILD FIXES + o Use AM_CPPFLAGS in sample/Makefile.am, not AM_CFLAGS (4a5c82d) + o Fix select.c compilation on systems with no NFDBITS (49d1136) + o Fix a few warnings on OpenBSD (8ee9f9c Nicholas Marriott) + o Don't break when building tests from git without python installed (b031adf) + o Don't install event_rpcgen.py when --disable-libevent-install is used (e23cda3 Harlan Stenn) + o Fix AIX build issue with TAILQ_FOREACH definition (e934096) + + +Changes in version 2.0.12-stable (4 Jun 2011) +BUGFIXES + o Fix a warn-and-fail bug in kqueue by providing kevent() room to report errors (28317a0) + o Fix an assert-inducing fencepost bug in the select backend (d90149d) + o Fix failing http assertion introducd in commit 0d6622e (0848814 Kevin Ko) + o Fix a bug that prevented us from configuring IPv6 nameservers. (74760f1) + o Prevent size_t overflow in evhttp_htmlescape. (06c51cd Mansour Moufid) + o Added several checks for under/overflow conditions in evhttp_handle_chunked_read (a279272 Mark Ellzey) + o Added overflow checks in evhttp_read_body and evhttp_get_body (84560fc Mark Ellzey) + +DOCUMENTATION: + o Add missing words to EVLOOP_NONBLOCK documentation (9556a7d) + +BUILD FIXES + o libssl depends on libcrypto, not the other way around. (274dd03 Peter Rosin) + o Libtool brings in the dependencies of libevent_openssl.la automatically (7b819f2 Peter Rosin) + o Use OPENSSL_LIBS in Makefile.am (292092e Sebastian Hahn) + o Move the win32 detection in configure.in (ceb03b9 Sebastian Hahn) + o Correctly detect openssl on windows (6619385 Sebastian Hahn) + o Fix a compile warning with zlib 1.2.4 and 1.2.5 (5786b91 Sebastian Hahn) + o Fix compilation with GCC 2, which had no __builtin_expect (09d39a1 Dave Hart) + o Fix new warnings from GCC 4.6 (06a714f) + o Link with -lshell32 and -ladvapi32 on Win32. (86090ee Peter Rosin) + o Make the tests build when OpenSSL is not available. (07c41be Peter Rosin) + o Bring in the compile script from automake, if needed. (f3c7a4c Peter Rosin) + o MSVC does not provide S_ISDIR, so provide it manually. (70be7d1 Peter Rosin) + o unistd.h and sys/time.h might not exist. (fe93022 Peter Rosin) + o Make sure TINYTEST_LOCAL is defined when building tinytest.c (8fa030c Peter Rosin) + o Fix winsock2.h #include issues with MSVC (3d768dc Peter Rosin) + o Use evutil_gettimeofday instead of relying on the system gettimeofday. (0de87fe Peter Rosin) + o Always use evutil_snprintf, even if OS provides it (d1b2d11 Sebastian Hahn) + o InitializeCriticalSectionAndSpinCount requires _WIN32_WINNT >= 0x0403. (816115a Peter Rosin) + o cygwin: make it possible to build DLLs (d54d3fc) + + + +Changes in version 2.0.11-stable (27 Apr 2011) + [Autogenerated from the Git log, sorted and cleaned by hand.] +BUGFIXES: + o Fix evport handling of POLLHUP and POLLERR (b42ce4b) + o Fix compilation on Windows with NDEBUG (cb8059d) + o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (0144886 Trond Norbye) + o Detect and handle more allocation failures. 
(666b096 Jardel Weyrich) + o Use event_err() only if the failure is truly unrecoverable. (3f8d22a Jardel Weyrich) + o Handle resize failures in the select backend better. (83e805a) + o Correctly free selectop fields when select_resize fails in select_init (0c0ec0b) + o Make --enable-gcc-warnings a no-op if not using gcc (3267703) + o Fix a type error in our (unused) arc4random_stir() (f736198) + o Correctly detect and stop non-chunked http requests when the body is too long (63a715e) + o Have event_base_gettimeofday_cached() always return wall-clock time (a459ef7) + o Workaround for http crash bug 3078187 (5dc5662 Tomash Brechko) + o Fix incorrect assertions and possible use-after-free in evrpc_free() (4b8f02f Christophe Fillot) + o Reset outgoing http connection when read data in idle state. (272823f Tomash Brechko) + o Fix subtle recursion in evhttp_connection_cb_cleanup(). (218cf19 Tomash Brechko) + o Fix the case when failed evhttp_make_request() leaved request in the queue. (0d6622e Tomash Brechko) + o Fix a crash bug in evdns server circular list code (00e91b3) + o Handle calloc failure in evdns. (Found by Dave Hart) (364291e) + o Fix a memory leak on win32 socket->event map. (b4f89f0) + o Add a forgotten NULL check to evhttp_parse_headers (12311ff Sebastian Hahn) + o Fix possible NULL-deref in evdns_cancel_request (5208544 Sebastian Hahn) + +PORTABILITY: + o Fall back to sscanf if we have no other way to implement strtoll (453317b) + o Build correctly on platforms without sockaddr_storage (9184563) + o Try to build correctly on platforms with no IPv6 support (713c254) + o Build on systems without AI_PASSIVE (cb92113) + o Fix http unit test on non-windows platforms without getaddrinfo (6092f12) + o Do not check for gethostbyname_r versions if we have getaddrinfo (c1260b0) + o Include arpa/inet.h as needed on HPUX (10c834c Harlan Stenn) + o Include util-internal.h as needed to build on platforms with no sockaddr_storage (bbf5515 Harlan Stenn) + o Check for getservbyname even if not on win32. (af08a94 Harlan Stenn) + o Add -D_OSF_SOURCE to fix hpux builds (0b33479 Harlan Stenn) + o Check for allocation failures in apply_socktype_protocol_hack (637d17a) + o Fix the check for multicast or broadcast addresses in evutil_check_interfaces (1a21d7b) + o Avoid a free(NULL) if out-of-memory in evdns_getaddrinfo. Found by Dave Hart (3417f68) + +DEFENSIVE PROGRAMMING: + o Add compile-time check for AF_UNSPEC==PF_UNSPEC (3c8f4e7) + +BUGS IN TESTS: + o Fix test.sh output on solaris (b4f89b6 Dave Hart) + o Make test-eof fail with a timeout if we never get an eof. 
(05a2c22 Harlan Stenn) + o Use %s with printf in test.sh (039b9bd) + o Add an assert to appease clang's static analyzer (b0ff7eb Sebastian Hahn) + o Add a forgotten return value check in the unit tests (3819b62 Sebastian Hahn) + o Actually send NULL request in http_bad_request_test (b693c32 Sebastian Hahn) + o add some (void) casts for unused variables (65707d7 Sebastian Hahn) + o Refactor test_getaddrinfo_async_cancel_stress() (48c44a6 Sebastian Hahn) + o Be nice and "handle" error return values in sample code (4bac793 Sebastian Hahn) + o Check return value of evbuffer_add_cb in tests (93a1abb Sebastian Hahn) + o Remote some dead code from dns-example.c (744c745 Sebastian Hahn) + o Zero a struct sockaddr_in before using it (646f9fe Sebastian Hahn) + +BUILD FIXES: + o Fix warnings about AC_LANG_PROGRAM usage (f663112 Sebastian Hahn) + o Skip check for zlib if we have no zlib.h (a317c06 Harlan Stenn) + o Fix autoconf bracket issues; make check for getaddrinfo include netdb.h (833e5e9 Harlan Stenn) + o Correct an AM_CFLAGS to an AM_CPPFLAGS in test/Makefile.am (9c469db Dave Hart) + o Fix make distcheck & installation of libevent 1 headers (b5a1f9f Dave Hart) + o Fix compilation under LLVM/clang with --enable-gcc-warnings (ad9ff58 Sebastian Hahn) + +FEATURES: + o Make URI parser able to tolerate nonconformant URIs. (95060b5) + +DOCUMENTATION: + o Clarify event_set_mem_functions doc (926f816) + o Correct evhttp_del_accept_socket documentation on whether socket is closed (f665924) + o fix spelling mistake in whatsnew-2.0.txt (deb2f73) + o Fix sample/http-server ipv6 fixes (eb692be) + o Comment internal headers used in sample code. (4eb281c) + o Be explicit about how long event loops run in event.h documentation (f95bafb) + o Add comment to configure.in to explain gc-sections test logic (c621359) + o Fix a couple of memory leaks in samples/http-server.c. Found by Dave Hart. (2e9f665) + +BUILD IMPROVEMENTS: + o Use the gcc -ffunction-segments feature to allow gc when linking with static libevent (0965c56 Dave Hart) + o Add configure options to disable installation, regression tests (49e9bb7 Dave Hart) + + + +Changes in version 2.0.10-stable (16 Dec 2010) + [Autogenerated from the Git log, sorted and cleaned by hand.] +BUGFIXES + o Minor fix for IOCP shutdown handling fix (2599b2d Kelly Brock) + o Correctly notify the main thread when activating an event from a subthread (5beeec9) + o Reject overlong http requests early when Expect:100-continue is set (d23839f Constantine Verutin) + o EVUTIL_ASSERT: Use sizeof() to avoid "unused variable" warnings with -DNDEBUG. (b63ab17 Evan Jones) + +CODE CLEANUPS + o bufferevent-internal.h: Use the new event2/util.h header, not evutil.h (ef5e65a Evan Jones) + o Use relative includes instead of system includes consistently. (fbe64f2 Evan Jones) + o Make whitespace more consistent + +TESTING + o tests: Use new event2 headers instead of old compatibility headers. (4f33209 Evan Jones) + +DOCUMENTATION + o Document that the cpu_hint is only used on Windows with IOCP for now (57689c4) + o Add stuff to "whats new in 2.0" based on reading include changes since August. (18adc3f) + + +Changes in 2.0.9-rc (30 Nov 2010): + [Autogenerated from the Git log, sorted and cleaned by hand.] +NEW AND MODIFIED APIs + o Add a function to change a listener's callback. 
(46ee061) + o Make evbuffer_add_file take ev_off_t, not off_t (ac7e52d) + o Make rate-limits go up to SIZE_MAX/EV_SSIZE_MAX, not just INT32_MAX (2cbb1a1) + o Add a bufferevent_get_base function (aab49b6) + +MAJOR BUGFIXES + o Disable changelist for epoll by default because of Linux dup() bug; add an option and/or an envvar to reenable it for speed. (9531763) + o Fix a 100%-CPU bug where an SSL connection would sometimes never stop trying to write (1213d3d) + o Fix a nasty bug related to use of dup() with epoll on Linux (c281aba) + o Fix bugs in posix thread-id calculation when sizeof(pthread_t) != sizeof(long) (fbaf077) + o Fix some ints to evutil_socket_t; make tests pass on win64. (f817bfa Dimitre Piskyulev) + o Set _EVENT_SIZEOF_VOID_P correctly on win32 and win64 (1ae82cd Dimitre Piskyulev) + o Avoid double-invocation of user callback with EVUTIL_EAI_CANCEL (abf01ed) + o Set SO_UPDATE_ACCEPT_CONTEXT on sockets from AcceptEx so that shutdown() can work (52aa419) + o When closing a filtering bufferevent, clear callbacks on the underlying bufferevent (fc7b1b0) + +NEW AND MODIFIED HTTP APIs + o Add evhttp_parse_query_str to be used with evhttp_uri_parse. (2075fbc) + o Add evhttp_response_code to remove one more reason to include http_struct.h (22e0a9b) + o Define enumerators for all HTTP methods, including PATCH from RFC5789 (75a7341 Felix Nawothnig) + o Functions to actually use evhttp_bound_socket with/as evconnlistener. (006efa7) + o Add evhttp_request_get_command so code can tell GET from POST without peeking at the struct. (49f4bf7) + o Introduce absolute URI parsing helpers. (86dd720 Pavel Plesov) + o Revise evhttp_uri_parse implementation to handle more of RFC3986 (eaa5f1d) + o Add evhttp_connection_get_base() to get the event_base from an http connection (cd00079) + o Let evhttp_parse_query return -1 on failure (b1756d0) + o New evhttp_uri(encode|decode) functions to handle + and NUL characters right (a8148ce) + o Add evhttp_response_code to remove one more reason to include http_struct.h (22e0a9b) + o Tweak interface for allowed methods (f5b391e) + o Add evhttp server alias interface, correct flagging of proxy requests. (aab8c38 Christopher Davis) + +HTTP BUGFIXES + o Add some comments to http.c and make a few functions static. (90b3ed5) + o Fix Content-Length when trying send more than 100GB of data (!) on an evhttp. (525da3e) + o Fix a bug where we would read too much data in HTTP bodies or requests. (58a1cc6) + o Correctly count req->body_size on http usage without Content-Length (8e342e5) + o Avoid missed-request bug when entire http request arrives before data is flushed (74c0e86) + o reset "chunked" flag when sending non-chunked reply (aa5f55f Joachim Bauch) + o evhttp_encode_uri encodes all reserved characters, including !$'()*+,/:=@ (2e63a60) + o Replace exact-version checks for HTTP/1.1 with >= or < checks (647e094) + o evhttp: Return 501 when we get an unrecognized method, not 400. (536311a) + o Don't disable reading from the HTTP connection after sending the request to be notified of connection-close in time (c76640b Felix Nawothnig) + o Never call evhttp_readcb while writing. (0512487) + o Try to fix an assertion failure related to close detection (0faaa39) + o Correctly detect timeouts during http connects (04861d5) + o Preliminary support for Continue expectation in evhttp. 
+ (fa9305f Christopher Davis) + +OTHER BUGFIXES + o Correct logic for realigning a chain in evbuffer_add (e4f34e8) + o Fix a minor syntax error that most compilers didn't care about (e56ff65) + o Fix some uses of int for socket in regress (5d389dc) + o Check return value for ioctlsocket on win32 (f5ad31c Trond Norbye) + o Fix som event_warns that should have been event_warnx (19c71e7) + o Fix signal handler types for win64. (b81217f) + o Try to clear up more size_t vs int/long issues. (598d133) + o Make sure IOCP evconnlistener uses virtual events. (7b40a00 Christopher Davis) + o Don't free evdns_request handles until after the callback is invoked (9ed30de) + o Fix some more cancel-related bugs in getaddrinfo_async (c7cfbcf) + o Make evdns_getaddrinfo_cancel threadsafe (d51b2fc) + o Only clear underlying callbacks when the user hasn't reset them. (1ac5b23) + o Fix bug in bufferevent_connect on an openssl bufferevent that already had an fd (4f228a1) + o Resolve an evport bug in the thread/forking test (3a67d0b) + o Make sure the CLOEXEC flag is set on fds we open for base notification (3ab578f) + o Fix IRIX build. sa_family collides with a #define in sys/socket.h on IRIX. (e874982 Kevin Bowling) + o If not WIN32, include <sys/time.h> in event2/util.h. (1cd45e5 Kevin Bowling) + o Fix some C99-style comments to work with the xlC compiler. (c2e5e22 Kevin Bowling) + o Add some checks since lack of TAILQ_FOREACH doesn't imply lack of FIRST, END, NEXT, or INSERT_BEFORE. Quiet some warnings in XL C. (c4dc335 Kevin Bowling) + o Reworked AIX __ss_family workaround to use AC_STRUCT_MEMBER. (2e2a3d7 Kevin Bowling) + o Take select from <sys/select.h> when testing in autoconf. AIX build fix. (a3a9f6b Kevin Bowling) + o Fix snprintf related failures on IRIX. (3239073 Kevin Bowling) + o Remove _event_initialized(); make event_initialized() a function(); make it consistent on windows and non-windows (652024b) + o Do not let EVLOOP_ONCE exit the loop until all deferred callbacks have run (2d5e1bd) + o Make EVLOOP_ONCE ignore internal events (0617a81) + o Possible crash fix when freeing an underlying bufferevent of an openssl bufferevent (29f7623) + +HTTP CLEANUPS + o Stop using Libevent-1 headers in regress_http (1f507d7) + o Modernize header usage in bench_http.c (e587069) + o fix signed/unsigned warnings in http.c (74a91e5) + o Update the HTTP regression tests to use Libevent2 apis for non-http stuff (d9ffa89) + o Start porting http tests to not use legacy interfaces (8505a74) + o Convert the rest of the http tests to be non-legacy unit tests. (9bb8239) + o Rename the confusing "base" static variable in regress_http.c (353402a) + o Stop accessing http request struct directly from in the unit tests. (0b137f4) + o Refactor http version parsing into a single function (a38140b) + +TESTING + o Improvements to tinytest_macros.h (ad923a1) + o Add a huge pile of tests for the new URI functions, and make them pass. (a5a76e6) + o Unit tests for evhttp_uri_set* (bc98f5e) + o Increase the skew tolerance to 2 seconds in thread/deferred_cb_skew (f806476 Christopher Davis) + o Reorder backends in test.sh to match preference order in event.c (ece974f) + o Add a stress test for getaddrinfo_cancel (da1bf52) + o Units test for unexpected evhttp methods. (75e3320) + +DOCUMENTATION + o Document behavior of URI parsing more thoroughly.
(3a33462) + o Document that two bufferevent functions only work on socket bufferevents (70e1b60) + o add a requested docstring for event_rpcgen.CommandLine.__init__ (f1250eb) + o Fix a mistake in http documentation found by Julien Blache (229714d) + o Add a basic example of how to write a static HTTP server. (4e794d5) + o Document event_get_assignment (88be27d) + o Note that reentrant calls to libevent from logging cbs may fail badly (e431bcd) + o Clarify EVLOOP_* documentation to be more precise. (057a514) + +CLEANUPS + o Simplify the logic for choosing EPOLL_CTL_ADD vs EPOLL_CTL_MOD (2c66983) + o Rename "size" variables in win32select that were really fd counts. (b6a158c) + o Fix even more win64 warnings (7484df6) + o Fix even more win64 warnings: buffer, event_tagging, http, evdns, evrpc (545a611) + o Fix more wn64 warnings. (34b84b9 Christopher Davis) + o Use the label_len local variable in evdns instead of recalculating it over and over (ba01456) + o Fix some irix compilation warnings spotted by Kevin Bowling (7bcace2) + + + +Changes in 2.0.8-rc (14 Oct 2010): + [Autogenerated from the Git log, sorted and cleaned by hand.] +NEW APIS + o Add error callback to evconnlistener (c4be8d8 Simon Perreault) + o Add a LEV_OPT_THREADSAFE option for threadsafe evconnlisteners (127d4f2) + +CHANGED BEHAVIOR + o Correct logic on disabling underlying bufferevents when disabling a filter (ac27eb8) + +BUGFIXES + o Obey enabled status when unsuspending (040a019 Simon Perreault) + o Warn when using the error-prone EV_SIGNAL interface in an error-prone way. Also, fix a couple of race conditions in signal.c (720bd93) + O Make default signal backend fully threadsafe (95a7d41) + o Put internal events at highest priority (90651b3) + o Fix warnings in the main codebase flagged by -Wsigned-compare (9c8db0, 5e4bafb, 5c214a, 6be589a, e06f514) + o Fix compile in kqueue.c (b395392 Sebastian Hahn) + o Do not search outside of the system directory for windows DLLs (d49b5e3) + o Fix a spurious-call bug on epoll.c (0faaee0) + o Send a shutdown(SHUT_WR) before closing an http connection (e0fd870 Christopher Davis) + o Fix warnings on mingw with gcc 4.5 (5b7a370) + o Fix an EINVAL on evbuffer_write_iovec on OpenSolaris. (fdc640b) + o Fix allocation error for IOCP listeners. Probably harmless, since struct event is big (481ef92) + o Make iocp/listener/error work; don't accept again if lev is disabled. (62b429a Christopher Davis) + o Handle rate-limiting for reading on OpenSSL bufferevents correctly. (819b171) + o Fix serious bugs in per-bufferevent rate-limiting code (34d64f8) + o Avoid spurious reads from just-created open openssl bufferevents (223ee40) + o Fix a case where an ssl bufferevent with CLOSE_ON_FREE didn't close its fd (93bb7d8) + o The corrected bufferevent filter semantics let us fix our openssl tests (34331e4) + +TESTING + o Make SSL tests cover enabling/disabling EV_READ. (a5ce9ad) + o Bump to the latest version of tinytest (f0bd83e) + o Unit tests for listener error callbacks (045eef4) + o New unit test for ssl bufferevents starting with connected SSLs. (02f6259) + +DEBUGGABILITY + o Make debugging output for epoll backend more comprehensive (ec2b05e) + o Make event.c debugging messages report fds (e119899) + o Make the --enable-gcc-warnings option include signed comparison warnings (d3b096c) + +DEADCODE REMOVAL + o Remove the now-useless evsig_caught and evsig_process (4858b79) + o Remove event_base.evsigbase; nothing used it. 
(38d0960) + + + +Changes in 2.0.7-rc (9 Sep 2010): + [Autogenerated from the Git log, sorted and cleaned by hand.] +NEW APIS + o Expose a evdns_base_nameserver_sockaddr_add() function to add a nameserver by sockaddr (1952143) + o Add event_config_set_num_cpus_hint() for tuning win32 IOCP thread pools, etc. (2447fe8 Christopher Davis) + +BUGFIXES + o Fix a nasty dangling-event bug when using rate-limiting groups (0bffe43) + o Clean up syntax on TAILQ_ENTRY() usage to build correctly with recent MSVC (60433a0 Gilad Benjamini) + o Make definition of WIN32_LEAN_AND_MEAN in event.h conditional (3920172 Gilad Benjamini) + o Correctly detect failure to delete bufferevent read-timeout event (da6e7cd) + o Set close-on-exec bit for filedescriptors created by dns subsystem (d0b8843) + o Fix kqueue correctness test on x84_64 (6123d12) + o Detect events with no ev_base; warn instead of crashing (f1074b7) + o Fix an issue with forking and signal socketpairs in select/poll backends (d61b2f3) + o Stop using global arrays to implement the EVUTIL_ctype functions (1fdec20) + o On windows, make lock/thread function tables static (5de2bcb) + o Close th_notify_fds and open a new pair on reinit (495ed66) + o Declare signal handler function as "__cdecl" on Windows (f0056d0) + o Use the _func() replacements for open, fstat, etc in evutil.c on win32 (e50c0fc) + o Only process up to MAX_DEFERRED deferred_cbs at a time (17a14f1 Christopher Davis) + +THREADING BUGFIXES + o Avoid deadlock when activating signals (970e6ad) + o Add a condition variable backend, with implementations for pthreads and win32 (d4977b5) + o Use conditions instead of current_event_lock to fix a deadlock (e0972c2) + o Fix logic error in win32 TRY_LOCK that caused problems with rate-limiting (4c32b9d) + o Avoid needlessly calling evthread_notify_base() when the loop is not running (c7a06bf) + o Minimize calls to base_notify implementation functions, thereby avoiding needless syscalls (4632b78) + +IOCP BUGFIXES + o IOCP-related evbuffer fixes (03afa20 Christopher Davis) + o Stop IOCP when freeing the event_base (d844242 Christopher Davis) + o Some IOCP bufferevent tweaks (76f7e7a Christopher Davis) + +TESTS + o Make the regress_pthread.c tests work on windows with current test APIs (d74ae38) + o Add a unit test for conditions (5fb1095) + o Allow more than one copy of regression tests to run at once (a97320a) + o Fix event_del(0) instance in bench.c (b0f284c Shuo Chen) + o Fix a few memory leaks in the tests (1115366) + o IOCP-related unit test tweaks (499452f Christopher Davis) + o Improve testing of when thread-notification occurs (ce85280) + +BUILD AND DISTRIBUTION + o Add pkgconfig files for libevent_{openssl,pthreads} (ebcb1f0) + o Change include order in Makefile.nmake (4022b28) + o Make include/event2/event-config.h not included in source dist (a4af9be) + o Honor NDEBUG; build without warnings with NDEBUG; make NDEBUG always-off in unit test code (743f866) + o Declare evkeyvalq and event_list even if event_struct.h comes before sys/queue.h (d3ceca8) + o Move evkeyvalq into a separate header for evhttp_parse_query users (ca9048f) + o Prefer autoreconf -ivf to manual autogen.sh (7ea8e89) + +CLEANUP + o Completely remove the (mostly-removed) obsolete thread functions (3808168) + o Rename regress_pthread.c to regress_thread.c (041989f) + o Make defer-internal.h use lock macros, not direct calls to lock fns (5218d2a) + +DOCUMENTATION + o Document that DNS_NO_SEARCH is an obsolete alias for DNS_QUERY_NO_SEARCH (33200e7) + o Update the 
whatsnew-2.0.txt document (4991669) + + + +Changes in 2.0.6-rc (6 Aug 2010): + [Autogenerated from the Git log, sorted by hand.] +DOCUMENTATION + o Document a change in the semantics of event_get_struct_event_size() (e21f5d1) + o Add a comment to describe our plan for library versioning (9659ece) + o Fix sentence fragment in docs for event_get_struct_event_size() (7b259b6) + +NEW FEATURES AND INTERFACE CHANGES + o Remove the obsolete evthread interfaces (c5bab56) + o Let evhttp_send_error infer the right error reasons (3990669) + o Add a function to retrieve the other side of a bufferevent pair (17a8e2d) + o Add bufferevent_lock()/bufferevent_unlock() (215e629) + o Stop asserting when asked for a (unsupported) TCP dns port. Just return NULL. (7e87a59) + o Replace (unused,always 0) is_tcp argument to evdns_add_server_port*() with flags (e1c1167) + o Constify a couple of arguments to evdns_server_request_add_*_reply (cc2379d) + o Add an interface to expose min_share in ratelimiting groups (6ae53d6) + +BUGFIXES + o Avoid event_del on uninitialized event in event_base_free (6d19510) + o Add some missing includes to fix Linux build again (75701e8) + o Avoid close of uninitialized socket in evbuffer unit test (bda21e7) + o Correctly recognize .255 addresses as link-local when looking for interfaces (8c3452b) + o If no evdns request can be launched, return NULL, not a handle (b14f151) + o Use generic win32 interfaces, not ASCII-only ones, where possible. (899b0a3) + o Fix the default HTTP error template (06bd056 Felix Nawothnig) + o Close the file in evutil_read_file whether there's an error or not. (0798dd1 Pierre Phaneuf) + o Fix possible nullptr dereference in evhttp_send_reply_end() (29b2e23 Felix Nawothnig) + o never let bufferevent_rlim functions return negative (0859870) + o Make sample/hello_world work on windows (d89fdba) + o Fix a deadlock related to event-base notification. Diagnosed by Zhou Li, Avi Bab, and Scott Lamb. (17522d2) + o Possible fix to 100% cpu usage with epoll and openssl (cf249e7 Mike Smellie) + o Don't race when calling event_active/event_add on a running signal event (fc5e0a2) + o Suppress a spurious EPERM warning in epoll.c (e73cbde) + o Fix wrong size calculation of iovec buffers when exact=1 (65abdc2 niks) + o Change bufferevent_openssl::do_write so it doesn't call SSL_write with a 0 length buffer (c991317 Mike Smellie) + o Fixed compilation of sample/le-proxy.c on win32 (13b912e Trond Norbye) + o Fix rate-limit calculation on openssl bufferevents. (009f300) + o Remember to initialize timeout events for bufferevent_async (de1f5d6 Christopher Davis) + +BUILD AND DISTRIBUTION CHANGES + o Test the unlocked-deferred callback case of bufferevents (dfb75ab) + o Remove the now-unusable EVTHREAD_LOCK/UNLOCK constants (fdfc3fc) + o Use -Wlogical-op on gcc 4.5 or higher (d14bb92) + o Add the libtool-generated /m4/* stuff to .gitignore (c21c663) + o Remove some automake-generated files from version control. (9b14911) + o Have autogen.sh pass --force-missing to automake (8a44062) + o Set library version for libevent_pthreads correctly (b2d7440) + o Really only add libevent_core.la to LIBADD on mingw (1425003 Sebastian Hahn) + o Build more cleanly with NetBSDs that dislike toupper(char) (42a8c71) + o Fix unit tests with -DUSE_DEBUG enabled (28f31a4) + o Fix evdns build with -DUNICODE (5fa30d2) + o Move event-config.h to include/event2 (ec347b9) + +TESTING + o Add options to test-ratelim.c to check its results (2b44dcc) + o Make test-ratelim clean up after itself better. 
(b5bfc44) + o Remove the now-obsolete setup_test() and cleanup_test() functions (e73f1d7) + o Remove all non-error prints from test/regress.c (8bc1e3d) + o Make test.sh exit with nonzero status if tests fail (faf2a04) + o Have the unit tests report errors from test.sh (3689bd2) + o Fix logic in correcting high values from FIONREAD (3467f2f) + o Add test for behavior on remote socket close (44d57ee) + o Unit test for event_get_struct_event_size() (7510aac) + o Make test/test.sh call test-changelist (7c92691) + o Fix badly-behaved subtest of dns/bufferevent_connect_hostname (840a72f Joachim Bauch) + o Add option to test-ratelim to test min_share (42f6b62) + o Fix an assertion bug in test-ratelim (b2c6202) + o Make tests quieter on local dns resolver failure (e996b3d) + o Increase the tolerance in our unit tests for sloppy clocks. (170ffd2) + o Use AF_INET socketpair to test sendfile on Solaris (9b60209) + o Make test-changelist count cpu usage right on win32 (ea1ea3d) + +INTERNALS, PERFORMANCE, AND CODE CLEANUPS + o Mark the event_err() functions as __attribute__((noreturn)) (33bbbed) + o Do not check that event_base is set in EVBASE_ACQUIRE_LOCK (218a3c3) + o Replace (safe) use of strcpy with memcpy to appease OpenBSD (caca2f4) + o Remove some dead assignments (47c5dfb) + o Fix a pedantic gcc 4.4 warning in event2/event.h (276e7ee) + o Drain th_notify_fd[0] more bytes at a time. (a5bc15b) + o Tidy up the code in evthread_make_base_notifiable a little (61e1eee) + o Pass flags to fcntl(F_SETFL) and fcntl(F_SETFD) as int, not long (7c2dea1) + o Remove unused variables in test/test-changelist.c (b00d4c0) + o Fix whitespace. (cb927a5) + o Improve error message for failed epoll to make debugging easier. (9e725f7) + o Turn our socketpair() replacement into its own function (57b30cd) + + + +Changes in 2.0.5-beta (10 May 2010): + [Autogenerated from the Git log, sorted by hand.] +DOCUMENTATION + o Update all our copyright notices to say "2010" (17efc1c) + o Add Christopher Clark and Maxim Yegorushkin to the LICENSE file (38b7b57) + o Clarify Christopher Clark's status as writer of original ht code. (78772c3) + o Try to comment some of the event code more (cdd4c49) + o Add a few more evmap/changelist comments (c247adc) + o Add a comment to explain why evdns_request is now separte from request (ceefbe8) + o Document evutil_secure_rng_init() and evutil_secure_rng_add_bytes() (a5bf43a) + o Stop distributing and installing manpages: they were too inaccurate (7731ec8) + +NEW FEATURES AND INTERFACE CHANGES + o Remove signal_assign() and signal_new() macros. (2fac0f7) + o Make evdns use the regular logging system by default (b2f2be6) + o Allow evbuffer_read() to split across more than 2 iovecs (e470ad3) + o Functions to manipulate existing rate limiting groups. (ee41aca) + o Functions to track the total bytes sent over a rate limit group. (fb366c1) + o Detect and refuse reentrant event_base_loop() calls (b557b17) + o Limit the maximum number of events on each socket to 65535 (819f949) + o Add evbuffer_copyout to copy data from an evbuffer without draining (eb86c8c) + o Expose the request and reply members of rpc_req_generic() (07edf78 Shuo Chen) + o Add void* arguments to request_new and reply_new evrpc hooks (755fbf1 Shuo Chen) + o Seed the RNG using sysctl() as well as /dev/urandom (71fc3eb) + o Make evutil_secure_rng_init() work even with builtin arc4random (f980716) + o Report DNS error when lookup fails during bufferevent_socket_connect_hostname. 
(0ef4070 Christopher Davis) + o Release locks on bufferevents while executing callbacks (a5208fe Joachim Bauch) o Make debug mode catch mixed ET and non-ET events on an fd (cb67074) + o Catch attempts to enable debug_mode too late (9ecf0d4) + o Refuse null keys in evhttp_parse_query() (953e229 Frank Denis) + +BUGFIXES + o Avoid a spurious close(-1) on Linux (70a44b6) + o Do not close(-1) when freeing an uninitialized socket bufferevent (b34abf3) + o Free evdns_base->req_heads on evdns_base_free (859af67) + o Avoid an (untriggerable so far) crash bug in bufferevent_free() (0cf1431) + o Set mem_offset for every bufferevent type (657d1b6) + o Fix infrequent memory leak in bufferevent_init_common(). (8398641 Jardel Weyrich) + o Make evutil_signal_active() match declaration. (e1e703d Patrick Galbraith) + o Fix minheap code to use replacement malloc functions (a527618) + o Fix a free(NULL) in minheap-internal.h (6f20492) + o Fix critical bug in evbuffer_write when writev is not available (cda56ab) + o Make the no_iovecs case of write_atmost compile (8e227b0) + o Fix a memory leak when appending/prepending to a buffer with unused space. (45068a3) + o Clean up a mistake in pointer manipulation in evbuffer_remove (28bfed4 Christopher Davis) + o Always round up when there's a fractional number of msecs. (8f9e60c Christopher Davis) + o Fix compiler warnings under WIN32 (d469c50 Giuseppe Scrivano) + o Clean up properly when adding a signal handler fails. (b84b598 Gilad Benjamini) o Ensure that evdns_request is a persistent handle. (15bb82d Christopher Davis) + o Free search state when finished searching to avoid an infinite loop. (a625840 Christopher Davis) + o Assert for valid requests as necessary. (67072f3 Christopher Davis) + o do not leak the request object on persistent connections (9d8edf2) + o Make evdns logging threadsafe (b1c7950) + o Fix a couple of bugs in the BSD sysctl arc4seed logic (a47a4b7) + o Remove one last bug in last_with_datap logic. Found with valgrind (d49b92a) + o fix a leak when unpausing evrpc requests (94ee125) + o Fix a memory leak when unmarshalling RPC object arrays (f6ab2a2) + o Fix compilation when openssl support is disabled (40c301b) + o Allow empty reason line in HTTP status (739e688 Pierre Phaneuf) + o Fix a compile warning introduced in 739e688 (bd1ed5f Sebastian Hahn) + o Fix nonstandard TAILQ_FOREACH_REVERSE() definition (71afc52 Frank Denis) + o Try /proc on Linux as entropy fallback; use sysctl as last resort (20fda29) + o Fix symbol conflict between mm_*() macros and libmm (99e50e9) + o Fix some crazy macro mistakes in arc4random.c (90d4225) + o Make evbuffer_add_file() work on windows (dcdae6b) + o Fix unused-variable warning when building with threads disabled (ad811cd) + o Numerous opensolaris compilation fixes (c44de06) + o Fix getaddrinfo with protocol unset on Solaris 9. 
Found by Dagobert Michelsen (2cf2a28) + o Fix another nasty solaris getaddrinfo() behavior (3557071) + o Define _REENTRANT as needed on Solaris, elsewhere (c1cd32a) + o Fix some autoconf issues on OpenBSD (7c519df) + +BUILD AND DISTRIBUTION CHANGES + o Distribute libevent.pc.in, not libevent.pc (22aff04) + o Avoid errors in evutil.c when building with _UNICODE defined (b677032 Brodie Thiesfield) + o Avoid errors in http.c when building with VC 2003 .NET (13e4f3b Brodie Thiesfield) + o Support the standard 'make check' target in place of 'make verify' (426c8fb) + o Remove redundant stuff from EXTRA_DIST (b660edf) + o Switch to using AM conditionals in place of AC_LIBOBJ (2e898f5) + o Remove an orphaned RELEASE flag in Makefile.am (0794b0d) + o Give a better warning for bad automake versions. (77c917d) + o Use dist_bin_SCRIPTS, not EXTRA_DIST, to distribute scripts (9eb2fd7) + o Never test for select() on windows (3eb044d Trond Norbye) + o Do not inhibit automake dependencies generation (10c4c90 Giuseppe Scrivano) + o Create shared libraries under Windows (3cbca86 Giuseppe Scrivano) + o Add ctags/etags files to .gitignore (0861d17) + o Only specify -no-undefined on mingw (25433b9) + o Only add libevent_core.la to LIBADD on mingw (fdc6297) + +TESTING + o Get bench_http to work on Windows; add a switch to enable IOCP. (4ac38a5 Christopher Davis) + o VC has no getopt(), so do without in bench_http. (1273d2f Christopher Davis) + o Fix an obnoxious typo in the bufferevent_timeout_filter test (0d047c3) + o Fix a write of uninitialized RAM in regression tests (68dc742) + o Fix some memory leaks in the unit tests (274a7bd) + o Make 'main/many_events' test 70 fds, not 64. (33874b0) + o Unit-test every evbuffer_add_file() implementation. (06a4443) + o Add more unit tests for evbuffer_expand (8c83e99) + o Test another case of evbuffer_prepend (1234b95) + o Fix a possible double-free bug in SSL bufferevents with CLOSE_ON_FREE (7501895) o Add dns/search_cancel unit test. (39b870b Christopher Davis) + o Make http_base_test stop leaking an event_base. (96730d3) + o Detect broken unsetenv at unit-test runtime (f37cd4c) + o Implement regress_make_tempfile on win32 to test evbuffer_add_file (b4f12a1) + o add more (currently skipped) add_file tests on win32 (05de45d) + o Fix bench_http build on win32. (384d124) + o Make unit test for add_file able to tell "error" from "done" (88a543f) + o Make test for bufferevent_connect_hostname system-neutral (f89168e) + o Make test.sh support mingw/msys on win32 (0ee6f6c) + o Fix test.sh on freebsd (3d9e05b) + +INTERNALS, PERFORMANCE, AND AND CODE CLEANUPS + o Improve the speed of evbuffer_readln() (cc1600a) + o more whitespace normalization (2c2618d) + o Revise evbuffer to add last_with_data (2a6d2a1) + o Use last_with_data in place of previous_to_last (c8ac57f) + o Remove previous_to_last from evbuffer (6f47bd1) + o Fix last_with_data compilation on windows (1e7b986) + o Add some glass-box tests for the last_with_data code. 
(17da042) + o Improve robustness for refcounting (f1bc125) + o Remove a needless min_heap_shift_up_() call (7204b91) + o Increase MIN_BUFFER_SIZE to 512 (1024 on 64-bit) (2014ae4) + o Do not use evbuffer_expand() to add the first chain to a buffer (5c0ebb3) + o Make evbuffer_prepend handle empty buffers better (c87272b) + o Replace last_with_data with a slightly smarter version (b7442f8) + o Turn the increasingly complex *_CHAIN() macros into functions (96865c4) + o Rewrite evbuffer_expand and its users (d5ebcf3) + o Add evutil_tv_to_msec for safe conversion of timevals to milliseconds. (850c3ff Christopher Davis) + o Initialize last_with_datap correctly in evbuffer_overlapped (a0983b6) + o Replace EVUTIL_CLOSESOCKET macro with a function (899c1dc Sebastian Sjöberg) + o Move domain search state to evdns_request. (beaa14a Christopher Davis) + o Remove redundant checks for lock!=NULL before calling EVLOCK_LOCK (50ec59f) + o Rename current_base symbol to event_global_current_base_ (c16e684) + o Fix whitespace in evutil.c (935e150) + o Replace users of "int fd" with "evutil_socket_t fd" in portable code (c7cf6f0) + + + +Changes in 2.0.4-alpha (28 Feb 2010): + [Autogenerated from the Git log, sorted by hand.] +DOCUMENTATION + o Add stub header for 2.0.4-alpha changelog. (94d0065) + o Improve the README with more information and links. (0b42726) + o Add more people who wrote patches to the acknowledgments (0af10d5) + o Add a warning about the use of event_initialized. (f32b575) + o Add a LICENSE file so people can find our license easily (7067006) + o Add a new "hello world" sample program (becb9f9) + o Clarify status of example programs (d60a1bd) + o Update time-test.c to use event2 (f4190bf) + o Add the arc4random.c license to the LICENSE file. (e15e1e9) + +NEW FEATURES AND INTERFACE CHANGES + o Improved optional lock debugging. (0cd3bb9) + o Rate-limiting for bufferevents; group and individual limits are supported. (737c9cd) + o Testing code for bufferevent rate-limiting. (f0c0124) + o Make the initial nameserver probe timeout configurable. (1e56a32) + o Revise the locking API: deprecate the old locking callbacks and add trylock. (347952f) + o Do not make bufferevent_setfd implicitly disable EV_READ and EV_WRITE. (8274379) + o Do not ignore bufferevent_enable(EV_READ) before bufferevent_connect(). (4a5b534) + o Introduced evutil_make_socket_closeonexec() to preserve fd flags for F_SETFD. (d0939d2 Jardel Weyrich) + o evdns_getaddrinfo() now supports the /etc/hosts file. (72dd666) + o Look at the proper /etc/hosts file on windows. (66c02c7) + o Allow http connections to use evdns for hostname looksups. (c698b77) + o Changelist code to defer event changes until just before dispatch (27308aa) + o do not use a function to assign the evdns base; instead assign it via evhttp_connection_base_new() which is a new function introduced in 2.0 (5032e52) + o Functions to access more fields of struct event. (0683950) + o Make kqueue use changelists. (45e5ae3) + o Remove kqueue->pend_changes. (3225dfb) + o Minimize epoll_ctl calls by using changelist (c8c6a89) + o Add support for a "debug mode" to try to catch common errors. (cd17c3a) + o Note a missing ratelim function (361da8f) + o Add ev_[u]intptr_t to include/event2/util.h (1fa4c81) + o const-ify a few more functions in event.h (d38a7a1) + o Deprecate EVENT_FD and EVENT_SIGNAL. (f6b2694) + o Remove EVUTIL_CHECK_FMT. (6c21c89) + o Add EV_*_MAX macros to event2/util.h to expose limits for ev_* types. 
(aba1fff) o Functions to view and manipulate rate-limiting buckets. (85047a6) + o Add the rest of the integer limits, and add a test for them. (60742d5) + o Remove the 'flags' argument from evdns_base_set_option() (1dd7e6d) + o Add an arc4random implementation for use by evdns (d4de062) + o Use off_t for the length parameter of evbuffer_add_file (3fe60fd) + o Construct Windows locks using InitializeCriticalSectionAndSpinCount (32c6f1b) + o Expose view of current rate limit as constrained by group limit (162ce8a) + o Provide consistent, tested semantics for bufferevent timeouts (d328829) + +BUGFIXES AND TESTS + o Tolerate code that returns from a fatal_cb. (91fe23f) + o Parenthesize macro arguments more aggressively (07e9e9b) + o Fix memory-leak of signal handler array with kqueue. (e1ffbb8) + o Stop passing EVTHREAD_READ and EVTHREAD_WRITE to non-rw locks. (76cd2b7) + o Fix two use-after-free bugs in unit tests spoted by lock debugging (d84d838) + o Fix a locking bug in event_base_loop() (da1718b) + o Fix an evdns lock violation. (2df1f82 Zhuang Yuyao) + o Valgrind fix: Clear struct kevent before checking for OSX bug. (56771a3 William Ahern) + o Fix up evthread compilation on windows (bd6f1ba Roman Puls) + o Fix regress_iocp.c usage of old lock allocation macros. (31687b4 unknown) + o Update nmake makefile to build evthread.c (b62d979 unknown) + o Fix a crash when reading badly formatted resolve.conf; from Yasuoka Masahiko (6c7c579 Yasuoka Masahiko) + o Fix a snow leopard compile warning in the unit tests. (7ae9445) + o Fix compile on Snow Leopard with gcc warnings enabled (70cdfe4 Sebastian Hahn) + o Only define _GNU_SOURCE if it is not already defined. (ea6b1df Joachim Bauch) + o Update sample/signal-test.c to use newer APIs and not leak. (f6430ac Evan Jones) + o Fix a segfault when writing a very fragmented evbuffer onto an SSL (a6adeca Joachim Bauch) + o Fix a segfault when freeing SSL bufferevents in an unusual order (a773df5 Joachim Bauch) + o Drop install-sh from our git repo: a mismatched version could break "make dist" (6799527) + o Set all instances of the version number correctly. (5a112d3) + o Fix a few locking issues on windows. (c51bb3c unknown) + o Use evutil_socket_t, not int, when logging socket errors. (292467c) + o Fix up behavior of never-defered callbacks a little (390e056) + o Replace some cases of uint32_t with ev_uint32_t. (a47d88d) + o Fix compilation of devpoll.c by adding missing thread includes. (fee2c77 Dagobert Michelsen) + o Make evutil_make_socket_nonblocking() leave any other flags alone. (4c8b7cd Jardel Weyrich) + o Fix an fd leak in evconnlistener_new_bind(). (24fb502 Jardel Weyrich) + o Fix a bogus free in evutil_new_addrinfo() (0d64051 Jardel Weyrich) + o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (4df7dbc Jardel Weyrich) + o Fix the code that allowed DNS options to not end with : (ee4953f) + o Fix crash bugs when a bufferevent's eventcb is not set. (2e8eeea) + o Fix test-ratelim compilation on Linux. (885b427) + o Fix compilation of rate-limiting code on win32. (165d30e) + o Eradicated the last free() call. Let mm_free() take care of deallocation. (0546ce1 Jardel Weyrich) + o Fix byte counts when mixing deferred and non-deferred evbuffer callbacks. (29151e6) + o Fixed a memory leak on windows threads implementation. The CRITICAL_SECTION was not being free'd in evthread_win32_lock_free(). 
(2f33e00 Jardel Weyrich) + o Fixed a fd leak in start_accepting(), plus cosmetic changes (4367a33 Jardel Weyrich) + o Improved error handling in evconnlistener_new_async(). Also keeping the fd open because it is not opened by this function, so the caller is responsible for closing it. Additionally, since evconnlistener_new_bind() creates a socket and passes it to the function above, it required error checking to close the same socket. (fec66f9 Jardel Weyrich) + o Don't use a bind address for nameservers on loopback (8d4aaf9) + o Fix compilation of rate-limit code when threading support is disabled (97a8c79) + o Detect setenv/unsetenv; skip main/base_environ test if we can't fake them. (7296971) + o Check more internal event_add() calls for failure (ff3f6cd) + o Fix windows and msvc build (5c7a7bc) + o Call event_debug_unassign on internal events (a19b4a0) + o Try to fix a warning in hash_debug_entry (137f2c6) + o Fix a dumb typo in ev_intptr_t definitions. (27c9a40) + o do not fail while sending on http connections the client closed. (93d7369) + o make evhttp_send() safe against terminated connections, too (3978180) + o Make Libevent 1.4.12 build on win32 with Unicode enabled. (000a33e Brodie Thiesfield) + o Fix some additional -DUNICODE issues on win32. (a7a9431) + o Add a check to make soure our EVUTIL_AI flags do not conflict with the native ones (c18490e) + o Always use our own gai_strerror() replacement. (6810bdb) + o Make RNG work when we have arc4random() but not arc4random_buf() (4ec8fea) + o validate close cb on server when client connection closes (2f782af) + o Fix two unlocked reads in evbuffer. (7116bf2) + o When working without a current event base, don't try to use IOCP listeners (cb52838) + o Fix getpid() usage on Windows (ff2a134) + o Add a unit test for secure rng. (48a29b6) + o Add some headers to fix freebsd compilation (b72be50) + o When connect() succeeds immediately, don't invoke the callback immediately. (7515de9) + o Suspend read/write on bufferevents during hostname lookup (db08f64) + o Make bufferevent_free() clear all callbacks immediately. (b2fbeb3) + o Fix some race conditions in persistent events and event_reinit (e2642f0) + o Fix a bug in resetting timeouts on persistent events when IO triggers. (38ec0a7) + o Add a test for timeouts on filtering bufferevents. (c02bfe1) + o Add test for periodic timers that get activated for other reasons (8fcb7a1) + o Use new timeval diff comparison function in bufferevent test (f3dfe46) + o Delete stack-alloced event in new unit test before returning. (7ffd387) + o Fix mingw compilation (23170a6) + o Try to define a sane _EVENT_SIZEOF_SIZE_T for msvc compilation (1e14f82) + o Fix arc4random compilation on MSVC. (98edb89) + o deal with connect() failing immediately (7bc48bf) + o Small cleanups on freebsd-connect-refused patch. (57b7248) + +BUILD AND DISTRIBUTION CHANGES + o Remove the contents of WIN32-Prj as unmaintained. (c69d5a5) + o Allow the user to redirect the verbose output of test/test.sh to a file (c382de6) + o Allow test.sh to be run as ./test/test.sh (7dfbe94) + o Never believe that we have pthreads on win32, even if gcc thinks we do. (78ed097) + o Make it compile under gcc --std=c89. (e2ca403) + o Fix a number of warnings from gcc -pedantic (918e9c5) + o Add the msvc-generated .lib files to .gitignore. (e244a2e) + o Add the "compile" script to gitignore. (1ba6bed) + +INTERNALS AND CODE CLEANUPS + o Add a .gitignore file. (ba34071) + o New EVTHREAD_TRY_LOCK function to try to grab a lock. 
(689fc09) + o Add the abilitity to mark some buffer callbacks as never-deferred. (438f9ed) + o Refactor our 'suspend operation' logic on bufferevents. (0d744aa) + o Simplify the read high-watermark checking. (5846bf6) + o Improve readability of evutil_unparse_protoname() (5a43df8 Jardel Weyrich) + o Expose our cached gettimeofday value with a new interface (47854a8) + o Whitespace fixes in test.sh (0b151a9) + o Enable branch-prediction hints with EVUTIL_UNLIKELY. (eaaf27f) + o Refactor code from evdns into a new internal "read a file" function. (0f7144f) + o Comestic changes in evconnlistener_new(), new_accepting_socket(), accepted_socket_invoke_user_cb() and iocp_listener_enable(). (510ab6b Jardel Weyrich) + o Add unit-test for bad_request bug fixed in 1.4 recently. (6cc79c6 Pavel Plesov) o Add a comment on evthread_enable_lock_debuging. (b9f43b2) + o Fix test.sh on shells without echo -n (94131e9) + o More unit tests for getaddrinfo_async: v4timeout and cancel. (a334b31) + o Make http use evconnlistener. (ec34533) + o move dns utility functions into a separate file so that we can use them for http testing (b822639) + o add a test for evhttp_connection_base_new with a dns_base (26714ca) + o forgot to add void to test function (78a50fe) + o Add a forgotten header (changelist-internal.h) (4b9f307) + o Remove some commented-out code in evutil (26e1b6f) + o Remove a needless include of rpc_compat.h (70a4a3e) + o Use less memory for each entry in a hashtable (a66e947) + o Try to untangle the logic in server_port_flush(). (439aea0) + o Use ev_[u]intptr_t types in place of [u]intptr_t (cef61a2) + o Reduce windows header includes in our own headers. (da6135e) + o clean up terminate_chunked test (e8a9782) + o Increment the submicro version number. (63e868e) + o Update event-config.h version number to match configure.in (aae7db5) + o Clean up formatting: Disallow space-before-tab. (8fdf09c) + o Clean up formatting: use tabs, not 8-spaces, to indent. (e5bbd40) + o Clean up formatting: remove trailing spaces (e5cf987) + o Clean up formatting: function/keyword spacing consistency. (4faeaea) + + + +Changes in 2.0.3-alpha (20 Nov 2009): + o Add a new code to support SSL/TLS on bufferevents, using the OpenSSL library (where available). + o Fix a bug where we didn't allocate enough memory in event_get_supported_methods(). + o Avoid segfault during failed allocation of locked evdns_base. (Found by Rocco Carbone.) + o Export new evutil_ascii_* functions to perform locale-independent character type operations. + o Try to compile better with MSVC: patches from Brodie Thiesfield + o New evconnlistener_get_fd function to expose a listener's associated socket. + o Expose an ev_socklen_t type for consistent use across platforms. + o Make bufferevent_socket_connect() work when the original fd was -1. + o Fix a bug in bufferevent_socket_connect() when the connection succeeds too quickly. + o Export an evutil_sockaddr_cmp() to compare to sockaddr objects for equality. + o Add a bufferevent_get_enabled() to tell what a bufferevent has been configured to do. + o Add an evbuffer_search_eol() function to locate the end of a line nondestructively. + o Add an evbuffer_search_range() function to search a bounded range of a buffer. + o Fix a rare crash bug in evdns. + o Have bufferevent_socket_connect() with no arguments put a bufferevent into connecting mode. + o Support sendfile on Solaris: patch from Caitlin Mercer. + o New functions to explicitly reference a socket used by an evhttp object. Patches from David Reiss. 
+ o When we send a BEV_EVENT_CONNECTED to indicate connected status, we no longer invoke the write callback as well unless we actually wrote data too. + o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov. + o Do not detect whether we have monotonic clock support every time a new event base is created: instead do it only once. Patch taken from Chromium. + o Do not allocate the maximum event queue for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 374K per epoll-based event_base. Resolves bug 2839240. + o Treat an event with a negative fd as valid but untriggerable by Libevent. This is useful for applications that want to manually activate events. + o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h + o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec + o New event_base_got_exit() and event_base_got_break() functions to tell whether an event loop exited because of an event_base_loopexit() or an event_base_loopbreak(). Patch from Ka-Hing Cheung. + o When adding or deleting an event from a non-main thread, only wake up the main thread when its behavior actually needs to change. + o Fix some bugs when using the old evdns interfaces to initialize the evdns module. + o Detect errors during bufferevent_connect(). Patch from Christopher Davis. + o Fix compilation for listener.h for C++ - missing extern "C". Patch from Ferenc Szalai. + o Make the event_base_loop() family of functions respect thread-safety better. This should clear up a few hard-to-debug race conditions. + o Fix a bug when using a specialized memory allocator on win32. + o Have the win32 select() backend label TCP-socket-connected events as EV_WRITE, not EV_READ. This should bring it in line with the other backends, and improve portability. Patch from Christopher Davis. + o Stop using enums as arguments or return values when what we mean is a bitfield of enum values. C++ doesn't believe that you can OR two enum values together and get another enum, and C++ takes its typing seriously. Patch from Christopher Davis. + o Add an API to replace all fatal calls to exit() with a user-provided panic function. + o Replace all assert() calls with a variant that is aware of the user-provided logging and panic functions. + o Add a return value to event_assign so that it can fail rather than asserting when the user gives it bad input. event_set still dies on bad input. + o The event_base_new() and event_base_new_with_config() functions now never call exit() on failure. For backward "compatibility", event_init() still does, but more consistently. + o Remove compat/sys/_time.h. It interfered with system headers on HPUX, and its functionality has been subsumed by event2/util.h and util-internal.h. + o Add a new bufferevent_socket_connect_hostname() to encapsulate the resolve-then-connect operation. + o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian. + o Alternative queue-based timeout algorithm for programs that use a large number of timeouts with the same value. + o New event_base_config option to disable the timeval cache entirely. + o Make EV_PERSIST timeouts more accurate: schedule the next event based on the scheduled time of the previous event, not based on the current time. 
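To make the resolve-then-connect helper named in the entries above concrete, here is a minimal sketch against the event2 headers vendored in this tree; it is an illustration only (not part of the upstream ChangeLog), the hostname, port, and request string are placeholders, and error handling is omitted:

    #include <event2/event.h>
    #include <event2/bufferevent.h>
    #include <event2/dns.h>
    #include <sys/socket.h>
    #include <string.h>

    static void eventcb(struct bufferevent *bev, short what, void *ctx)
    {
        if (what & BEV_EVENT_CONNECTED) {
            /* Name resolution and connect both succeeded; start writing. */
            const char req[] = "GET / HTTP/1.0\r\n\r\n";
            bufferevent_write(bev, req, strlen(req));
        } else if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
            bufferevent_free(bev);
        }
    }

    int main(void)
    {
        struct event_base *base = event_base_new();
        struct evdns_base *dns = evdns_base_new(base, 1);
        struct bufferevent *bev =
            bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);

        bufferevent_setcb(bev, NULL, NULL, eventcb, NULL);
        /* Resolve-then-connect in one call; AF_UNSPEC allows IPv4 or IPv6. */
        bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
                                            "www.example.com", 80);
        return event_base_dispatch(base);
    }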
+ o Allow http.c to handle cases where getaddrinfo returns an IPv6 address. Patch from Ryan Phillips.
+ o Fix a problem with excessive memory allocation when using multiple event priorities.
+ o Default to using arc4random for DNS transaction IDs on systems that have it; from OpenBSD.
+ o Never check the environment when we're running setuid or setgid; from OpenBSD.
+ o Options passed to evdns_set_option() no longer need to end with a colon.
+ o Add an evutil_getaddrinfo() function to clone getaddrinfo on platforms that don't have it.
+ o Add an evdns_getaddrinfo() function to provide a nonblocking getaddrinfo using evdns, so programs can perform useful hostname lookup.
+ o Finally expose the IOCP-based bufferevent backend. It passes its unit tests, but probably still has some bugs remaining. Code by Nick Mathewson and Christopher Davis.
+ o Numerous other bugfixes.
+ o On FreeBSD and other OSes, connect can return ECONNREFUSED immediately; instead of failing the function call, report the failure in the callback.
+ o Fix a race condition in the pthreads test case; found by Nick Mathewson
+ o Remove most calls to event_err() in http and deal with memory errors instead
+
+
+
+Changes in 2.0.2-alpha (25 Jul 2009):
+ o Add a new flag to bufferevents to make all callbacks automatically deferred.
+ o Make evdns functionality locked, and automatically defer dns callbacks.
+ o Fix a possible free(NULL) when freeing an event_base with no signals.
+ o Add a flag to disable checking environment variables when making an event_base
+ o Disallow setting less than 1 priority.
+ o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
+ o Use signal.h, not sys/signal.h. [Patch from mmadia]
+ o Try harder to build with certain older c99 compilers.
+ o Make sure that an event_config's flags field is always initialized to 0. [Bug report from Victor Goya]
+ o Avoid data corruption when reading data entirely into the second-to-last chain of an evbuffer. [Bug report from Victor Goya]
+ o Make sendfile work on FreeBSD
+ o Do not use vararg macros for accessing evrpc structures; this is not backwards compatible, but we did not promise any backwards compatibility for the rpc code.
+ o Actually define the event_config_set_flag() function.
+ o Try harder to compile with Visual C++.
+ o Move event_set() and its allies to event2/event_compat.h where they belong.
+ o Remove the event_gotsig code, which has long been deprecated and unused.
+ o Add an event_get_base() function to return the base assigned to an event.
+ o New function to automate connecting on a socket-based bufferevent.
+ o New functions to automate listening for incoming TCP connections.
+ o Do case-insensitive checks with a locale-independent comparison function.
+ o Rename the evbuffercb and everrorcb callbacks to bufferevent_data_cb and bufferevent_event_cb respectively. The old names are available in bufferevent_compat.h.
+ o Rename the EVBUFFER_* codes used by bufferevent event callbacks to BEV_EVENT_*, to avoid namespace collision with evbuffer flags. The old names are available in bufferevent_compat.h.
+ o Move the EVBUFFER_INPUT and EVBUFFER_OUTPUT macros to bufferevent_compat.h
+ o Add a bufferevent_getfd() function to mirror bufferevent_setfd()
+ o Make bufferevent_setfd() return an error code if the operation is not successful.
+ o Shave 22 bytes off struct event on 32-bit platforms by shrinking and re-ordering fields. The savings on 64-bit platforms is likely higher.
+ o Cap the maximum number of priorities at 256.
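The renaming entries above (bufferevent_data_cb, bufferevent_event_cb, and the BEV_EVENT_* codes) can be illustrated with a short sketch; this is an editor's illustration of the renamed callback types as they appear in the event2 headers, not code from the upstream tree, and the callback bodies are placeholders:

    #include <event2/event.h>
    #include <event2/bufferevent.h>
    #include <event2/buffer.h>

    /* Matches the bufferevent_data_cb typedef introduced in 2.0. */
    static void on_read(struct bufferevent *bev, void *ctx)
    {
        struct evbuffer *in = bufferevent_get_input(bev);
        /* Drain whatever arrived; a real program would parse it. */
        evbuffer_drain(in, evbuffer_get_length(in));
    }

    /* Matches the bufferevent_event_cb typedef; codes are now BEV_EVENT_*. */
    static void on_event(struct bufferevent *bev, short what, void *ctx)
    {
        if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
            bufferevent_free(bev);
    }

    static void setup(struct bufferevent *bev)
    {
        bufferevent_setcb(bev, on_read, NULL, on_event, NULL);
        bufferevent_enable(bev, EV_READ | EV_WRITE);
    }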
+ o Change the semantics of evbuffer_cb_set_flags() to be set-flag only; add a new evbuffer_cb_clear_flags() to remove set flags.
+ o Change the interface of evbuffer_add_reference so that the cleanup callback gets more information
+ o Revise the new evbuffer_reserve_space/evbuffer_commit_space() interfaces so that you can use them without causing extraneous copies or leaving gaps in the evbuffer.
+ o Add a new evbuffer_peek() interface to inspect data in an evbuffer without removing it.
+ o Fix a deadlock when suspending reads in a bufferevent due to a full buffer. (Spotted by Joachim Bauch.)
+ o Fix a memory error when freeing a thread-enabled event base with registered events. (Spotted by Joachim Bauch.)
+ o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
+ o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
+ o Replace some read()/write() instances with send()/recv() to work properly on win32.
+ o Set truncated flag correctly in evdns server replies.
+ o Raise RpcGenError in event_rpcgen.py; from jmanison and Zack Weinberg
+ o Fix preamble of rpcgen-generated files to rely on event2 includes; based on work by jmansion; patch from Zack Weinberg.
+ o Allow specifying the output filename for rpcgen; based on work by jmansion; patch from Zack Weinberg.
+ o Allow C identifiers as struct names; allow multiple comments in .rpc files; from Zack Weinberg
+ o Mitigate a race condition when using socket bufferevents in multiple threads.
+ o Use AC_SEARCH_LIBS, not AC_CHECK_LIB to avoid needless library use.
+ o Do not allow event_del(ev) to return while that event's callback is executing in another thread. This fixes a nasty race condition.
+ o event_get_supported_methods() now lists methods that have been disabled with the EVENT_NO* environment options.
+ o Rename encode_int[64] to evtag_encode_int[64] to avoid polluting the global namespace. The old method names are still available as macros in event2/tag_compat.h.
+
+
+
+Changes in 2.0.1-alpha (17 Apr 2009):
+ o free minheap on event_base_free(); from Christopher Layne
+ o debug cleanups in signal.c; from Christopher Layne
+ o provide event_base_new() that does not set the current_base global
+ o bufferevent_write now uses a const source argument; report from Charles Kerr
+ o improve documentation on event_base_loopexit; patch from Scott Lamb
+ o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
+ o Check return value of event_add in signal.c
+ o provide event_reinit() to reinitialize an event_base after fork
+ o New function event_set_mem_functions. It allows the user to give libevent replacement functions to use for memory management in place of malloc(), free(), etc. This should be generally useful for memory instrumentation, specialized allocators, and so on.
+ o The kqueue implementation now catches signals that are raised after event_add() is called but before the event_loop() call. This makes it match the other implementations.
+ o The kqueue implementation now restores original signal handlers correctly when its signal events are removed.
+ o Check return value of event_add in signal.c
+ o Add a more powerful evbuffer_readln as a replacement for evbuffer_readline.
+   The new function handles more newline styles, and is more useful with buffers that may contain nul characters.
+ o Do not mangle socket handles on 64-bit windows.
+ o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
+ o move EV_PERSIST handling out of the event backends
+ o small improvements to evhttp documentation
+ o always generate Date and Content-Length headers for HTTP/1.1 replies
+ o set the correct event base for HTTP close events
+ o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
+ o Rewrite win32.c backend to be O(n lg n) rather than O(n^2).
+ o Removed obsoleted recalc code
+ o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
+ o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
+ o prefix {encode,decode}_tag functions with evtag to avoid collisions
+ o fix a bug with event_rpcgen for integers
+ o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
+ o add -Wstrict-aliasing to warnings and more cleanup
+ o removed linger from http server socket; reported by Ilya Martynov
+ o event_rpcgen now allows creating integer arrays
+ o support string arrays in event_rpcgen
+ o change evrpc hooking to allow pausing of RPCs; this will make it possible for the hook to do some meaningful work; this is not backwards compatible.
+ o allow an http request callback to take ownership of a request structure
+ o allow association of meta data with RPC requests for hook processing
+ o associate more context for hooks to query such as the connection object
+ o remove pending timeouts on event_base_free()
+ o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
+ o devpoll and evport need reinit; tested by W.C.A Wijngaards
+ o event_base_get_method; from Springande Ulv
+ o Send CRLF after each chunk in HTTP output, for compliance with RFC2616. Patch from "propanbutan". Fixes bug 1894184.
+ o Add an int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
+ o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
+ o Allow regression code to build even without Python installed
+ o remove NDEBUG ifdefs from evdns.c
+ o detect integer types properly on platforms without stdint.h
+ o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
+ o simplify evbuffer by removing orig_buffer
+ o do not insert event into list when evsel->add fails
+ o add support for PUT/DELETE requests; from Josh Rotenberg
+ o introduce evhttp_accept_socket() to accept from an already created socket
+ o include Content-Length in reply for HTTP/1.0 requests with keep-alive
+ o increase listen queue for http sockets to 128; if that is not enough the evhttp_accept_socket() api can be used with a prepared socket.
+ o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
+ o test support for PUT/DELETE requests; from Josh Rotenberg
+ o rewrite of the evbuffer code to reduce memory copies
+ o Some older Solaris versions demand that _REENTRANT be defined to get strtok_r(); do so.
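The evhttp_accept_socket() and PUT/DELETE entries above can be illustrated with a small server sketch; this is an editor's illustration using the accessor names as they exist in the event2 headers vendored in this tree (not code from the upstream ChangeLog), and the listening socket is assumed to be created and bound by the caller:

    #include <event2/event.h>
    #include <event2/http.h>
    #include <event2/buffer.h>
    #include <event2/util.h>

    static void gencb(struct evhttp_request *req, void *ctx)
    {
        struct evbuffer *reply = evbuffer_new();

        /* PUT and DELETE requests arrive here as well, per the entries above. */
        evbuffer_add_printf(reply, "command=%d\n",
                            (int)evhttp_request_get_command(req));
        evhttp_send_reply(req, HTTP_OK, "OK", reply);
        evbuffer_free(reply);
    }

    static struct evhttp *serve_on_fd(struct event_base *base,
                                      evutil_socket_t listen_fd)
    {
        struct evhttp *http = evhttp_new(base);

        evhttp_set_gencb(http, gencb, NULL);
        /* Attach an already bound-and-listening socket to the server. */
        evhttp_accept_socket(http, listen_fd);
        return http;
    }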
+ o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
+ o Provide OpenSSL style support for multiple threads accessing the same event_base
+ o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
+ o switch thread support so that locks get allocated as they are needed.
+ o make event methods static so that they are not exported; from Andrei Nigmatulin
+ o make RPC replies use application/octet-stream as mime type
+ o do not delete uninitialized timeout event in evdns
+ o Correct the documentation on buffer printf functions.
+ o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
+ o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
+ o Make name_from_addr() threadsafe in http.c
+ o Add new thread-safe interfaces to evdns functions.
+ o Make all event_tagging interfaces threadsafe.
+ o Rename internal memory management functions.
+ o New functions (event_assign, event_new, event_free) for use by apps that want to be safely threadsafe, or want to remain ignorant of the contents of struct event.
+ o introduce bufferevent_read_buffer; allows reading without memory copy.
+ o expose bufferevent_setwatermark via header files and fix high watermark on read
+ o fix a bug in bufferevent read water marks and add a test for them
+ o fix a bug in which bufferevent_write_buffer would not schedule a write event
+ o provide bufferevent_input and bufferevent_output without requiring knowledge of the structure
+ o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
+ o convert evhttp_connection to use bufferevents.
+ o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
+ o Remove the never-exported, never-used evhttp_hostportfile function.
+ o Support input/output filters for bufferevents; somewhat similar to libio's model. This will allow us to implement SSL, compression, etc, transparently to users of bufferevents such as the http layer.
+ o allow connections to be removed from an rpc pool
+ o add new evtimer_assign, signal_assign, evtimer_new, and signal_new functions to manipulate timer and signal events, analogous to the now-recommended event_assign and event_new
+ o switch internal uses of event_set over to use event_assign.
+ o introduce evbuffer_contiguous_space() api that tells a user how much data is available in the first buffer chain
+ o introduce evbuffer_reserve_space() and evbuffer_commit_space() to make processing in filters more efficient.
+ o reduce system calls for getting current time by caching it.
+ o separate signal events from io events; making the code less complex.
+ o support for periodic timeouts
+ o support for virtual HTTP hosts.
+ o turn event_initialized() into a function, and add function equivalents to EVENT_SIGNAL and EVENT_FD so that people don't need to include event_struct.h
+ o Build test directory correctly with CPPFLAGS set.
+ o Provide an API for retrieving the supported event mechanisms.
+ o event_base_new_with_config() and corresponding config APIs.
+ o migrate the evhttp header to event2/ but accessors are still missing.
+ o deprecate timeout_* event functions by moving them to event_compat.h
+ o Move windows gettimeofday replacement into a new evutil_gettimeofday().
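The event_new()/event_free() and periodic-timeout entries above fit together as follows; this is a minimal editor's sketch against the event2 API (not upstream code), the one-second interval is arbitrary, and the loop runs until interrupted:

    #include <event2/event.h>
    #include <sys/time.h>
    #include <stdio.h>

    static void tick(evutil_socket_t fd, short what, void *arg)
    {
        (void)fd; (void)what; (void)arg;
        puts("tick");
    }

    int main(void)
    {
        struct event_base *base = event_base_new();
        /* A periodic timer: with EV_PERSIST the timeout re-arms itself. */
        struct event *timer = event_new(base, -1, EV_PERSIST, tick, NULL);
        struct timeval one_sec = { 1, 0 };

        event_add(timer, &one_sec);
        event_base_dispatch(base);   /* runs until the loop is stopped */

        event_free(timer);
        event_base_free(base);
        return 0;
    }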
+ o Make configure script work on IRIX.
+ o provide a method for canceling ongoing http requests.
+ o Make vsnprintf() return values consistent on win32.
+ o Fix connection keep-alive behavior for HTTP/1.0
+ o Fix use of freed memory in event_reinit; pointed out by Peter Postma
+ o constify struct timeval * where possible
+ o make event_get_supported_methods obey environment variables
+ o support for edge-triggered events on epoll and kqueue backends: patch from Valery Kholodkov
+ o support for selecting event backends by their features, and for querying the features of a backend.
+ o change failing behavior of event_base_new_with_config: if a config is provided and no backend is selected, return NULL instead of aborting.
+ o deliver partial data to request callbacks when chunked callback is set even if there is no chunking on the http level; allows cancelation of requests from within the chunked callback; from Scott Lamb.
+ o allow min_heap_erase to be called on removed members; from liusifan.
+ o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
+ o Do not use SO_REUSEADDR when connecting
+ o Support 64-bit integers in RPC structs
+ o Correct handling of trailing headers in chunked replies; from Scott Lamb.
+ o Support multi-line HTTP headers; based on a patch from Moshe Litvin
+ o Reject negative Content-Length headers; anonymous bug report
+ o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
+ o Various HTTP correctness fixes from Scott Lamb
+ o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
+ o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
+ o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
+ o Fix off-by-one errors in devpoll; from Ian Bell
+ o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
+ o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
+ o Add new utility functions to correctly observe and log winsock errors.
+ o Do not remove Accept-Encoding header
+ o Clear the timer cache on entering the event loop; reported by Victor Chang
+ o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
+ o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
+ o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
+ o Fix a typo in setting the global event base; reported by lance.
+ o Set the 0x20 bit on outgoing alphabetic characters in DNS requests randomly, and insist on a match in replies. This helps resist DNS poisoning attacks.
+ o Make the http connection close detection work properly with bufferevents and fix a potential memory leak associated with it.
+ o Restructure the event backends so that they do not need to keep track of events themselves, as a side effect multiple events can use the same fd or signal.
+ o Add generic implementations for parsing and emitting IPv6 addresses on platforms that do not have inet_ntop and/or inet_pton.
+ o Allow DNS servers that have IPv6 addresses.
+ o Add an evbuffer_write_atmost() function to write a limited number of bytes to an fd.
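The backend-feature-selection entries above (requiring backend features, and event_base_new_with_config() returning NULL instead of aborting when nothing matches) can be sketched as follows; this is an editor's illustration against the event2 API, not upstream code, and the requested feature set is only an example:

    #include <event2/event.h>
    #include <stdio.h>

    int main(void)
    {
        struct event_config *cfg = event_config_new();
        struct event_base *base;

        /* Ask for an edge-triggered, O(1) backend (e.g. epoll or kqueue). */
        event_config_require_features(cfg, EV_FEATURE_ET | EV_FEATURE_O1);

        /* Per the entry above, this returns NULL rather than aborting
         * when no backend satisfies the requested features. */
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
        if (!base) {
            fprintf(stderr, "no backend with ET + O(1) support\n");
            return 1;
        }

        printf("using %s (features 0x%x)\n",
               event_base_get_method(base), event_base_get_features(base));
        event_base_free(base);
        return 0;
    }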
+ o Refactor internal notify-main-thread logic to prefer eventfd to pipe, then pipe to socketpair, and only use socketpairs as a last resort. + o Try harder to pack all evbuffer reads into as few chains as possible, using readv/WSARecv as appropriate. + o New evthread_use_windows_threads() and evthread_use_pthreads() functions to set up the evthread callbacks with reasonable defaults. + o Change the semantics of timeouts in conjunction with EV_PERSIST; timeouts in that case will now repeat until deleted. + o sendfile, mmap and memory reference support for evbuffers. + o New evutil_make_listen_socket_reuseable() to abstract SO_REUSEADDR. + o New bind-to option to allow DNS clients to bind to an arbitrary port for outgoing requests. + o evbuffers can now be "frozen" to prevent operations at one or both ends. + o Bufferevents now notice external attempts to add data to an inbuf or remove it from an outbuf, and stop them. + o Fix parsing of queries where the encoded queries contained \r, \n or + + o Do not allow internal events to starve lower-priority events. + diff --git a/probe-busybox/libevent-2.1.11-stable/Doxyfile b/probe-busybox/libevent-2.1.11-stable/Doxyfile new file mode 100644 index 00000000..d9d66034 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/Doxyfile @@ -0,0 +1,257 @@ +# Doxyfile 1.5.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = libevent + +# Place all output under 'doxygen/' + +OUTPUT_DIRECTORY = doxygen/ + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description. + +JAVADOC_AUTOBRIEF = YES + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. 
+ +STRIP_FROM_PATH = include/ + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = \ + include/event2/buffer.h \ + include/event2/buffer_compat.h \ + include/event2/bufferevent.h \ + include/event2/bufferevent_compat.h \ + include/event2/bufferevent_ssl.h \ + include/event2/dns.h \ + include/event2/dns_compat.h \ + include/event2/event.h \ + include/event2/event_compat.h \ + include/event2/http.h \ + include/event2/http_compat.h \ + include/event2/listener.h \ + include/event2/rpc.h \ + include/event2/rpc_compat.h \ + include/event2/tag.h \ + include/event2/tag_compat.h \ + include/event2/thread.h \ + include/event2/util.h + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. 
+ +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. 
To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = TAILQ_ENTRY RB_ENTRY EVENT_DEFINED_TQENTRY_ EVENT_IN_DOXYGEN_ + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES diff --git a/probe-busybox/libevent-2.1.11-stable/LICENSE b/probe-busybox/libevent-2.1.11-stable/LICENSE new file mode 100644 index 00000000..402ca508 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/LICENSE @@ -0,0 +1,99 @@ +Libevent is available for use under the following license, commonly known +as the 3-clause (or "modified") BSD license: + +============================== +Copyright (c) 2000-2007 Niels Provos +Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +============================== + +Portions of Libevent are based on works by others, also made available by +them under the three-clause BSD license above. The copyright notices are +available in the corresponding source files; the license is as above. Here's +a list: + +log.c: + Copyright (c) 2000 Dug Song + Copyright (c) 1993 The Regents of the University of California. + +strlcpy.c: + Copyright (c) 1998 Todd C. Miller + +win32select.c: + Copyright (c) 2003 Michael A. 
Davis + +evport.c: + Copyright (c) 2007 Sun Microsystems + +ht-internal.h: + Copyright (c) 2002 Christopher Clark + +minheap-internal.h: + Copyright (c) 2006 Maxim Yegorushkin + +============================== + +The arc4module is available under the following, sometimes called the +"OpenBSD" license: + + Copyright (c) 1996, David Mazieres + Copyright (c) 2008, Damien Miller + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +============================== + +The Windows timer code is based on code from libutp, which is +distributed under this license, sometimes called the "MIT" license. + + +Copyright (c) 2010 BitTorrent, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/probe-busybox/libevent-2.1.11-stable/Makefile.am b/probe-busybox/libevent-2.1.11-stable/Makefile.am new file mode 100644 index 00000000..05dd3a7d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/Makefile.am @@ -0,0 +1,340 @@ +# Makefile.am for libevent +# Copyright 2000-2007 Niels Provos +# Copyright 2007-2012 Niels Provos and Nick Mathewson +# +# See LICENSE for copying information. + +# 'foreign' means that we're not enforcing GNU package rules strictly. +# '1.11.2' means that we need automake 1.11.2 or later (and we do). +AUTOMAKE_OPTIONS = foreign 1.11.2 subdir-objects + +ACLOCAL_AMFLAGS = -I m4 + +# This is the "Release" of the Libevent ABI. It takes precedence over +# the VERSION_INFO, so that two versions of Libevent with the same +# "Release" are never binary-compatible. +# +# This number incremented once for the 2.0 release candidate, and +# will increment for each series until we revise our interfaces enough +# that we can seriously expect ABI compatibility between series. +# +RELEASE = -release 2.1 + +# This is the version info for the libevent binary API. 
It has three +# numbers: +# Current -- the number of the binary API that we're implementing +# Revision -- which iteration of the implementation of the binary +# API are we supplying? +# Age -- How many previous binary API versions do we also +# support? +# +# To increment a VERSION_INFO (current:revision:age): +# If the ABI didn't change: +# Return (current:revision+1:age) +# If the ABI changed, but it's backward-compatible: +# Return (current+1:0:age+1) +# If the ABI changed and it isn't backward-compatible: +# Return (current+1:0:0) +# +# Once an RC is out, DO NOT MAKE ANY ABI-BREAKING CHANGES IN THAT SERIES +# UNLESS YOU REALLY REALLY HAVE TO. +VERSION_INFO = 7:0:0 + +# History: RELEASE VERSION_INFO +# 2.0.1-alpha -- 2.0 1:0:0 +# 2.0.2-alpha -- 2:0:0 +# 2.0.3-alpha -- 2:0:0 (should have incremented; didn't.) +# 2.0.4-alpha -- 3:0:0 +# 2.0.5-beta -- 4:0:0 +# 2.0.6-rc -- 2.0 2:0:0 +# 2.0.7-rc -- 2.0 3:0:1 +# 2.0.8-rc -- 2.0 4:0:2 +# 2.0.9-rc -- 2.0 5:0:0 (ABI changed slightly) +# 2.0.10-stable-- 2.0 5:1:0 (No ABI change) +# 2.0.11-stable-- 2.0 6:0:1 (ABI changed, backward-compatible) +# 2.0.12-stable-- 2.0 6:1:1 (No ABI change) +# 2.0.13-stable-- 2.0 6:2:1 (No ABI change) +# 2.0.14-stable-- 2.0 6:3:1 (No ABI change) +# 2.0.15-stable-- 2.0 6:3:1 (Forgot to update :( ) +# 2.0.16-stable-- 2.0 6:4:1 (No ABI change) +# 2.0.17-stable-- 2.0 6:5:1 (No ABI change) +# 2.0.18-stable-- 2.0 6:6:1 (No ABI change) +# 2.0.19-stable-- 2.0 6:7:1 (No ABI change) +# 2.0.20-stable-- 2.0 6:8:1 (No ABI change) +# 2.0.21-stable-- 2.0 6:9:1 (No ABI change) +# +# For Libevent 2.1: +# 2.1.1-alpha -- 2.1 1:0:0 +# 2.1.2-alpha -- 2.1 1:0:0 (should have been 2:0:1) +# 2.1.3-alpha -- 2.1 3:0:0 (ABI changed slightly) +# 2.1.4-alpha -- 2.1 4:0:0 (ABI changed slightly) +# 2.1.5-beta -- 2.1 5:0:0 (ABI changed slightly) +# 2.1.6-beta -- 2.1 6:0:0 (ABI changed slightly) +# 2.1.7-beta -- 2.1 6:1:0 (ABI changed slightly) +# 2.1.8-stable-- 2.1 6:2:0 (No ABI change) +# 2.1.9-beta-- 2.1 6:3:0 (No ABI change) +# 2.1.10-stable-- 2.1 6:4:0 (No ABI change, WRONG) +# 2.1.11-stable-- 2.1 7:0:0 (ABI changed) + +# ABI version history for this package effectively restarts every time +# we change RELEASE. Version 1.4.x had RELEASE of 1.4. +# +# Ideally, we would not be using RELEASE at all; instead we could just +# use the VERSION_INFO field to label our backward-incompatible ABI +# changes, and those would be few and far between. Unfortunately, +# Libevent still exposes far too many volatile structures in its +# headers, so we pretty much have to assume that most development +# series will break ABI compatibility. For now, it's simplest just to +# keep incrementing the RELEASE between series and resetting VERSION_INFO. +# +# Eventually, when we get to the point where the structures in the +# headers are all non-changing (or not there at all!), we can shift to +# a more normal worldview where backward-incompatible ABI changes are +# nice and rare. For the next couple of years, though, 'struct event' +# is user-visible, and so we can pretty much guarantee that release +# series won't be binary-compatible. + +pkgconfigdir=$(libdir)/pkgconfig +LIBEVENT_PKGCONFIG=libevent.pc libevent_core.pc libevent_extra.pc + +# These sources are conditionally added by configure.ac or conditionally +# included from other files. 
+PLATFORM_DEPENDENT_SRC = \ + arc4random.c \ + epoll_sub.c + +CMAKE_FILES = \ + cmake/AddCompilerFlags.cmake \ + cmake/AddEventLibrary.cmake \ + cmake/CheckConstExists.cmake \ + cmake/CheckFileOffsetBits.c \ + cmake/CheckFileOffsetBits.cmake \ + cmake/CheckFunctionExistsEx.c \ + cmake/CheckFunctionExistsEx.cmake \ + cmake/CheckFunctionKeywords.cmake \ + cmake/CheckPrototypeDefinition.c.in \ + cmake/CheckPrototypeDefinition.cmake \ + cmake/CheckWorkingKqueue.cmake \ + cmake/CodeCoverage.cmake \ + cmake/COPYING-CMAKE-SCRIPTS \ + cmake/Copyright.txt \ + cmake/FindGit.cmake \ + cmake/LibeventConfigBuildTree.cmake.in \ + cmake/LibeventConfig.cmake.in \ + cmake/LibeventConfigVersion.cmake.in \ + cmake/VersionViaGit.cmake \ + event-config.h.cmake \ + evconfig-private.h.cmake \ + CMakeLists.txt + +EXTRA_DIST = \ + ChangeLog-1.4 \ + ChangeLog-2.0 \ + Doxyfile \ + LICENSE \ + Makefile.nmake test/Makefile.nmake \ + autogen.sh \ + event_rpcgen.py \ + libevent.pc.in \ + make-event-config.sed \ + whatsnew-2.0.txt \ + whatsnew-2.1.txt \ + README.md \ + $(CMAKE_FILES) \ + $(PLATFORM_DEPENDENT_SRC) + +LIBEVENT_LIBS_LA = libevent.la libevent_core.la libevent_extra.la +if PTHREADS +LIBEVENT_LIBS_LA += libevent_pthreads.la +LIBEVENT_PKGCONFIG += libevent_pthreads.pc +endif +if OPENSSL +LIBEVENT_LIBS_LA += libevent_openssl.la +LIBEVENT_PKGCONFIG += libevent_openssl.pc +endif + +if INSTALL_LIBEVENT +lib_LTLIBRARIES = $(LIBEVENT_LIBS_LA) +pkgconfig_DATA = $(LIBEVENT_PKGCONFIG) +else +noinst_LTLIBRARIES = $(LIBEVENT_LIBS_LA) +endif + +EXTRA_SOURCE= +noinst_HEADERS= +noinst_PROGRAMS= +EXTRA_PROGRAMS= +CLEANFILES= +DISTCLEANFILES= +BUILT_SOURCES = +include include/include.am +include sample/include.am +include test/include.am + +if BUILD_WIN32 + +SYS_LIBS = -lws2_32 -lshell32 -ladvapi32 +SYS_SRC = win32select.c buffer_iocp.c event_iocp.c \ + bufferevent_async.c +SYS_INCLUDES = -IWIN32-Code -IWIN32-Code/nmake + +if THREADS +SYS_SRC += evthread_win32.c +endif + +else + +SYS_LIBS = +SYS_SRC = +SYS_INCLUDES = + +endif + +if STRLCPY_IMPL +SYS_SRC += strlcpy.c +endif +if SELECT_BACKEND +SYS_SRC += select.c +endif +if POLL_BACKEND +SYS_SRC += poll.c +endif +if DEVPOLL_BACKEND +SYS_SRC += devpoll.c +endif +if KQUEUE_BACKEND +SYS_SRC += kqueue.c +endif +if EPOLL_BACKEND +SYS_SRC += epoll.c +endif +if EVPORT_BACKEND +SYS_SRC += evport.c +endif +if SIGNAL_SUPPORT +SYS_SRC += signal.c +endif + +BUILT_SOURCES += include/event2/event-config.h + +include/event2/event-config.h: config.h make-event-config.sed + $(AM_V_GEN)test -d include/event2 || $(MKDIR_P) include/event2 + $(AM_V_at)$(SED) -f $(srcdir)/make-event-config.sed < config.h > $@T + $(AM_V_at)mv -f $@T $@ + +CORE_SRC = \ + buffer.c \ + bufferevent.c \ + bufferevent_filter.c \ + bufferevent_pair.c \ + bufferevent_ratelim.c \ + bufferevent_sock.c \ + event.c \ + evmap.c \ + evthread.c \ + evutil.c \ + evutil_rand.c \ + evutil_time.c \ + listener.c \ + log.c \ + $(SYS_SRC) + +EXTRAS_SRC = \ + evdns.c \ + event_tagging.c \ + evrpc.c \ + http.c + +if BUILD_WITH_NO_UNDEFINED +NO_UNDEFINED = -no-undefined +MAYBE_CORE = libevent_core.la +else +NO_UNDEFINED = +MAYBE_CORE = +endif + +AM_CFLAGS = $(LIBEVENT_CFLAGS) +AM_CPPFLAGS = -I$(srcdir)/compat -I./include -I$(srcdir)/include $(SYS_INCLUDES) $(LIBEVENT_CPPFLAGS) +AM_LDFLAGS = $(LIBEVENT_LDFLAGS) + +GENERIC_LDFLAGS = -version-info $(VERSION_INFO) $(RELEASE) $(NO_UNDEFINED) $(AM_LDFLAGS) + +libevent_la_SOURCES = $(CORE_SRC) $(EXTRAS_SRC) +libevent_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS) +libevent_la_LDFLAGS = $(GENERIC_LDFLAGS) + 
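+# Illustration (a hedged sketch, not part of the upstream Makefile.am):
+# with RELEASE = -release 2.1 and VERSION_INFO = 7:0:0 above, libtool on
+# a typical Linux/ELF system would install this library as roughly
+# libevent-2.1.so.7.0.0 with soname libevent-2.1.so.7, i.e. the file
+# suffix is (current-age).(age).(revision).
+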
+libevent_core_la_SOURCES = $(CORE_SRC) +libevent_core_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS) +libevent_core_la_LDFLAGS = $(GENERIC_LDFLAGS) + +if PTHREADS +libevent_pthreads_la_SOURCES = evthread_pthread.c +libevent_pthreads_la_LIBADD = $(MAYBE_CORE) +libevent_pthreads_la_LDFLAGS = $(GENERIC_LDFLAGS) +endif + +libevent_extra_la_SOURCES = $(EXTRAS_SRC) +libevent_extra_la_LIBADD = $(MAYBE_CORE) $(SYS_LIBS) +libevent_extra_la_LDFLAGS = $(GENERIC_LDFLAGS) + +if OPENSSL +libevent_openssl_la_SOURCES = bufferevent_openssl.c +libevent_openssl_la_LIBADD = $(MAYBE_CORE) $(OPENSSL_LIBS) +libevent_openssl_la_LDFLAGS = $(GENERIC_LDFLAGS) +libevent_openssl_la_CPPFLAGS = $(AM_CPPFLAGS) $(OPENSSL_INCS) +endif + +noinst_HEADERS += \ + WIN32-Code/nmake/evconfig-private.h \ + WIN32-Code/nmake/event2/event-config.h \ + WIN32-Code/getopt.h \ + WIN32-Code/getopt.c \ + WIN32-Code/getopt_long.c \ + WIN32-Code/tree.h \ + bufferevent-internal.h \ + changelist-internal.h \ + compat/sys/queue.h \ + defer-internal.h \ + epolltable-internal.h \ + evbuffer-internal.h \ + event-internal.h \ + evmap-internal.h \ + evrpc-internal.h \ + evsignal-internal.h \ + evthread-internal.h \ + ht-internal.h \ + http-internal.h \ + iocp-internal.h \ + ipv6-internal.h \ + kqueue-internal.h \ + log-internal.h \ + minheap-internal.h \ + mm-internal.h \ + ratelim-internal.h \ + ratelim-internal.h \ + strlcpy-internal.h \ + time-internal.h \ + util-internal.h \ + openssl-compat.h + +EVENT1_HDRS = \ + include/evdns.h \ + include/event.h \ + include/evhttp.h \ + include/evrpc.h \ + include/evutil.h + +if INSTALL_LIBEVENT +include_HEADERS = $(EVENT1_HDRS) +else +noinst_HEADERS += $(EVENT1_HDRS) +endif + +verify: check + +doxygen: FORCE + doxygen $(srcdir)/Doxyfile +FORCE: + +DISTCLEANFILES += *~ libevent.pc libevent_core.pc libevent_extra.pc ./include/event2/event-config.h + diff --git a/probe-busybox/libevent-2.1.11-stable/Makefile.nmake b/probe-busybox/libevent-2.1.11-stable/Makefile.nmake new file mode 100644 index 00000000..f27cd619 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/Makefile.nmake @@ -0,0 +1,82 @@ +# WATCH OUT! This makefile is a work in progress. -*- makefile -*- +# +# I'm not very knowledgeable about MSVC and nmake beyond their most basic +# aspects. If anything here looks wrong to you, please let me know. + +# If OPENSSL_DIR is not set, builds without OpenSSL support. If you want +# OpenSSL support, you can set the OPENSSL_DIR variable to where you +# installed OpenSSL. This can be done in the environment: +# set OPENSSL_DIR=c:\openssl +# Or on the nmake command line: +# nmake OPENSSL_DIR=C:\openssl -f Makefile.nmake +# Or by uncommenting the following line here in the makefile... + +# OPENSSL_DIR=c:\openssl + +!IFDEF OPENSSL_DIR +SSL_CFLAGS=/I$(OPENSSL_DIR)\include /DEVENT__HAVE_OPENSSL +!ELSE +SSL_CFLAGS= +!ENDIF + +# Needed for correctness +CFLAGS=/IWIN32-Code /IWIN32-Code/nmake /Iinclude /Icompat /DHAVE_CONFIG_H /I. 
$(SSL_CFLAGS) + +# For optimization and warnings +CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo + +# XXXX have a debug mode + +LIBFLAGS=/nologo + +CORE_OBJS=event.obj buffer.obj bufferevent.obj bufferevent_sock.obj \ + bufferevent_pair.obj listener.obj evmap.obj log.obj evutil.obj \ + strlcpy.obj signal.obj bufferevent_filter.obj evthread.obj \ + bufferevent_ratelim.obj evutil_rand.obj evutil_time.obj +WIN_OBJS=win32select.obj evthread_win32.obj buffer_iocp.obj \ + event_iocp.obj bufferevent_async.obj +EXTRA_OBJS=event_tagging.obj http.obj evdns.obj evrpc.obj + +!IFDEF OPENSSL_DIR +SSL_OBJS=bufferevent_openssl.obj +SSL_LIBS=libevent_openssl.lib +!ELSE +SSL_OBJS= +SSL_LIBS= +!ENDIF + +ALL_OBJS=$(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS) $(SSL_OBJS) +STATIC_LIBS=libevent_core.lib libevent_extras.lib libevent.lib $(SSL_LIBS) + + +all: static_libs tests + +static_libs: $(STATIC_LIBS) + +libevent_core.lib: $(CORE_OBJS) $(WIN_OBJS) + lib $(LIBFLAGS) $(CORE_OBJS) $(WIN_OBJS) /out:libevent_core.lib + +libevent_extras.lib: $(EXTRA_OBJS) + lib $(LIBFLAGS) $(EXTRA_OBJS) /out:libevent_extras.lib + +libevent.lib: $(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS) + lib $(LIBFLAGS) $(CORE_OBJS) $(EXTRA_OBJS) $(WIN_OBJS) /out:libevent.lib + +libevent_openssl.lib: $(SSL_OBJS) + lib $(LIBFLAGS) $(SSL_OBJS) /out:libevent_openssl.lib + +clean: + del $(ALL_OBJS) + del $(STATIC_LIBS) + cd test + $(MAKE) /F Makefile.nmake clean + cd .. + +tests: + cd test +!IFDEF OPENSSL_DIR + $(MAKE) OPENSSL_DIR=$(OPENSSL_DIR) /F Makefile.nmake +!ELSE + $(MAKE) /F Makefile.nmake +!ENDIF + cd .. diff --git a/probe-busybox/libevent-2.1.11-stable/README.md b/probe-busybox/libevent-2.1.11-stable/README.md new file mode 100644 index 00000000..8be37f49 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/README.md @@ -0,0 +1,469 @@ +

+ [libevent logo] +

+ + + +[![Appveyor Win32 Build Status](https://ci.appveyor.com/api/projects/status/ng3jg0uhy44mp7ik?svg=true)](https://ci.appveyor.com/project/libevent/libevent) +[![Travis Build Status](https://travis-ci.org/libevent/libevent.svg?branch=master)](https://travis-ci.org/libevent/libevent) +[![Coverage Status](https://coveralls.io/repos/github/libevent/libevent/badge.svg)](https://coveralls.io/github/libevent/libevent) +[![Join the chat at https://gitter.im/libevent/libevent](https://badges.gitter.im/libevent/libevent.svg)](https://gitter.im/libevent/libevent?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + + + +# 0. BUILDING AND INSTALLATION (Briefly) + +## Autoconf + + $ ./configure + $ make + $ make verify # (optional) + $ sudo make install + +## CMake (General) + + +The following Libevent specific CMake variables are as follows (the values being +the default). + +``` +# Type of the library to build (SHARED or STATIC) +# Default is: SHARED for MSVC, otherwise BOTH +EVENT__LIBRARY_TYPE:STRING=DEFAULT + +# Installation directory for CMake files +EVENT_INSTALL_CMAKE_DIR:PATH=lib/cmake/libevent + +# Enable running gcov to get a test coverage report (only works with +# GCC/CLang). Make sure to enable -DCMAKE_BUILD_TYPE=Debug as well. +EVENT__COVERAGE:BOOL=OFF + +# Defines if Libevent should build without the benchmark executables +EVENT__DISABLE_BENCHMARK:BOOL=OFF + +# Define if Libevent should build without support for a debug mode +EVENT__DISABLE_DEBUG_MODE:BOOL=OFF + +# Define if Libevent should not allow replacing the mm functions +EVENT__DISABLE_MM_REPLACEMENT:BOOL=OFF + +# Define if Libevent should build without support for OpenSSL encryption +EVENT__DISABLE_OPENSSL:BOOL=OFF + +# Disable the regress tests +EVENT__DISABLE_REGRESS:BOOL=OFF + +# Disable sample files +EVENT__DISABLE_SAMPLES:BOOL=OFF + +# If tests should be compiled or not +EVENT__DISABLE_TESTS:BOOL=OFF + +# Define if Libevent should not be compiled with thread support +EVENT__DISABLE_THREAD_SUPPORT:BOOL=OFF + +# Enables verbose debugging +EVENT__ENABLE_VERBOSE_DEBUG:BOOL=OFF + +# When cross compiling, forces running a test program that verifies that Kqueue +# works with pipes. Note that this requires you to manually run the test program +# on the the cross compilation target to verify that it works. See CMake +# documentation for try_run for more details +EVENT__FORCE_KQUEUE_CHECK:BOOL=OFF +``` + +__More variables can be found by running `cmake -LAH `__ + + +## CMake (Windows) + +Install CMake: + + + $ md build && cd build + $ cmake -G "Visual Studio 10" .. # Or whatever generator you want to use cmake --help for a list. + $ start libevent.sln + +## CMake (Unix) + + $ mkdir build && cd build + $ cmake .. # Default to Unix Makefiles. + $ make + $ make verify # (optional) + + +# 1. BUILDING AND INSTALLATION (In Depth) + +## Autoconf + +To build Libevent, type + + $ ./configure && make + + + (If you got Libevent from the git repository, you will + first need to run the included "autogen.sh" script in order to + generate the configure script.) + +You can run the regression tests by running + + $ make verify + +Install as root via + + $ make install + +Before reporting any problems, please run the regression tests. + +To enable low-level tracing, build the library as: + + $ CFLAGS=-DUSE_DEBUG ./configure [...] + +Standard configure flags should work. In particular, see: + + --disable-shared Only build static libraries. + --prefix Install all files relative to this directory. 
+ + +The configure script also supports the following flags: + + --enable-gcc-warnings Enable extra compiler checking with GCC. + --disable-malloc-replacement + Don't let applications replace our memory + management functions. + --disable-openssl Disable support for OpenSSL encryption. + --disable-thread-support Don't support multithreaded environments. + +## CMake (Windows) + +(Note that autoconf is currently the most mature and supported build +environment for Libevent; the CMake instructions here are new and +experimental, though they _should_ be solid. We hope that CMake will +still be supported in future versions of Libevent, and will try to +make sure that happens.) + +First of all install . + +To build Libevent using Microsoft Visual studio open the "Visual Studio Command prompt" and type: + +``` +$ cd +$ mkdir build && cd build +$ cmake -G "Visual Studio 10" .. # Or whatever generator you want to use cmake --help for a list. +$ start libevent.sln +``` + +In the above, the ".." refers to the dir containing the Libevent source code. +You can build multiple versions (with different compile time settings) from the same source tree +by creating other build directories. + +It is highly recommended to build "out of source" when using +CMake instead of "in source" like the normal behaviour of autoconf for this reason. + +The "NMake Makefiles" CMake generator can be used to build entirely via the command line. + +To get a list of settings available for the project you can type: + +``` +$ cmake -LH .. +``` + +### GUI + +CMake also provides a GUI that lets you specify the source directory and output (binary) directory +that the build should be placed in. + +# 2. USEFUL LINKS: + +For the latest released version of Libevent, see the official website at + . + +There's a pretty good work-in-progress manual up at + . + +For the latest development versions of Libevent, access our Git repository +via + +``` +$ git clone https://github.com/libevent/libevent.git +``` + +You can browse the git repository online at: + + + +To report bugs, issues, or ask for new features: + +__Patches__: https://github.com/libevent/libevent/pulls +> OK, those are not really _patches_. You fork, modify, and hit the "Create Pull Request" button. +> You can still submit normal git patches via the mailing list. + +__Bugs, Features [RFC], and Issues__: https://github.com/libevent/libevent/issues +> Or you can do it via the mailing list. + +There's also a libevent-users mailing list for talking about Libevent +use and development: + + + +# 3. 
ACKNOWLEDGMENTS + +The following people have helped with suggestions, ideas, code or +fixing bugs: + + * Samy Al Bahra + * Antony Antony + * Jacob Appelbaum + * Arno Bakker + * Weston Andros Adamson + * William Ahern + * Ivan Andropov + * Sergey Avseyev + * Avi Bab + * Joachim Bauch + * Andrey Belobrov + * Gilad Benjamini + * Stas Bekman + * Denis Bilenko + * Julien Blache + * Kevin Bowling + * Tomash Brechko + * Kelly Brock + * Ralph Castain + * Adrian Chadd + * Lawnstein Chan + * Shuo Chen + * Ka-Hing Cheung + * Andrew Cox + * Paul Croome + * George Danchev + * Andrew Danforth + * Ed Day + * Christopher Davis + * Mike Davis + * Frank Denis + * Antony Dovgal + * Mihai Draghicioiu + * Alexander Drozdov + * Mark Ellzey + * Shie Erlich + * Leonid Evdokimov + * Juan Pablo Fernandez + * Christophe Fillot + * Mike Frysinger + * Remi Gacogne + * Artem Germanov + * Alexander von Gernler + * Diego Giagio + * Artur Grabowski + * Diwaker Gupta + * Kuldeep Gupta + * Sebastian Hahn + * Dave Hart + * Greg Hazel + * Nicholas Heath + * Michael Herf + * Savg He + * Mark Heily + * Maxime Henrion + * Michael Herf + * Greg Hewgill + * Andrew Hochhaus + * Aaron Hopkins + * Tani Hosokawa + * Jamie Iles + * Xiuqiang Jiang + * Claudio Jeker + * Evan Jones + * Marcin Juszkiewicz + * George Kadianakis + * Makoto Kato + * Phua Keat + * Azat Khuzhin + * Alexander Klauer + * Kevin Ko + * Brian Koehmstedt + * Marko Kreen + * Ondřej Kuzník + * Valery Kyholodov + * Ross Lagerwall + * Scott Lamb + * Christopher Layne + * Adam Langley + * Graham Leggett + * Volker Lendecke + * Philip Lewis + * Zhou Li + * David Libenzi + * Yan Lin + * Moshe Litvin + * Simon Liu + * Mitchell Livingston + * Hagne Mahre + * Lubomir Marinov + * Abilio Marques + * Nicolas Martyanoff + * Abel Mathew + * Nick Mathewson + * James Mansion + * Nicholas Marriott + * Andrey Matveev + * Caitlin Mercer + * Dagobert Michelsen + * Andrea Montefusco + * Mansour Moufid + * Mina Naguib + * Felix Nawothnig + * Trond Norbye + * Linus Nordberg + * Richard Nyberg + * Jon Oberheide + * John Ohl + * Phil Oleson + * Alexey Ozeritsky + * Dave Pacheco + * Derrick Pallas + * Tassilo von Parseval + * Catalin Patulea + * Patrick Pelletier + * Simon Perreault + * Dan Petro + * Pierre Phaneuf + * Amarin Phaosawasdi + * Ryan Phillips + * Dimitre Piskyulev + * Pavel Plesov + * Jon Poland + * Roman Puls + * Nate R + * Robert Ransom + * Balint Reczey + * Bert JW Regeer + * Nate Rosenblum + * Peter Rosin + * Maseeb Abdul Qadir + * Wang Qin + * Alex S + * Gyepi Sam + * Hanna Schroeter + * Ralf Schmitt + * Mike Smellie + * Steve Snyder + * Nir Soffer + * Dug Song + * Dongsheng Song + * Hannes Sowa + * Joakim Soderberg + * Joseph Spadavecchia + * Kevin Springborn + * Harlan Stenn + * Andrew Sweeney + * Ferenc Szalai + * Brodie Thiesfield + * Jason Toffaletti + * Brian Utterback + * Gisle Vanem + * Bas Verhoeven + * Constantine Verutin + * Colin Watt + * Zack Weinberg + * Jardel Weyrich + * Jay R. 
Wren + * Zack Weinberg + * Mobai Zhang + * Alejo + * Alex + * Taral + * propanbutan + * masksqwe + * mmadia + * yangacer + * Andrey Skriabin + * basavesh.as + * billsegall + * Bill Vaughan + * Christopher Wiley + * David Paschich + * Ed Schouten + * Eduardo Panisset + * Jan Heylen + * jer-gentoo + * Joakim Söderberg + * kirillDanshin + * lzmths + * Marcus Sundberg + * Mark Mentovai + * Mattes D + * Matyas Dolak + * Neeraj Badlani + * Nick Mathewson + * Rainer Keller + * Seungmo Koo + * Thomas Bernard + * Xiao Bao Clark + * zeliard + * Zonr Chang + * Kurt Roeckx + * Seven + * Simone Basso + * Vlad Shcherban + * Tim Hentenaar + * Breaker + * johnsonlee + * Philip Prindeville + * Vis Virial + * Andreas Gustafsson + * Andrey Okoshkin + * an-tao + * baixiangcpp + * Bernard Spil + * Bogdan Harjoc + * Carlo Marcelo Arenas Belón + * David Benjamin + * David Disseldorp + * Dmitry Alimov + * Dominic Chen + * dpayne + * ejurgensen + * Fredrik Strupe + * Gonçalo Ribeiro + * James Synge + * Jan Beich + * Jesse Fang + * Jiri Luznicky + * José Luis Millán + * Kiyoshi Aman + * Leo Zhang + * lightningkay + * Luke Dashjr + * Marcin Szewczyk + * Maximilian Brunner + * Maya Rashish + * Murat Demirten + * Nathan French + * Nikolay Edigaryev + * Philip Herron + * Redfoxmoon + * stenn + * SuckShit + * The Gitter Badger + * tim-le + * Vincent JARDIN + * Xiang Zhang + * Xiaozhou Liu + * yongqing.jiao + * Enji Cooper + * linxiaohui + * Seong-Joong Kim + * Tobias Stoeckmann + * Yury Korzhetsky + * zhuizhuhaomeng + * Pierce Lopez + * yuangongji + * Keith Smiley + * jeremyerb + * Fabrice Fontaine + * wenyg + + +If we have forgotten your name, please contact us. diff --git a/probe-busybox/libevent-2.1.11-stable/Vagrantfile b/probe-busybox/libevent-2.1.11-stable/Vagrantfile new file mode 100644 index 00000000..d275089e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/Vagrantfile @@ -0,0 +1,397 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# DESCRIPTION: +# ============ +# Vagrant for running libevent tests with: +# - timeout 30min, to avoid hungs +# - run tests in parallel under ctest (10 concurency) +# - if you have uncommited changes, you should commit them first to check +# - unix only, because of some tar'ing to avoid one vm affect another +# +# ENVIRONMENT: +# ============ +# - NO_PKG -- do not install packages +# - NO_CMAKE -- do not run with cmake +# - NO_AUTOTOOLS -- do not run with autoconf/automake + +Vagrant.configure("2") do |config| + # to allow running boxes provisions in parallel, we can't share the same dirs + # via virtualbox, however sometimes it is the only way, so instead let's + # create an archive of HEAD (this way we will not have any trash there) and + # extract it for every box to the separate folder. + # + # P.S. 
we will change this --prefix with tar(1) --trasnform + system('git archive --prefix=libevent/ --output=.vagrant/libevent.tar HEAD') + + config.vm.provider "virtualbox" do |vb| + vb.memory = "512" + + # otherwise osx fails, anyway we do not need this + vb.customize ["modifyvm", :id, "--usb", "off"] + vb.customize ["modifyvm", :id, "--usbehci", "off"] + end + + # disable /vagrant share, in case we will not use default mount + config.vm.synced_folder ".", "/vagrant", disabled: true + + config.vm.define "ubuntu" do |ubuntu| + system('tar --overwrite --transform=s/libevent/libevent-linux/ -xf .vagrant/libevent.tar -C .vagrant/') + + ubuntu.vm.box = "ubuntu/xenial64" + ubuntu.vm.synced_folder ".vagrant/libevent-linux", "/vagrant", + type: "rsync" + + if ENV['NO_PKG'] != "true" + ubuntu.vm.provision "shell", inline: <<-SHELL + apt-get update + apt-get install -y zlib1g-dev libssl-dev python2.7 + apt-get install -y build-essential cmake ninja-build + apt-get install -y autoconf automake libtool + SHELL + end + + if ENV['NO_CMAKE'] != "true" + ubuntu.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + rm -fr .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G Ninja .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . --target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + ubuntu.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + ./autogen.sh + ./configure + make -j20 verify + SHELL + end + end + + config.vm.define "freebsd" do |freebsd| + system('tar --overwrite --transform=s/libevent/libevent-freebsd/ -xf .vagrant/libevent.tar -C .vagrant/') + + freebsd.vm.box = "freebsd/FreeBSD-11.0-STABLE" + freebsd.vm.synced_folder ".vagrant/libevent-freebsd", "/vagrant", + type: "rsync", group: "wheel" + + # otherwise reports error + freebsd.ssh.shell = "sh" + + if ENV['NO_PKG'] != "true" + freebsd.vm.provision "shell", inline: <<-SHELL + pkg install --yes openssl cmake ninja automake autotools + SHELL + end + + if ENV['NO_CMAKE'] != "true" + freebsd.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + rm -fr .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G Ninja .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . --target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + freebsd.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + ./autogen.sh + ./configure + make -j20 verify + SHELL + end + end + + config.vm.define "netbsd" do |netbsd| + system('tar --overwrite --transform=s/libevent/libevent-netbsd/ -xf .vagrant/libevent.tar -C .vagrant/') + + netbsd.vm.box = "kja/netbsd-7-amd64" + netbsd.vm.synced_folder ".vagrant/libevent-netbsd", "/vagrant", + type: "rsync", group: "wheel" + + if ENV['NO_PKG'] != "true" + netbsd.vm.provision "shell", inline: <<-SHELL + export PKG_PATH="ftp://ftp.netbsd.org/pub/pkgsrc/packages/NetBSD/x86_64/7.0_2016Q2/All/" + pkg_add ncurses ninja-build automake cmake libtool + SHELL + end + + if ENV['NO_CMAKE'] != "true" + netbsd.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + rm -fr .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G Ninja .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . 
--target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + netbsd.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + ./autogen.sh + ./configure + make -j20 verify + SHELL + end + end + + config.vm.define "solaris" do |solaris| + system('tar --overwrite --transform=s/libevent/libevent-solaris/ -xf .vagrant/libevent.tar -C .vagrant/') + + # XXX: + # - solaris do not have '-or' it only has '-o' for find(1), so we can't use + # rsync + # - and autoconf(1) doesn't work on virtualbox share, ugh + solaris.vm.synced_folder ".vagrant/libevent-solaris", "/vagrant-vbox", + type: "virtualbox" + + solaris.vm.box = "tnarik/solaris10-minimal" + if ENV['NO_PKG'] != "true" + # TODO: opencsw does not include ninja(1) + solaris.vm.provision "shell", inline: <<-SHELL + pkgadd -d http://get.opencsw.org/now + pkgutil -U + pkgutil -y -i libssl_dev cmake rsync python gmake gcc5core automake autoconf libtool + SHELL + end + + # copy from virtualbox mount to newly created folder + solaris.vm.provision "shell", privileged: false, inline: <<-SHELL + rm -fr ~/vagrant + cp -r /vagrant-vbox ~/vagrant + SHELL + + if ENV['NO_CMAKE'] != "true" + # builtin compiler cc(1) is a wrapper, so we should use gcc5 manually, + # otherwise it will not work. + # Plus we should set some paths so that cmake/compiler can find tham. + solaris.vm.provision "shell", privileged: false, inline: <<-SHELL + export CMAKE_INCLUDE_PATH=/opt/csw/include + export CMAKE_LIBRARY_PATH=/opt/csw/lib + export CFLAGS=-I$CMAKE_INCLUDE_PATH + export LDFLAGS=-L$CMAKE_LIBRARY_PATH + + cd ~/vagrant + rm -rf .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -DCMAKE_C_COMPILER=gcc .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . --target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + # and we should set MAKE for `configure` otherwise it will try to use + # `make` + solaris.vm.provision "shell", privileged: false, inline: <<-SHELL + cd ~/vagrant + ./autogen.sh + MAKE=gmake ./configure + gmake -j20 verify + SHELL + end + end + + # known failures: + # - sometimes vm hangs + config.vm.define "osx" do |osx| + system('tar --overwrite --transform=s/libevent/libevent-osx/ -xf .vagrant/libevent.tar -C .vagrant/') + + osx.vm.synced_folder ".vagrant/libevent-osx", "/vagrant", + type: "rsync", group: "wheel" + + osx.vm.box = "jhcook/osx-elcapitan-10.11" + if ENV['NO_PKG'] != "true" + osx.vm.provision "shell", privileged: false, inline: <<-SHELL + ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" + + brew uninstall libtool + brew install libtool openssl ninja cmake autoconf automake + SHELL + end + + if ENV['NO_CMAKE'] != "true" + # we should set some paths so that cmake/compiler can find tham + osx.vm.provision "shell", privileged: false, inline: <<-SHELL + export OPENSSL_ROOT=$(echo /usr/local/Cellar/openssl/*) + export CMAKE_INCLUDE_PATH=$OPENSSL_ROOT/include + export CMAKE_LIBRARY_PATH=$OPENSSL_ROOT/lib + + cd /vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G Ninja .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . 
--target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + osx.vm.provision "shell", privileged: false, inline: <<-SHELL + export OPENSSL_ROOT=$(echo /usr/local/Cellar/openssl/*) + export CFLAGS=-I$OPENSSL_ROOT/include + export LDFLAGS=-L$OPENSSL_ROOT/lib + + cd /vagrant + ./autogen.sh + ./configure + make -j20 verify + SHELL + end + end + + config.vm.define "centos" do |centos| + system('tar --overwrite --transform=s/libevent/libevent-centos/ -xf .vagrant/libevent.tar -C .vagrant/') + + centos.vm.synced_folder ".vagrant/libevent-centos", "/vagrant", + type: "rsync", group: "wheel" + + centos.vm.box = "centos/7" + if ENV['NO_PKG'] != "true" + centos.vm.provision "shell", inline: <<-SHELL + echo "[russianfedora]" > /etc/yum.repos.d/russianfedora.repo + echo name=russianfedora >> /etc/yum.repos.d/russianfedora.repo + echo baseurl=http://mirror.yandex.ru/fedora/russianfedora/russianfedora/free/el/releases/7/Everything/x86_64/os/ >> /etc/yum.repos.d/russianfedora.repo + echo enabled=1 >> /etc/yum.repos.d/russianfedora.repo + echo gpgcheck=0 >> /etc/yum.repos.d/russianfedora.repo + SHELL + centos.vm.provision "shell", inline: <<-SHELL + yum -y install zlib-devel openssl-devel python + yum -y install gcc cmake ninja-build + yum -y install autoconf automake libtool + SHELL + end + + if ENV['NO_CMAKE'] != "true" + centos.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + rm -fr .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G Ninja .. + + export CTEST_TEST_TIMEOUT=1800 + export CTEST_OUTPUT_ON_FAILURE=1 + export CTEST_PARALLEL_LEVEL=20 + cmake --build . --target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + centos.vm.provision "shell", privileged: false, inline: <<-SHELL + cd /vagrant + ./autogen.sh + ./configure + make -j20 verify + SHELL + end + end + + # known failures: + # - issues with timers (not enough allowed error) + config.vm.define "win" do |win| + system('tar --overwrite --transform=s/libevent/libevent-win/ -xf .vagrant/libevent.tar -C .vagrant/') + + # 512MB not enough after libtool install, huh + win.vm.provider "virtualbox" do |vb| + vb.memory = "1024" + end + + # windows does not have rsync builtin, let's use virtualbox for now + win.vm.synced_folder ".vagrant/libevent-win", "/vagrant", + type: "virtualbox" + + win.vm.box = "senglin/win-10-enterprise-vs2015community" + if ENV['NO_PKG'] != "true" + # box with vs2015 does not have C++ support, so let's install it manually + # plus chocolatey that includes in this box, can't handle sha1 checksum for + # cmake.install, so let's update it< + win.vm.provision "shell", inline: <<-SHELL + choco upgrade -y chocolatey -pre -f + choco install -y VisualStudioCommunity2013 + choco install -y openssl.light + choco install -y cygwin cyg-get + choco install -y cmake + choco install -y cmake.install + choco install -y python2 + SHELL + + # chocolatey openssl.light package does not contains headers + win.vm.provision "shell", inline: <<-SHELL + (new-object System.Net.WebClient).DownloadFile('http://strcpy.net/packages/Win32OpenSSL-1_0_2a.exe', '/openssl.exe') + /openssl.exe /silent /verysilent /sp- /suppressmsgboxes + SHELL + + # XXX: + # - cyg-get depends from cygwinsetup.exe + # https://github.com/chocolatey/chocolatey-coreteampackages/issues/200 + # - cyg-get only downloads, do not installs them, ugh. 
so let's do not use + # it + win.vm.provision "shell", privileged: false, inline: <<-SHELL + (new-object System.Net.WebClient).DownloadFile('https://cygwin.com/setup-x86_64.exe', '/tools/cygwin/cygwinsetup.exe') + + $env:PATH="/tools/cygwin/bin;$($env:PATH);/tools/cygwin" + + cygwinsetup --root c:/tools/cygwin/ --local-package-dir c:/tools/cygwin/packages/ --no-desktop --no-startmenu --verbose --quiet-mode --download --packages automake,autoconf,gcc-core,libtool,make,python,openssl-devel + cygwinsetup --root c:/tools/cygwin/ --local-package-dir c:/tools/cygwin/packages/ --no-desktop --no-startmenu --verbose --quiet-mode --local-install --packages automake,autoconf,gcc-core,libtool,make,python,openssl-devel + SHELL + end + + if ENV['NO_CMAKE'] != "true" + win.vm.provision "shell", privileged: false, inline: <<-SHELL + $env:PATH="/Program Files/CMake/bin;/tools/python2;$($env:PATH)" + + cd /vagrant + Remove-Item -Recurse -Force .cmake-vagrant + mkdir -p .cmake-vagrant + cd .cmake-vagrant + cmake -G "Visual Studio 12" .. + + $env:CTEST_TEST_TIMEOUT = "1800" + $env:CTEST_OUTPUT_ON_FAILURE = "1" + $env:CTEST_PARALLEL_LEVEL = "10" + cmake --build . --target verify + SHELL + end + + if ENV['NO_AUTOTOOLS'] != "true" + win.vm.provision "shell", privileged: false, inline: <<-SHELL + $env:PATH="/tools/cygwin/bin;$($env:PATH)" + + bash -lc "echo 'C:/tools/mingw64 /mingw ntfs binary 0 0' > /etc/fstab" + bash -lc "echo 'C:/OpenSSL-Win32 /ssl ntfs binary 0 0' >> /etc/fstab" + bash -lc "echo 'C:/vagrant /vagrant ntfs binary 0 0' >> /etc/fstab" + + bash -lc "exec 0&1; cd /vagrant; bash -x ./autogen.sh && ./configure LDFLAGS='-L/ssl -L/ssl/lib -L/ssl/lib/MinGW' CFLAGS=-I/ssl/include && make -j20 verify" + SHELL + end + end +end diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.c b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.c new file mode 100644 index 00000000..0fcba5d9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.c @@ -0,0 +1,149 @@ +/* $NetBSD: getopt.c,v 1.16 1999/12/02 13:15:56 kleink Exp $ */ + +/* + * Copyright (c) 1987, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS + * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if 0 +static char sccsid[] = "@(#)getopt.c 8.3 (Berkeley) 4/27/95"; +#endif + +#include +#include +#include +#include + +#define __P(x) x +#define _DIAGASSERT(x) assert(x) + +#ifdef __weak_alias +__weak_alias(getopt,_getopt); +#endif + + +int opterr = 1, /* if error message should be printed */ + optind = 1, /* index into parent argv vector */ + optopt, /* character checked for validity */ + optreset; /* reset getopt */ +char *optarg; /* argument associated with option */ + +static char * _progname __P((char *)); +int getopt_internal __P((int, char * const *, const char *)); + +static char * +_progname(nargv0) + char * nargv0; +{ + char * tmp; + + _DIAGASSERT(nargv0 != NULL); + + tmp = strrchr(nargv0, '/'); + if (tmp) + tmp++; + else + tmp = nargv0; + return(tmp); +} + +#define BADCH (int)'?' +#define BADARG (int)':' +#define EMSG "" + +/* + * getopt -- + * Parse argc/argv argument vector. + */ +int +getopt(nargc, nargv, ostr) + int nargc; + char * const nargv[]; + const char *ostr; +{ + static char *__progname = 0; + static char *place = EMSG; /* option letter processing */ + char *oli; /* option letter list index */ + __progname = __progname?__progname:_progname(*nargv); + + _DIAGASSERT(nargv != NULL); + _DIAGASSERT(ostr != NULL); + + if (optreset || !*place) { /* update scanning pointer */ + optreset = 0; + if (optind >= nargc || *(place = nargv[optind]) != '-') { + place = EMSG; + return (-1); + } + if (place[1] && *++place == '-' /* found "--" */ + && place[1] == '\0') { + ++optind; + place = EMSG; + return (-1); + } + } /* option letter okay? */ + if ((optopt = (int)*place++) == (int)':' || + !(oli = strchr(ostr, optopt))) { + /* + * if the user didn't specify '-' as an option, + * assume it means -1. 
+ */ + if (optopt == (int)'-') + return (-1); + if (!*place) + ++optind; + if (opterr && *ostr != ':') + (void)fprintf(stderr, + "%s: illegal option -- %c\n", __progname, optopt); + return (BADCH); + } + if (*++oli != ':') { /* don't need argument */ + optarg = NULL; + if (!*place) + ++optind; + } + else { /* need an argument */ + if (*place) /* no white space */ + optarg = place; + else if (nargc <= ++optind) { /* no arg */ + place = EMSG; + if (*ostr == ':') + return (BADARG); + if (opterr) + (void)fprintf(stderr, + "%s: option requires an argument -- %c\n", + __progname, optopt); + return (BADCH); + } + else /* white space */ + optarg = nargv[optind]; + place = EMSG; + ++optind; + } + return (optopt); /* dump back option letter */ +} + diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.h b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.h new file mode 100644 index 00000000..796f4550 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt.h @@ -0,0 +1,33 @@ +#ifndef __GETOPT_H__ +#define __GETOPT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +extern int opterr; /* if error message should be printed */ +extern int optind; /* index into parent argv vector */ +extern int optopt; /* character checked for validity */ +extern int optreset; /* reset getopt */ +extern char *optarg; /* argument associated with option */ + +struct option +{ + const char *name; + int has_arg; + int *flag; + int val; +}; + +#define no_argument 0 +#define required_argument 1 +#define optional_argument 2 + +int getopt(int, char**, const char*); +int getopt_long(int, char**, const char*, const struct option*, int*); + +#ifdef __cplusplus +} +#endif + +#endif /* __GETOPT_H__ */ diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt_long.c b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt_long.c new file mode 100644 index 00000000..092defbc --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/getopt_long.c @@ -0,0 +1,234 @@ + +/* + * Copyright (c) 1987, 1993, 1994, 1996 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS + * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include "getopt.h" + +extern int opterr; /* if error message should be printed */ +extern int optind; /* index into parent argv vector */ +extern int optopt; /* character checked for validity */ +extern int optreset; /* reset getopt */ +extern char *optarg; /* argument associated with option */ + +#define __P(x) x +#define _DIAGASSERT(x) assert(x) + +static char * __progname __P((char *)); +int getopt_internal __P((int, char * const *, const char *)); + +static char * +__progname(nargv0) + char * nargv0; +{ + char * tmp; + + _DIAGASSERT(nargv0 != NULL); + + tmp = strrchr(nargv0, '/'); + if (tmp) + tmp++; + else + tmp = nargv0; + return(tmp); +} + +#define BADCH (int)'?' +#define BADARG (int)':' +#define EMSG "" + +/* + * getopt -- + * Parse argc/argv argument vector. + */ +int +getopt_internal(nargc, nargv, ostr) + int nargc; + char * const *nargv; + const char *ostr; +{ + static char *place = EMSG; /* option letter processing */ + char *oli; /* option letter list index */ + + _DIAGASSERT(nargv != NULL); + _DIAGASSERT(ostr != NULL); + + if (optreset || !*place) { /* update scanning pointer */ + optreset = 0; + if (optind >= nargc || *(place = nargv[optind]) != '-') { + place = EMSG; + return (-1); + } + if (place[1] && *++place == '-') { /* found "--" */ + /* ++optind; */ + place = EMSG; + return (-2); + } + } /* option letter okay? */ + if ((optopt = (int)*place++) == (int)':' || + !(oli = strchr(ostr, optopt))) { + /* + * if the user didn't specify '-' as an option, + * assume it means -1. + */ + if (optopt == (int)'-') + return (-1); + if (!*place) + ++optind; + if (opterr && *ostr != ':') + (void)fprintf(stderr, + "%s: illegal option -- %c\n", __progname(nargv[0]), optopt); + return (BADCH); + } + if (*++oli != ':') { /* don't need argument */ + optarg = NULL; + if (!*place) + ++optind; + } else { /* need an argument */ + if (*place) /* no white space */ + optarg = place; + else if (nargc <= ++optind) { /* no arg */ + place = EMSG; + if ((opterr) && (*ostr != ':')) + (void)fprintf(stderr, + "%s: option requires an argument -- %c\n", + __progname(nargv[0]), optopt); + return (BADARG); + } else /* white space */ + optarg = nargv[optind]; + place = EMSG; + ++optind; + } + return (optopt); /* dump back option letter */ +} + +#if 0 +/* + * getopt -- + * Parse argc/argv argument vector. + */ +int +getopt2(nargc, nargv, ostr) + int nargc; + char * const *nargv; + const char *ostr; +{ + int retval; + + if ((retval = getopt_internal(nargc, nargv, ostr)) == -2) { + retval = -1; + ++optind; + } + return(retval); +} +#endif + +/* + * getopt_long -- + * Parse argc/argv argument vector. 
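+ *
+ * Hypothetical usage sketch (illustrative only, not part of the
+ * original source): callers pass an array of struct option entries
+ * terminated by a zeroed entry and loop until -1 is returned, e.g.
+ *
+ *	static const struct option longopts[] = {
+ *		{ "verbose", no_argument,       NULL, 'v' },
+ *		{ "output",  required_argument, NULL, 'o' },
+ *		{ NULL,      0,                 NULL, 0   }
+ *	};
+ *	int c;
+ *	while ((c = getopt_long(argc, argv, "vo:", longopts, NULL)) != -1) {
+ *		switch (c) {
+ *		case 'v': verbose = 1; break;        /* hypothetical flag */
+ *		case 'o': outfile = optarg; break;   /* hypothetical value */
+ *		default:  usage();                   /* hypothetical helper */
+ *		}
+ *	}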
+ */ +int +getopt_long(nargc, nargv, options, long_options, index) + int nargc; + char ** nargv; + const char * options; + const struct option * long_options; + int * index; +{ + int retval; + + _DIAGASSERT(nargv != NULL); + _DIAGASSERT(options != NULL); + _DIAGASSERT(long_options != NULL); + /* index may be NULL */ + + if ((retval = getopt_internal(nargc, nargv, options)) == -2) { + char *current_argv = nargv[optind++] + 2, *has_equal; + int i, match = -1; + size_t current_argv_len; + + if (*current_argv == '\0') { + return(-1); + } + if ((has_equal = strchr(current_argv, '=')) != NULL) { + current_argv_len = has_equal - current_argv; + has_equal++; + } else + current_argv_len = strlen(current_argv); + + for (i = 0; long_options[i].name; i++) { + if (strncmp(current_argv, long_options[i].name, current_argv_len)) + continue; + + if (strlen(long_options[i].name) == current_argv_len) { + match = i; + break; + } + if (match == -1) + match = i; + } + if (match != -1) { + if (long_options[match].has_arg == required_argument || + long_options[match].has_arg == optional_argument) { + if (has_equal) + optarg = has_equal; + else + optarg = nargv[optind++]; + } + if ((long_options[match].has_arg == required_argument) + && (optarg == NULL)) { + /* + * Missing argument, leading : + * indicates no error should be generated + */ + if ((opterr) && (*options != ':')) + (void)fprintf(stderr, + "%s: option requires an argument -- %s\n", + __progname(nargv[0]), current_argv); + return (BADARG); + } + } else { /* No matching argument */ + if ((opterr) && (*options != ':')) + (void)fprintf(stderr, + "%s: illegal option -- %s\n", __progname(nargv[0]), current_argv); + return (BADCH); + } + if (long_options[match].flag) { + *long_options[match].flag = long_options[match].val; + retval = 0; + } else + retval = long_options[match].val; + if (index) + *index = match; + } + return(retval); +} diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/evconfig-private.h b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/evconfig-private.h new file mode 100644 index 00000000..88e20627 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/evconfig-private.h @@ -0,0 +1,6 @@ +#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__) +#define EVENT_EVCONFIG__PRIVATE_H_ + +/* Nothing to see here. Move along. */ + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/event2/event-config.h b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/event2/event-config.h new file mode 100644 index 00000000..35ec16fd --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/nmake/event2/event-config.h @@ -0,0 +1,350 @@ +/* event2/event-config.h + * + * This file was generated by autoconf when libevent was built, and post- + * processed by Libevent so that its macros would have a uniform prefix. + * + * DO NOT EDIT THIS FILE. + * + * Do not rely on macros in this file existing in later versions. + */ +#ifndef EVENT_CONFIG_H__ +#define EVENT_CONFIG_H__ +/* config.h. Generated by configure. */ +/* config.h.in. Generated from configure.in by autoheader. 
*/ + +/* Define if libevent should not allow replacing the mm functions */ +/* #undef EVENT__DISABLE_MM_REPLACEMENT */ + +/* Define if libevent should not be compiled with thread support */ +/* #undef EVENT__DISABLE_THREAD_SUPPORT */ + +/* Define if clock_gettime is available in libc */ +/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */ + +/* Define is no secure id variant is available */ +/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */ +#define EVENT_DNS_USE_FTIME_FOR_ID_ 1 + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_ARPA_INET_H */ + +/* Define to 1 if you have the `clock_gettime' function. */ +/* #undef EVENT__HAVE_CLOCK_GETTIME */ + +/* Define if /dev/poll is available */ +/* #undef EVENT__HAVE_DEVPOLL */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_DLFCN_H */ + +/* Define if your system supports the epoll system calls */ +/* #undef EVENT__HAVE_EPOLL */ + +/* Define to 1 if you have the `epoll_ctl' function. */ +/* #undef EVENT__HAVE_EPOLL_CTL */ + +/* Define to 1 if you have the `eventfd' function. */ +/* #undef EVENT__HAVE_EVENTFD */ + +/* Define if your system supports event ports */ +/* #undef EVENT__HAVE_EVENT_PORTS */ + +/* Define to 1 if you have the `fcntl' function. */ +/* #undef EVENT__HAVE_FCNTL */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_FCNTL_H 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#define EVENT__HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#define EVENT__HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getprotobynumber' function. */ +#define EVENT__HAVE_GETPROTOBYNUMBER 1 + +/* Define to 1 if you have the `getservbyname' function. */ +#define EVENT__HAVE_GETSERVBYNAME 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +/* #define EVENT__HAVE_GETTIMEOFDAY 1 */ + +/* Define to 1 if you have the `inet_ntop' function. */ +/* #undef EVENT__HAVE_INET_NTOP */ + +/* Define to 1 if you have the `inet_pton' function. */ +/* #undef EVENT__HAVE_INET_PTON */ + +/* Define to 1 if you have the header file. */ +/* #define EVENT__HAVE_INTTYPES_H 1 */ + +/* Define to 1 if you have the `kqueue' function. */ +/* #undef EVENT__HAVE_KQUEUE */ + +/* Define if the system has zlib */ +/* #undef EVENT__HAVE_LIBZ */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mmap' function. */ +/* #undef EVENT__HAVE_MMAP */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_NETINET_IN6_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_NETINET_IN_H */ + +/* Define to 1 if you have the `pipe' function. */ +/* #undef EVENT__HAVE_PIPE */ + +/* Define to 1 if you have the `poll' function. */ +/* #undef EVENT__HAVE_POLL */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_POLL_H */ + +/* Define to 1 if you have the `port_create' function. */ +/* #undef EVENT__HAVE_PORT_CREATE */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_PORT_H */ + +/* Define if you have POSIX threads libraries and header files. */ +/* #undef EVENT__HAVE_PTHREAD */ + +/* Define if we have pthreads on this system */ +/* #undef EVENT__HAVE_PTHREADS */ + +/* Define to 1 if the system has the type `sa_family_t'. */ +/* #undef EVENT__HAVE_SA_FAMILY_T */ + +/* Define to 1 if you have the `select' function. */ +/* #undef EVENT__HAVE_SELECT */ + +/* Define to 1 if you have the `sendfile' function. 
*/ +/* #undef EVENT__HAVE_SENDFILE */ + +/* Define if F_SETFD is defined in */ +/* #undef EVENT__HAVE_SETFD */ + +/* Define to 1 if you have the `sigaction' function. */ +/* #undef EVENT__HAVE_SIGACTION */ + +/* Define to 1 if you have the `signal' function. */ +#define EVENT__HAVE_SIGNAL 1 + +/* Define to 1 if you have the `splice' function. */ +/* #undef EVENT__HAVE_SPLICE */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +/* #define EVENT__HAVE_STDINT_H 1 */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcpy' function. */ +/* #undef EVENT__HAVE_STRLCPY */ + +/* Define to 1 if you have the `strsep' function. */ +/* #undef EVENT__HAVE_STRSEP */ + +/* Define to 1 if you have the `strtok_r' function. */ +/* #undef EVENT__HAVE_STRTOK_R */ + +/* Define to 1 if you have the `strtoll' function. */ +/* #define EVENT__HAVE_STRTOLL 1 */ + +#define EVENT__HAVE_STRUCT_ADDRINFO 1 + +/* Define to 1 if the system has the type `struct in6_addr'. */ +#define EVENT__HAVE_STRUCT_IN6_ADDR 1 + +/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */ +#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1 + +/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */ +#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1 + +/* Define to 1 if the system has the type `struct sockaddr_in6'. */ +#define EVENT__HAVE_STRUCT_SOCKADDR_IN6 1 + +/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */ +/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */ + +/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */ +/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */ + +/* Define to 1 if the system has the type `struct sockaddr_storage'. */ +#define EVENT__HAVE_STRUCT_SOCKADDR_STORAGE 1 + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_EPOLL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_EVENTFD_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_EVENT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_IOCTL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_MMAN_H */ + +/* Define to 1 if you have the header file. */ +/* #define EVENT__HAVE_SYS_PARAM_H 1 */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_QUEUE_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_SELECT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_SENDFILE_H */ + +/* Define to 1 if you have the header file. */ +/* #undef EVENT__HAVE_SYS_SOCKET_H */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +/* #define EVENT__HAVE_SYS_TIME_H 1 */ + +/* Define to 1 if you have the header file. */ +#define EVENT__HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. 
*/ +/* #undef EVENT__HAVE_SYS_UIO_H */ + +/* Define if TAILQ_FOREACH is defined in */ +/* #undef EVENT__HAVE_TAILQFOREACH */ + +/* Define if timeradd is defined in */ +/* #undef EVENT__HAVE_TIMERADD */ + +/* Define if timerclear is defined in */ +#define EVENT__HAVE_TIMERCLEAR 1 + +/* Define if timercmp is defined in */ +#define EVENT__HAVE_TIMERCMP 1 + +/* Define if timerisset is defined in */ +#define EVENT__HAVE_TIMERISSET 1 + +/* Define to 1 if the system has the type `uint16_t'. */ +/* #define EVENT__HAVE_UINT16_T 1 */ + +/* Define to 1 if the system has the type `uint32_t'. */ +/* #define EVENT__HAVE_UINT32_T 1 */ + +/* Define to 1 if the system has the type `uint64_t'. */ +/* #define EVENT__HAVE_UINT64_T 1 */ + +/* Define to 1 if the system has the type `uint8_t'. */ +/* #define EVENT__HAVE_UINT8_T 1 */ + +/* Define to 1 if you have the header file. */ +/* #define EVENT__HAVE_UNISTD_H 1 */ + +/* Define to 1 if you have the `vasprintf' function. */ +/* #undef EVENT__HAVE_VASPRINTF */ + +/* Define if kqueue works correctly with pipes */ +/* #undef EVENT__HAVE_WORKING_KQUEUE */ + +/* Numeric representation of the version */ +#define EVENT__NUMERIC_VERSION 0x02010b00 + +/* Name of package */ +#define EVENT__PACKAGE "libevent" + +/* Define to the address where bug reports for this package should be sent. */ +#define EVENT__PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define EVENT__PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define EVENT__PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define EVENT__PACKAGE_TARNAME "" + +/* Define to the version of this package. */ +#define EVENT__PACKAGE_VERSION "" + +/* Define to necessary symbol if this constant uses a non-standard name on + your system. */ +/* #undef EVENT__PTHREAD_CREATE_JOINABLE */ + +/* The size of a `int', as computed by sizeof. */ +#define EVENT__SIZEOF_INT 4 + +/* The size of a `long', as computed by sizeof. */ +#define EVENT__SIZEOF_LONG 4 + +/* The size of a `long long', as computed by sizeof. */ +#define EVENT__SIZEOF_LONG_LONG 8 + +/* The size of a `short', as computed by sizeof. */ +#define EVENT__SIZEOF_SHORT 2 + +/* The size of `size_t', as computed by sizeof. */ +#ifdef _WIN64 +#define EVENT__SIZEOF_SIZE_T 8 +#else +#define EVENT__SIZEOF_SIZE_T 4 +#endif + +/* The size of `void *', as computed by sizeof. */ +#ifdef _WIN64 +#define EVENT__SIZEOF_VOID_P 8 +#else +#define EVENT__SIZEOF_VOID_P 4 +#endif + +/* The size of `time_t`, as computed by sizeof. */ +#ifdef _WIN64 +#define EVENT__SIZEOF_TIME_T 8 +#else +#define EVENT__SIZEOF_TIME_T 4 +#endif + +/* Define to 1 if you can safely include both and . */ +#define EVENT__TIME_WITH_SYS_TIME 1 + +/* Version number of package */ +#define EVENT__VERSION "2.1.11-stable" + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#define EVENT__inline __inline + +/* Define to `unsigned' if does not define. */ +/* #undef EVENT__size_t */ + +/* Define to unsigned int if you dont have it */ +#define EVENT__socklen_t unsigned int + +/* Define to `int' if does not define. 
*/ +#define EVENT__ssize_t SSIZE_T + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/WIN32-Code/tree.h b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/tree.h new file mode 100644 index 00000000..2ccfbf20 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/WIN32-Code/tree.h @@ -0,0 +1,677 @@ +/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */ +/* + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_TREE_H_ +#define _SYS_TREE_H_ + +/* + * This file defines data structures for different types of trees: + * splay trees and red-black trees. + * + * A splay tree is a self-organizing data structure. Every operation + * on the tree causes a splay to happen. The splay moves the requested + * node to the root of the tree and partly rebalances it. + * + * This has the benefit that request locality causes faster lookups as + * the requested nodes move to the top of the tree. On the other hand, + * every lookup causes memory writes. + * + * The Balance Theorem bounds the total access time for m operations + * and n inserts on an initially empty tree as O((m + n)lg n). The + * amortized cost for a sequence of m accesses to a splay tree is O(lg n); + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). 
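+ *
+ * Illustrative sketch (an assumption about typical use, not part of the
+ * original header): a splay tree over a user-defined struct is declared
+ * and generated with the macros below, roughly:
+ *
+ *	struct node { int key; SPLAY_ENTRY(node) entry; };
+ *	static int cmp(struct node *a, struct node *b)
+ *		{ return (a->key - b->key); }
+ *	SPLAY_HEAD(tree, node) head = SPLAY_INITIALIZER(&head);
+ *	SPLAY_PROTOTYPE(tree, node, entry, cmp)
+ *	SPLAY_GENERATE(tree, node, entry, cmp)
+ *
+ * after which the generated tree_SPLAY_INSERT() and tree_SPLAY_FIND()
+ * functions insert and look up nodes.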
+ */ + +#define SPLAY_HEAD(name, type) \ +struct name { \ + struct type *sph_root; /* root of the tree */ \ +} + +#define SPLAY_INITIALIZER(root) \ + { NULL } + +#define SPLAY_INIT(root) do { \ + (root)->sph_root = NULL; \ +} while (0) + +#define SPLAY_ENTRY(type) \ +struct { \ + struct type *spe_left; /* left element */ \ + struct type *spe_right; /* right element */ \ +} + +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) + +/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (0) + +#define SPLAY_LINKLEFT(head, tmp, field) do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ +} while (0) + +#define SPLAY_LINKRIGHT(head, tmp, field) do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ +} while (0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ +} while (0) + +/* Generates prototypes and inline functions */ + +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ +void name##_SPLAY(struct name *, struct type *); \ +void name##_SPLAY_MINMAX(struct name *, int); \ +struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ +struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ + \ +/* Finds the node with the same key as elm */ \ +static __inline struct type * \ +name##_SPLAY_FIND(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) \ + return(NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_NEXT(struct name *head, struct type *elm) \ +{ \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_MIN_MAX(struct name *head, int val) \ +{ \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ +} + +/* Main splay operation. 
+ * Moves node close to the key of elm to top + */ +#define SPLAY_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_SPLAY_INSERT(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if(__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ +} \ + \ +struct type * \ +name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ +} \ + \ +void \ +name##_SPLAY(struct name *head, struct type *elm) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while ((__comp = (cmp)(elm, (head)->sph_root))) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0){ \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} \ + \ +/* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. 
\ + */ \ +void name##_SPLAY_MINMAX(struct name *head, int __comp) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} + +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); \ + (x) != NULL; \ + (x) = SPLAY_NEXT(name, head, x)) + +/* Macros that define a red-back tree */ +#define RB_HEAD(name, type) \ +struct name { \ + struct type *rbh_root; /* root of the tree */ \ +} + +#define RB_INITIALIZER(root) \ + { NULL } + +#define RB_INIT(root) do { \ + (root)->rbh_root = NULL; \ +} while (0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_ENTRY(type) \ +struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ +} + +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_COLOR(elm, field) (elm)->field.rbe_color +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) + +#define RB_SET(elm, parent, field) do { \ + RB_PARENT(elm, field) = parent; \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + RB_COLOR(elm, field) = RB_RED; \ +} while (0) + +#define RB_SET_BLACKRED(black, red, field) do { \ + RB_COLOR(black, field) = RB_BLACK; \ + RB_COLOR(red, field) = RB_RED; \ +} while (0) + +#ifndef RB_AUGMENT +#define RB_AUGMENT(x) +#endif + +#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \ + RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (0) + +#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ + (tmp) = RB_LEFT(elm, field); \ + if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \ + RB_PARENT(RB_RIGHT(tmp, field), field) = 
(elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_RIGHT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (0) + +/* Generates prototypes and inline functions */ +#define RB_PROTOTYPE(name, type, field, cmp) \ +void name##_RB_INSERT_COLOR(struct name *, struct type *); \ +void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ +struct type *name##_RB_REMOVE(struct name *, struct type *); \ +struct type *name##_RB_INSERT(struct name *, struct type *); \ +struct type *name##_RB_FIND(struct name *, struct type *); \ +struct type *name##_RB_NEXT(struct type *); \ +struct type *name##_RB_MINMAX(struct name *, int); \ + \ + +/* Main rb operation. + * Moves node close to the key of elm to top + */ +#define RB_GENERATE(name, type, field, cmp) \ +void \ +name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ +{ \ + struct type *parent, *gparent, *tmp; \ + while ((parent = RB_PARENT(elm, field)) && \ + RB_COLOR(parent, field) == RB_RED) { \ + gparent = RB_PARENT(parent, field); \ + if (parent == RB_LEFT(gparent, field)) { \ + tmp = RB_RIGHT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + if (RB_RIGHT(parent, field) == elm) { \ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_RIGHT(head, gparent, tmp, field); \ + } else { \ + tmp = RB_LEFT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + if (RB_LEFT(parent, field) == elm) { \ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_LEFT(head, gparent, tmp, field); \ + } \ + } \ + RB_COLOR(head->rbh_root, field) = RB_BLACK; \ +} \ + \ +void \ +name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ +{ \ + struct type *tmp; \ + while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ + elm != RB_ROOT(head)) { \ + if (RB_LEFT(parent, field) == elm) { \ + tmp = RB_RIGHT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\ + struct type *oleft; \ + if ((oleft = RB_LEFT(tmp, field)))\ + RB_COLOR(oleft, field) = RB_BLACK;\ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_RIGHT(head, tmp, oleft, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ + RB_COLOR(parent, field) = RB_BLACK; \ + if 
(RB_RIGHT(tmp, field)) \ + RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\ + RB_ROTATE_LEFT(head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = RB_LEFT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\ + struct type *oright; \ + if ((oright = RB_RIGHT(tmp, field)))\ + RB_COLOR(oright, field) = RB_BLACK;\ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_LEFT(head, tmp, oright, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_LEFT(tmp, field)) \ + RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\ + RB_ROTATE_RIGHT(head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + RB_COLOR(elm, field) = RB_BLACK; \ +} \ + \ +struct type * \ +name##_RB_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (RB_LEFT(elm, field) == NULL) \ + child = RB_RIGHT(elm, field); \ + else if (RB_RIGHT(elm, field) == NULL) \ + child = RB_LEFT(elm, field); \ + else { \ + struct type *left; \ + elm = RB_RIGHT(elm, field); \ + while ((left = RB_LEFT(elm, field))) \ + elm = left; \ + child = RB_RIGHT(elm, field); \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + if (RB_PARENT(elm, field) == old) \ + parent = elm; \ + (elm)->field = (old)->field; \ + if (RB_PARENT(old, field)) { \ + if (RB_LEFT(RB_PARENT(old, field), field) == old)\ + RB_LEFT(RB_PARENT(old, field), field) = elm;\ + else \ + RB_RIGHT(RB_PARENT(old, field), field) = elm;\ + RB_AUGMENT(RB_PARENT(old, field)); \ + } else \ + RB_ROOT(head) = elm; \ + RB_PARENT(RB_LEFT(old, field), field) = elm; \ + if (RB_RIGHT(old, field)) \ + RB_PARENT(RB_RIGHT(old, field), field) = elm; \ + if (parent) { \ + left = parent; \ + do { \ + RB_AUGMENT(left); \ + } while ((left = RB_PARENT(left, field))); \ + } \ + goto color; \ + } \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ +color: \ + if (color == RB_BLACK) \ + name##_RB_REMOVE_COLOR(head, parent, child); \ + return (old); \ +} \ + \ +/* Inserts a node into the RB tree */ \ +struct type * \ +name##_RB_INSERT(struct name *head, struct type *elm) \ +{ \ + struct type *tmp; \ + struct type *parent = NULL; \ + int comp = 0; \ + tmp = RB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + 
return (tmp); \ + } \ + RB_SET(elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + RB_LEFT(parent, field) = elm; \ + else \ + RB_RIGHT(parent, field) = elm; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_INSERT_COLOR(head, elm); \ + return (NULL); \ +} \ + \ +/* Finds the node with the same key as elm */ \ +struct type * \ +name##_RB_FIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ +} \ + \ +struct type * \ +name##_RB_NEXT(struct type *elm) \ +{ \ + if (RB_RIGHT(elm, field)) { \ + elm = RB_RIGHT(elm, field); \ + while (RB_LEFT(elm, field)) \ + elm = RB_LEFT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && \ + (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && \ + (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ +} \ + \ +struct type * \ +name##_RB_MINMAX(struct name *head, int val) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = RB_LEFT(tmp, field); \ + else \ + tmp = RB_RIGHT(tmp, field); \ + } \ + return (parent); \ +} + +#define RB_NEGINF -1 +#define RB_INF 1 + +#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) +#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) +#define RB_FIND(name, x, y) name##_RB_FIND(x, y) +#define RB_NEXT(name, x, y) name##_RB_NEXT(y) +#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) +#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) + +#define RB_FOREACH(x, name, head) \ + for ((x) = RB_MIN(name, head); \ + (x) != NULL; \ + (x) = name##_RB_NEXT(x)) + +#endif /* _SYS_TREE_H_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/appveyor.yml b/probe-busybox/libevent-2.1.11-stable/appveyor.yml new file mode 100644 index 00000000..edfe3372 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/appveyor.yml @@ -0,0 +1,112 @@ +version: 2.1.11.{build} + +os: Visual Studio 2017 +platform: + - x64 + +branches: + except: + - /.*travis.*/ + - /.*linux.*/ + - /.*freebsd.*/ + - /.*osx.*/ + - /.*bitrise.*/ +skip_commits: + message: /travis/ + files: + - .travis.yml + +environment: + global: + APPVEYOR_SAVE_CACHE_ON_ERROR: true + MINGW_ROOT: C:/mingw-w64/x86_64-7.2.0-posix-seh-rt_v5-rev1 + OPENSSL_ROOT: C:/OpenSSL-Win64 + MPATH: C:/mingw-w64/x86_64-7.2.0-posix-seh-rt_v5-rev1/bin;C:/msys64/bin;C:/cygwin64/bin + EVENT_TESTS_PARALLEL: 20 + EVENT_BUILD_PARALLEL: 10 + matrix: + # !EVENT_ALLOW_FAILURE + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "" + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DEVENT__LIBRARY_TYPE=STATIC" + # EVENT_ALLOW_FAILURE + - EVENT_BUILD_METHOD: "autotools" + EVENT_CONFIGURE_OPTIONS: "" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "autotools" + EVENT_CONFIGURE_OPTIONS: "--disable-openssl" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "autotools" + EVENT_CONFIGURE_OPTIONS: "--disable-thread-support" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "autotools" + EVENT_CONFIGURE_OPTIONS: "--disable-debug-mode" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "autotools" + EVENT_CONFIGURE_OPTIONS: "--disable-malloc-replacement" + EVENT_ALLOW_FAILURE: 1 + - 
EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DEVENT__DISABLE_OPENSSL=ON" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DEVENT__DISABLE_THREAD_SUPPORT=ON" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DEVENT__DISABLE_DEBUG_MODE=ON" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DEVENT__DISABLE_MM_REPLACEMENT=ON" + EVENT_ALLOW_FAILURE: 1 + - EVENT_BUILD_METHOD: "cmake" + EVENT_CMAKE_OPTIONS: "-DCMAKE_C_FLAGS='-DUNICODE -D_UNICODE'" + EVENT_ALLOW_FAILURE: 1 + +matrix: + allow_failures: + - EVENT_ALLOW_FAILURE: 1 + fast_finish: true + +init: + - 'echo Repo build branch is: %APPVEYOR_REPO_BRANCH%' + - 'echo Build folder is: %APPVEYOR_BUILD_FOLDER%' + - 'echo Repo build commit is: %APPVEYOR_REPO_COMMIT%' + - 'echo PATH is: %PATH%' + +build_script: + - ps: | + if ($env:EVENT_BUILD_METHOD -eq 'autotools') { + $env:PATH="$env:MPATH;$env:OPENSSL_ROOT/bin;$env:PATH" + $env:LDFLAGS="-L$($env:OPENSSL_ROOT)/lib -L$($env:OPENSSL_ROOT)" + $env:CFLAGS="-I$($env:OPENSSL_ROOT)/include" + + bash ./autogen.sh 2>&1 3>&1 + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + + md build-autotools 2> $null + cd build-autotools + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + + bash ../configure $env:EVENT_CONFIGURE_OPTIONS 2>&1 + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + make -j $env:EVENT_BUILD_PARALLEL 2>&1 + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + make verify -j $env:EVENT_TESTS_PARALLEL 2>&1 + } else { + md build-cmake 2> $null + cd build-cmake + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + cmake -G "Visual Studio 15 2017 Win64" .. $env:EVENT_CMAKE_OPTIONS + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + cmake --build . -j $env:EVENT_BUILD_PARALLEL -- /nologo /verbosity:minimal + if ($LastExitCode -ne 0) { $host.SetShouldExit($LastExitCode) } + ctest --output-on-failure -j $env:EVENT_TESTS_PARALLEL + } + +cache: + - build-cmake + - build-autotools + +on_failure: + - 7z a libevent.zip . + - appveyor PushArtifact libevent.zip diff --git a/probe-busybox/libevent-2.1.11-stable/arc4random.c b/probe-busybox/libevent-2.1.11-stable/arc4random.c new file mode 100644 index 00000000..be64452b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/arc4random.c @@ -0,0 +1,550 @@ +/* Portable arc4random.c based on arc4random.c from OpenBSD. + * Portable version by Chris Davis, adapted for Libevent by Nick Mathewson + * Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson + * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson + * + * Note that in Libevent, this file isn't compiled directly. Instead, + * it's included from evutil_rand.c + */ + +/* + * Copyright (c) 1996, David Mazieres + * Copyright (c) 2008, Damien Miller + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Arc4 random number generator for OpenBSD. + * + * This code is derived from section 17.1 of Applied Cryptography, + * second edition, which describes a stream cipher allegedly + * compatible with RSA Labs "RC4" cipher (the actual description of + * which is a trade secret). The same algorithm is used as a stream + * cipher called "arcfour" in Tatu Ylonen's ssh package. + * + * Here the stream cipher has been modified always to include the time + * when initializing the state. That makes it impossible to + * regenerate the same random sequence twice, so this can't be used + * for encryption, but will generate good random numbers. + * + * RC4 is a registered trademark of RSA Laboratories. + */ + +#ifndef ARC4RANDOM_EXPORT +#define ARC4RANDOM_EXPORT +#endif + +#ifndef ARC4RANDOM_UINT32 +#define ARC4RANDOM_UINT32 uint32_t +#endif + +#ifndef ARC4RANDOM_NO_INCLUDES +#include "evconfig-private.h" +#ifdef _WIN32 +#include +#include +#include +#else +#include +#include +#include +#include +#ifdef EVENT__HAVE_SYS_SYSCTL_H +#include +#endif +#endif +#include +#include +#include +#endif + +/* Add platform entropy 32 bytes (256 bits) at a time. */ +#define ADD_ENTROPY 32 + +/* Re-seed from the platform RNG after generating this many bytes. */ +#define BYTES_BEFORE_RESEED 1600000 + +struct arc4_stream { + unsigned char i; + unsigned char j; + unsigned char s[256]; +}; + +#ifdef _WIN32 +#define getpid _getpid +#define pid_t int +#endif + +static int rs_initialized; +static struct arc4_stream rs; +static pid_t arc4_stir_pid; +static int arc4_count; + +static inline unsigned char arc4_getbyte(void); + +static inline void +arc4_init(void) +{ + int n; + + for (n = 0; n < 256; n++) + rs.s[n] = n; + rs.i = 0; + rs.j = 0; +} + +static inline void +arc4_addrandom(const unsigned char *dat, int datlen) +{ + int n; + unsigned char si; + + rs.i--; + for (n = 0; n < 256; n++) { + rs.i = (rs.i + 1); + si = rs.s[rs.i]; + rs.j = (rs.j + si + dat[n % datlen]); + rs.s[rs.i] = rs.s[rs.j]; + rs.s[rs.j] = si; + } + rs.j = rs.i; +} + +#ifndef _WIN32 +static ssize_t +read_all(int fd, unsigned char *buf, size_t count) +{ + size_t numread = 0; + ssize_t result; + + while (numread < count) { + result = read(fd, buf+numread, count-numread); + if (result<0) + return -1; + else if (result == 0) + break; + numread += result; + } + + return (ssize_t)numread; +} +#endif + +#ifdef _WIN32 +#define TRY_SEED_WIN32 +static int +arc4_seed_win32(void) +{ + /* This is adapted from Tor's crypto_seed_rng() */ + static int provider_set = 0; + static HCRYPTPROV provider; + unsigned char buf[ADD_ENTROPY]; + + if (!provider_set) { + if (!CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + if (GetLastError() != (DWORD)NTE_BAD_KEYSET) + return -1; + } + provider_set = 1; + } + if (!CryptGenRandom(provider, sizeof(buf), buf)) + return -1; + arc4_addrandom(buf, sizeof(buf)); + evutil_memclear_(buf, sizeof(buf)); + return 0; +} +#endif + +#if defined(EVENT__HAVE_SYS_SYSCTL_H) && defined(EVENT__HAVE_SYSCTL) +#if EVENT__HAVE_DECL_CTL_KERN && EVENT__HAVE_DECL_KERN_RANDOM && EVENT__HAVE_DECL_RANDOM_UUID +#define TRY_SEED_SYSCTL_LINUX +static int +arc4_seed_sysctl_linux(void) +{ + 
/* Based on code by William Ahern, this function tries to use the + * RANDOM_UUID sysctl to get entropy from the kernel. This can work + * even if /dev/urandom is inaccessible for some reason (e.g., we're + * running in a chroot). */ + int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID }; + unsigned char buf[ADD_ENTROPY]; + size_t len, n; + unsigned i; + int any_set; + + memset(buf, 0, sizeof(buf)); + + for (len = 0; len < sizeof(buf); len += n) { + n = sizeof(buf) - len; + + if (0 != sysctl(mib, 3, &buf[len], &n, NULL, 0)) + return -1; + } + /* make sure that the buffer actually got set. */ + for (i=0,any_set=0; i sizeof(buf)) + n = len - sizeof(buf); + if (sysctl(mib, 2, &buf[len], &n, NULL, 0) == -1) + return -1; + } + } + /* make sure that the buffer actually got set. */ + for (i=any_set=0; i 0xffffffffUL) + min = 0x100000000UL % upper_bound; +#else + /* Calculate (2**32 % upper_bound) avoiding 64-bit math */ + if (upper_bound > 0x80000000) + min = 1 + ~upper_bound; /* 2**32 - upper_bound */ + else { + /* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */ + min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound; + } +#endif + + /* + * This could theoretically loop forever but each retry has + * p > 0.5 (worst case, usually far better) of selecting a + * number inside the range we need, so it should rarely need + * to re-roll. + */ + for (;;) { + r = arc4random(); + if (r >= min) + break; + } + + return r % upper_bound; +} +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/autogen.sh b/probe-busybox/libevent-2.1.11-stable/autogen.sh new file mode 100755 index 00000000..bcfe937a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/autogen.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +MAKE=make +if command -v gmake >/dev/null 2>/dev/null; then + MAKE=gmake +fi +$MAKE maintainer-clean >/dev/null 2>/dev/null + +if [ -x "`which autoreconf 2>/dev/null`" ] ; then + exec autoreconf -ivf +fi + +LIBTOOLIZE=libtoolize +SYSNAME=`uname` +if [ "x$SYSNAME" = "xDarwin" ] ; then + LIBTOOLIZE=glibtoolize +fi +aclocal -I m4 && \ + autoheader && \ + $LIBTOOLIZE && \ + autoconf && \ + automake --add-missing --force-missing --copy diff --git a/probe-busybox/libevent-2.1.11-stable/buffer.c b/probe-busybox/libevent-2.1.11-stable/buffer.c new file mode 100644 index 00000000..a51b6c5f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/buffer.c @@ -0,0 +1,3463 @@ +/* + * Copyright (c) 2002-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#include +#include +#endif + +#ifdef EVENT__HAVE_VASPRINTF +/* If we have vasprintf, we need to define _GNU_SOURCE before we include + * stdio.h. This comes from evconfig-private.h. + */ +#endif + +#include + +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif + +#ifdef EVENT__HAVE_SYS_UIO_H +#include +#endif + +#ifdef EVENT__HAVE_SYS_IOCTL_H +#include +#endif + +#ifdef EVENT__HAVE_SYS_MMAN_H +#include +#endif + +#ifdef EVENT__HAVE_SYS_SENDFILE_H +#include +#endif +#ifdef EVENT__HAVE_SYS_STAT_H +#include +#endif + + +#include +#include +#include +#include +#ifdef EVENT__HAVE_STDARG_H +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include + +#include "event2/event.h" +#include "event2/buffer.h" +#include "event2/buffer_compat.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_compat.h" +#include "event2/bufferevent_struct.h" +#include "event2/thread.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "util-internal.h" +#include "evthread-internal.h" +#include "evbuffer-internal.h" +#include "bufferevent-internal.h" +#include "event-internal.h" + +/* some systems do not have MAP_FAILED */ +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +/* send file support */ +#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__) +#define USE_SENDFILE 1 +#define SENDFILE_IS_LINUX 1 +#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__) +#define USE_SENDFILE 1 +#define SENDFILE_IS_FREEBSD 1 +#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__) +#define USE_SENDFILE 1 +#define SENDFILE_IS_MACOSX 1 +#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) +#define USE_SENDFILE 1 +#define SENDFILE_IS_SOLARIS 1 +#endif + +/* Mask of user-selectable callback flags. */ +#define EVBUFFER_CB_USER_FLAGS 0xffff +/* Mask of all internal-use-only flags. */ +#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000 + +/* Flag set if the callback is using the cb_obsolete function pointer */ +#define EVBUFFER_CB_OBSOLETE 0x00040000 + +/* evbuffer_chain support */ +#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off) +#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? 
\ + 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off)) + +#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0) +#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0) + +/* evbuffer_ptr support */ +#define PTR_NOT_FOUND(ptr) do { \ + (ptr)->pos = -1; \ + (ptr)->internal_.chain = NULL; \ + (ptr)->internal_.pos_in_chain = 0; \ +} while (0) + +static void evbuffer_chain_align(struct evbuffer_chain *chain); +static int evbuffer_chain_should_realign(struct evbuffer_chain *chain, + size_t datalen); +static void evbuffer_deferred_callback(struct event_callback *cb, void *arg); +static int evbuffer_ptr_memcmp(const struct evbuffer *buf, + const struct evbuffer_ptr *pos, const char *mem, size_t len); +static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf, + size_t datlen); +static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, + size_t howfar); +static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg); +static inline void evbuffer_chain_incref(struct evbuffer_chain *chain); + +static struct evbuffer_chain * +evbuffer_chain_new(size_t size) +{ + struct evbuffer_chain *chain; + size_t to_alloc; + + if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE) + return (NULL); + + size += EVBUFFER_CHAIN_SIZE; + + /* get the next largest memory that can hold the buffer */ + if (size < EVBUFFER_CHAIN_MAX / 2) { + to_alloc = MIN_BUFFER_SIZE; + while (to_alloc < size) { + to_alloc <<= 1; + } + } else { + to_alloc = size; + } + + /* we get everything in one chunk */ + if ((chain = mm_malloc(to_alloc)) == NULL) + return (NULL); + + memset(chain, 0, EVBUFFER_CHAIN_SIZE); + + chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE; + + /* this way we can manipulate the buffer to different addresses, + * which is required for mmap for example. 
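	 * (The chain header and its payload come out of the single mm_malloc()
	 * call above: EVBUFFER_CHAIN_EXTRA yields a pointer just past the
	 * struct evbuffer_chain header within that allocation, and `buffer'
	 * can later be re-pointed elsewhere, e.g. at an mmap'ed region.)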
+ */ + chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain); + + chain->refcnt = 1; + + return (chain); +} + +static inline void +evbuffer_chain_free(struct evbuffer_chain *chain) +{ + EVUTIL_ASSERT(chain->refcnt > 0); + if (--chain->refcnt > 0) { + /* chain is still referenced by other chains */ + return; + } + + if (CHAIN_PINNED(chain)) { + /* will get freed once no longer dangling */ + chain->refcnt++; + chain->flags |= EVBUFFER_DANGLING; + return; + } + + /* safe to release chain, it's either a referencing + * chain or all references to it have been freed */ + if (chain->flags & EVBUFFER_REFERENCE) { + struct evbuffer_chain_reference *info = + EVBUFFER_CHAIN_EXTRA( + struct evbuffer_chain_reference, + chain); + if (info->cleanupfn) + (*info->cleanupfn)(chain->buffer, + chain->buffer_len, + info->extra); + } + if (chain->flags & EVBUFFER_FILESEGMENT) { + struct evbuffer_chain_file_segment *info = + EVBUFFER_CHAIN_EXTRA( + struct evbuffer_chain_file_segment, + chain); + if (info->segment) { +#ifdef _WIN32 + if (info->segment->is_mapping) + UnmapViewOfFile(chain->buffer); +#endif + evbuffer_file_segment_free(info->segment); + } + } + if (chain->flags & EVBUFFER_MULTICAST) { + struct evbuffer_multicast_parent *info = + EVBUFFER_CHAIN_EXTRA( + struct evbuffer_multicast_parent, + chain); + /* referencing chain is being freed, decrease + * refcounts of source chain and associated + * evbuffer (which get freed once both reach + * zero) */ + EVUTIL_ASSERT(info->source != NULL); + EVUTIL_ASSERT(info->parent != NULL); + EVBUFFER_LOCK(info->source); + evbuffer_chain_free(info->parent); + evbuffer_decref_and_unlock_(info->source); + } + + mm_free(chain); +} + +static void +evbuffer_free_all_chains(struct evbuffer_chain *chain) +{ + struct evbuffer_chain *next; + for (; chain; chain = next) { + next = chain->next; + evbuffer_chain_free(chain); + } +} + +#ifndef NDEBUG +static int +evbuffer_chains_all_empty(struct evbuffer_chain *chain) +{ + for (; chain; chain = chain->next) { + if (chain->off) + return 0; + } + return 1; +} +#else +/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid +"unused variable" warnings. */ +static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { + return 1; +} +#endif + +/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior + * to replacing them all with a new chain. Return a pointer to the place + * where the new chain will go. + * + * Internal; requires lock. The caller must fix up buf->last and buf->first + * as needed; they might have been freed. + */ +static struct evbuffer_chain ** +evbuffer_free_trailing_empty_chains(struct evbuffer *buf) +{ + struct evbuffer_chain **ch = buf->last_with_datap; + /* Find the first victim chain. It might be *last_with_datap */ + while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) + ch = &(*ch)->next; + if (*ch) { + EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); + evbuffer_free_all_chains(*ch); + *ch = NULL; + } + return ch; +} + +/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty + * chains as necessary. Requires lock. Does not schedule callbacks. + */ +static void +evbuffer_chain_insert(struct evbuffer *buf, + struct evbuffer_chain *chain) +{ + ASSERT_EVBUFFER_LOCKED(buf); + if (*buf->last_with_datap == NULL) { + /* There are no chains data on the buffer at all. 
*/ + EVUTIL_ASSERT(buf->last_with_datap == &buf->first); + EVUTIL_ASSERT(buf->first == NULL); + buf->first = buf->last = chain; + } else { + struct evbuffer_chain **chp; + chp = evbuffer_free_trailing_empty_chains(buf); + *chp = chain; + if (chain->off) + buf->last_with_datap = chp; + buf->last = chain; + } + buf->total_len += chain->off; +} + +static inline struct evbuffer_chain * +evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen) +{ + struct evbuffer_chain *chain; + if ((chain = evbuffer_chain_new(datlen)) == NULL) + return NULL; + evbuffer_chain_insert(buf, chain); + return chain; +} + +void +evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag) +{ + EVUTIL_ASSERT((chain->flags & flag) == 0); + chain->flags |= flag; +} + +void +evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag) +{ + EVUTIL_ASSERT((chain->flags & flag) != 0); + chain->flags &= ~flag; + if (chain->flags & EVBUFFER_DANGLING) + evbuffer_chain_free(chain); +} + +static inline void +evbuffer_chain_incref(struct evbuffer_chain *chain) +{ + ++chain->refcnt; +} + +struct evbuffer * +evbuffer_new(void) +{ + struct evbuffer *buffer; + + buffer = mm_calloc(1, sizeof(struct evbuffer)); + if (buffer == NULL) + return (NULL); + + LIST_INIT(&buffer->callbacks); + buffer->refcnt = 1; + buffer->last_with_datap = &buffer->first; + + return (buffer); +} + +int +evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags) +{ + EVBUFFER_LOCK(buf); + buf->flags |= (ev_uint32_t)flags; + EVBUFFER_UNLOCK(buf); + return 0; +} + +int +evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags) +{ + EVBUFFER_LOCK(buf); + buf->flags &= ~(ev_uint32_t)flags; + EVBUFFER_UNLOCK(buf); + return 0; +} + +void +evbuffer_incref_(struct evbuffer *buf) +{ + EVBUFFER_LOCK(buf); + ++buf->refcnt; + EVBUFFER_UNLOCK(buf); +} + +void +evbuffer_incref_and_lock_(struct evbuffer *buf) +{ + EVBUFFER_LOCK(buf); + ++buf->refcnt; +} + +int +evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base) +{ + EVBUFFER_LOCK(buffer); + buffer->cb_queue = base; + buffer->deferred_cbs = 1; + event_deferred_cb_init_(&buffer->deferred, + event_base_get_npriorities(base) / 2, + evbuffer_deferred_callback, buffer); + EVBUFFER_UNLOCK(buffer); + return 0; +} + +int +evbuffer_enable_locking(struct evbuffer *buf, void *lock) +{ +#ifdef EVENT__DISABLE_THREAD_SUPPORT + return -1; +#else + if (buf->lock) + return -1; + + if (!lock) { + EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE); + if (!lock) + return -1; + buf->lock = lock; + buf->own_lock = 1; + } else { + buf->lock = lock; + buf->own_lock = 0; + } + + return 0; +#endif +} + +void +evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev) +{ + EVBUFFER_LOCK(buf); + buf->parent = bev; + EVBUFFER_UNLOCK(buf); +} + +static void +evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred) +{ + struct evbuffer_cb_entry *cbent, *next; + struct evbuffer_cb_info info; + size_t new_size; + ev_uint32_t mask, masked_val; + int clear = 1; + + if (running_deferred) { + mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; + masked_val = EVBUFFER_CB_ENABLED; + } else if (buffer->deferred_cbs) { + mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; + masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; + /* Don't zero-out n_add/n_del, since the deferred callbacks + will want to see them. 
*/ + clear = 0; + } else { + mask = EVBUFFER_CB_ENABLED; + masked_val = EVBUFFER_CB_ENABLED; + } + + ASSERT_EVBUFFER_LOCKED(buffer); + + if (LIST_EMPTY(&buffer->callbacks)) { + buffer->n_add_for_cb = buffer->n_del_for_cb = 0; + return; + } + if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0) + return; + + new_size = buffer->total_len; + info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb; + info.n_added = buffer->n_add_for_cb; + info.n_deleted = buffer->n_del_for_cb; + if (clear) { + buffer->n_add_for_cb = 0; + buffer->n_del_for_cb = 0; + } + for (cbent = LIST_FIRST(&buffer->callbacks); + cbent != LIST_END(&buffer->callbacks); + cbent = next) { + /* Get the 'next' pointer now in case this callback decides + * to remove itself or something. */ + next = LIST_NEXT(cbent, next); + + if ((cbent->flags & mask) != masked_val) + continue; + + if ((cbent->flags & EVBUFFER_CB_OBSOLETE)) + cbent->cb.cb_obsolete(buffer, + info.orig_size, new_size, cbent->cbarg); + else + cbent->cb.cb_func(buffer, &info, cbent->cbarg); + } +} + +void +evbuffer_invoke_callbacks_(struct evbuffer *buffer) +{ + if (LIST_EMPTY(&buffer->callbacks)) { + buffer->n_add_for_cb = buffer->n_del_for_cb = 0; + return; + } + + if (buffer->deferred_cbs) { + if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) { + evbuffer_incref_and_lock_(buffer); + if (buffer->parent) + bufferevent_incref_(buffer->parent); + EVBUFFER_UNLOCK(buffer); + } + } + + evbuffer_run_callbacks(buffer, 0); +} + +static void +evbuffer_deferred_callback(struct event_callback *cb, void *arg) +{ + struct bufferevent *parent = NULL; + struct evbuffer *buffer = arg; + + /* XXXX It would be better to run these callbacks without holding the + * lock */ + EVBUFFER_LOCK(buffer); + parent = buffer->parent; + evbuffer_run_callbacks(buffer, 1); + evbuffer_decref_and_unlock_(buffer); + if (parent) + bufferevent_decref_(parent); +} + +static void +evbuffer_remove_all_callbacks(struct evbuffer *buffer) +{ + struct evbuffer_cb_entry *cbent; + + while ((cbent = LIST_FIRST(&buffer->callbacks))) { + LIST_REMOVE(cbent, next); + mm_free(cbent); + } +} + +void +evbuffer_decref_and_unlock_(struct evbuffer *buffer) +{ + struct evbuffer_chain *chain, *next; + ASSERT_EVBUFFER_LOCKED(buffer); + + EVUTIL_ASSERT(buffer->refcnt > 0); + + if (--buffer->refcnt > 0) { + EVBUFFER_UNLOCK(buffer); + return; + } + + for (chain = buffer->first; chain != NULL; chain = next) { + next = chain->next; + evbuffer_chain_free(chain); + } + evbuffer_remove_all_callbacks(buffer); + if (buffer->deferred_cbs) + event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred); + + EVBUFFER_UNLOCK(buffer); + if (buffer->own_lock) + EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + mm_free(buffer); +} + +void +evbuffer_free(struct evbuffer *buffer) +{ + EVBUFFER_LOCK(buffer); + evbuffer_decref_and_unlock_(buffer); +} + +void +evbuffer_lock(struct evbuffer *buf) +{ + EVBUFFER_LOCK(buf); +} + +void +evbuffer_unlock(struct evbuffer *buf) +{ + EVBUFFER_UNLOCK(buf); +} + +size_t +evbuffer_get_length(const struct evbuffer *buffer) +{ + size_t result; + + EVBUFFER_LOCK(buffer); + + result = (buffer->total_len); + + EVBUFFER_UNLOCK(buffer); + + return result; +} + +size_t +evbuffer_get_contiguous_space(const struct evbuffer *buf) +{ + struct evbuffer_chain *chain; + size_t result; + + EVBUFFER_LOCK(buf); + chain = buf->first; + result = (chain != NULL ? 
chain->off : 0); + EVBUFFER_UNLOCK(buf); + + return result; +} + +size_t +evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) { + int n; + size_t res; + size_t to_alloc; + + EVBUFFER_LOCK(buf); + + res = to_alloc = 0; + + for (n = 0; n < n_vec; n++) { + to_alloc += vec[n].iov_len; + } + + if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) { + goto done; + } + + for (n = 0; n < n_vec; n++) { + /* XXX each 'add' call here does a bunch of setup that's + * obviated by evbuffer_expand_fast_, and some cleanup that we + * would like to do only once. Instead we should just extract + * the part of the code that's needed. */ + + if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) { + goto done; + } + + res += vec[n].iov_len; + } + +done: + EVBUFFER_UNLOCK(buf); + return res; +} + +int +evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, + struct evbuffer_iovec *vec, int n_vecs) +{ + struct evbuffer_chain *chain, **chainp; + int n = -1; + + EVBUFFER_LOCK(buf); + if (buf->freeze_end) + goto done; + if (n_vecs < 1) + goto done; + if (n_vecs == 1) { + if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL) + goto done; + + vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain); + vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain); + EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size); + n = 1; + } else { + if (evbuffer_expand_fast_(buf, size, n_vecs)<0) + goto done; + n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs, + &chainp, 0); + } + +done: + EVBUFFER_UNLOCK(buf); + return n; + +} + +static int +advance_last_with_data(struct evbuffer *buf) +{ + int n = 0; + struct evbuffer_chain **chainp = buf->last_with_datap; + + ASSERT_EVBUFFER_LOCKED(buf); + + if (!*chainp) + return 0; + + while ((*chainp)->next) { + chainp = &(*chainp)->next; + if ((*chainp)->off) + buf->last_with_datap = chainp; + ++n; + } + return n; +} + +int +evbuffer_commit_space(struct evbuffer *buf, + struct evbuffer_iovec *vec, int n_vecs) +{ + struct evbuffer_chain *chain, **firstchainp, **chainp; + int result = -1; + size_t added = 0; + int i; + + EVBUFFER_LOCK(buf); + + if (buf->freeze_end) + goto done; + if (n_vecs == 0) { + result = 0; + goto done; + } else if (n_vecs == 1 && + (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) { + /* The user only got or used one chain; it might not + * be the first one with space in it. */ + if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last)) + goto done; + buf->last->off += vec[0].iov_len; + added = vec[0].iov_len; + if (added) + advance_last_with_data(buf); + goto okay; + } + + /* Advance 'firstchain' to the first chain with space in it. */ + firstchainp = buf->last_with_datap; + if (!*firstchainp) + goto done; + if (CHAIN_SPACE_LEN(*firstchainp) == 0) { + firstchainp = &(*firstchainp)->next; + } + + chain = *firstchainp; + /* pass 1: make sure that the pointers and lengths of vecs[] are in + * bounds before we try to commit anything. */ + for (i=0; i CHAIN_SPACE_LEN(chain)) + goto done; + chain = chain->next; + } + /* pass 2: actually adjust all the chains. 
*/ + chainp = firstchainp; + for (i=0; ioff += vec[i].iov_len; + added += vec[i].iov_len; + if (vec[i].iov_len) { + buf->last_with_datap = chainp; + } + chainp = &(*chainp)->next; + } + +okay: + buf->total_len += added; + buf->n_add_for_cb += added; + result = 0; + evbuffer_invoke_callbacks_(buf); + +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +static inline int +HAS_PINNED_R(struct evbuffer *buf) +{ + return (buf->last && CHAIN_PINNED_R(buf->last)); +} + +static inline void +ZERO_CHAIN(struct evbuffer *dst) +{ + ASSERT_EVBUFFER_LOCKED(dst); + dst->first = NULL; + dst->last = NULL; + dst->last_with_datap = &(dst)->first; + dst->total_len = 0; +} + +/* Prepares the contents of src to be moved to another buffer by removing + * read-pinned chains. The first pinned chain is saved in first, and the + * last in last. If src has no read-pinned chains, first and last are set + * to NULL. */ +static int +PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first, + struct evbuffer_chain **last) +{ + struct evbuffer_chain *chain, **pinned; + + ASSERT_EVBUFFER_LOCKED(src); + + if (!HAS_PINNED_R(src)) { + *first = *last = NULL; + return 0; + } + + pinned = src->last_with_datap; + if (!CHAIN_PINNED_R(*pinned)) + pinned = &(*pinned)->next; + EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned)); + chain = *first = *pinned; + *last = src->last; + + /* If there's data in the first pinned chain, we need to allocate + * a new chain and copy the data over. */ + if (chain->off) { + struct evbuffer_chain *tmp; + + EVUTIL_ASSERT(pinned == src->last_with_datap); + tmp = evbuffer_chain_new(chain->off); + if (!tmp) + return -1; + memcpy(tmp->buffer, chain->buffer + chain->misalign, + chain->off); + tmp->off = chain->off; + *src->last_with_datap = tmp; + src->last = tmp; + chain->misalign += chain->off; + chain->off = 0; + } else { + src->last = *src->last_with_datap; + *pinned = NULL; + } + + return 0; +} + +static inline void +RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned, + struct evbuffer_chain *last) +{ + ASSERT_EVBUFFER_LOCKED(src); + + if (!pinned) { + ZERO_CHAIN(src); + return; + } + + src->first = pinned; + src->last = last; + src->last_with_datap = &src->first; + src->total_len = 0; +} + +static inline void +COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src) +{ + ASSERT_EVBUFFER_LOCKED(dst); + ASSERT_EVBUFFER_LOCKED(src); + dst->first = src->first; + if (src->last_with_datap == &src->first) + dst->last_with_datap = &dst->first; + else + dst->last_with_datap = src->last_with_datap; + dst->last = src->last; + dst->total_len = src->total_len; +} + +static void +APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) +{ + struct evbuffer_chain **chp; + + ASSERT_EVBUFFER_LOCKED(dst); + ASSERT_EVBUFFER_LOCKED(src); + + chp = evbuffer_free_trailing_empty_chains(dst); + *chp = src->first; + + if (src->last_with_datap == &src->first) + dst->last_with_datap = chp; + else + dst->last_with_datap = src->last_with_datap; + dst->last = src->last; + dst->total_len += src->total_len; +} + +static inline void +APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src) +{ + struct evbuffer_chain *tmp; + struct evbuffer_chain *chain = src->first; + struct evbuffer_multicast_parent *extra; + + ASSERT_EVBUFFER_LOCKED(dst); + ASSERT_EVBUFFER_LOCKED(src); + + for (; chain; chain = chain->next) { + if (!chain->off || chain->flags & EVBUFFER_DANGLING) { + /* skip empty chains */ + continue; + } + + tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent)); + if (!tmp) { + 
event_warn("%s: out of memory", __func__); + return; + } + extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp); + /* reference evbuffer containing source chain so it + * doesn't get released while the chain is still + * being referenced to */ + evbuffer_incref_(src); + extra->source = src; + /* reference source chain which now becomes immutable */ + evbuffer_chain_incref(chain); + extra->parent = chain; + chain->flags |= EVBUFFER_IMMUTABLE; + tmp->buffer_len = chain->buffer_len; + tmp->misalign = chain->misalign; + tmp->off = chain->off; + tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE; + tmp->buffer = chain->buffer; + evbuffer_chain_insert(dst, tmp); + } +} + +static void +PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) +{ + ASSERT_EVBUFFER_LOCKED(dst); + ASSERT_EVBUFFER_LOCKED(src); + src->last->next = dst->first; + dst->first = src->first; + dst->total_len += src->total_len; + if (*dst->last_with_datap == NULL) { + if (src->last_with_datap == &(src)->first) + dst->last_with_datap = &dst->first; + else + dst->last_with_datap = src->last_with_datap; + } else if (dst->last_with_datap == &dst->first) { + dst->last_with_datap = &src->last->next; + } +} + +int +evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) +{ + struct evbuffer_chain *pinned, *last; + size_t in_total_len, out_total_len; + int result = 0; + + EVBUFFER_LOCK2(inbuf, outbuf); + in_total_len = inbuf->total_len; + out_total_len = outbuf->total_len; + + if (in_total_len == 0 || outbuf == inbuf) + goto done; + + if (outbuf->freeze_end || inbuf->freeze_start) { + result = -1; + goto done; + } + + if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { + result = -1; + goto done; + } + + if (out_total_len == 0) { + /* There might be an empty chain at the start of outbuf; free + * it. */ + evbuffer_free_all_chains(outbuf->first); + COPY_CHAIN(outbuf, inbuf); + } else { + APPEND_CHAIN(outbuf, inbuf); + } + + RESTORE_PINNED(inbuf, pinned, last); + + inbuf->n_del_for_cb += in_total_len; + outbuf->n_add_for_cb += in_total_len; + + evbuffer_invoke_callbacks_(inbuf); + evbuffer_invoke_callbacks_(outbuf); + +done: + EVBUFFER_UNLOCK2(inbuf, outbuf); + return result; +} + +int +evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf) +{ + size_t in_total_len, out_total_len; + struct evbuffer_chain *chain; + int result = 0; + + EVBUFFER_LOCK2(inbuf, outbuf); + in_total_len = inbuf->total_len; + out_total_len = outbuf->total_len; + chain = inbuf->first; + + if (in_total_len == 0) + goto done; + + if (outbuf->freeze_end || outbuf == inbuf) { + result = -1; + goto done; + } + + for (; chain; chain = chain->next) { + if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) { + /* chain type can not be referenced */ + result = -1; + goto done; + } + } + + if (out_total_len == 0) { + /* There might be an empty chain at the start of outbuf; free + * it. 
*/ + evbuffer_free_all_chains(outbuf->first); + } + APPEND_CHAIN_MULTICAST(outbuf, inbuf); + + outbuf->n_add_for_cb += in_total_len; + evbuffer_invoke_callbacks_(outbuf); + +done: + EVBUFFER_UNLOCK2(inbuf, outbuf); + return result; +} + +int +evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) +{ + struct evbuffer_chain *pinned, *last; + size_t in_total_len, out_total_len; + int result = 0; + + EVBUFFER_LOCK2(inbuf, outbuf); + + in_total_len = inbuf->total_len; + out_total_len = outbuf->total_len; + + if (!in_total_len || inbuf == outbuf) + goto done; + + if (outbuf->freeze_start || inbuf->freeze_start) { + result = -1; + goto done; + } + + if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { + result = -1; + goto done; + } + + if (out_total_len == 0) { + /* There might be an empty chain at the start of outbuf; free + * it. */ + evbuffer_free_all_chains(outbuf->first); + COPY_CHAIN(outbuf, inbuf); + } else { + PREPEND_CHAIN(outbuf, inbuf); + } + + RESTORE_PINNED(inbuf, pinned, last); + + inbuf->n_del_for_cb += in_total_len; + outbuf->n_add_for_cb += in_total_len; + + evbuffer_invoke_callbacks_(inbuf); + evbuffer_invoke_callbacks_(outbuf); +done: + EVBUFFER_UNLOCK2(inbuf, outbuf); + return result; +} + +int +evbuffer_drain(struct evbuffer *buf, size_t len) +{ + struct evbuffer_chain *chain, *next; + size_t remaining, old_len; + int result = 0; + + EVBUFFER_LOCK(buf); + old_len = buf->total_len; + + if (old_len == 0) + goto done; + + if (buf->freeze_start) { + result = -1; + goto done; + } + + if (len >= old_len && !HAS_PINNED_R(buf)) { + len = old_len; + for (chain = buf->first; chain != NULL; chain = next) { + next = chain->next; + evbuffer_chain_free(chain); + } + + ZERO_CHAIN(buf); + } else { + if (len >= old_len) + len = old_len; + + buf->total_len -= len; + remaining = len; + for (chain = buf->first; + remaining >= chain->off; + chain = next) { + next = chain->next; + remaining -= chain->off; + + if (chain == *buf->last_with_datap) { + buf->last_with_datap = &buf->first; + } + if (&chain->next == buf->last_with_datap) + buf->last_with_datap = &buf->first; + + if (CHAIN_PINNED_R(chain)) { + EVUTIL_ASSERT(remaining == 0); + chain->misalign += chain->off; + chain->off = 0; + break; + } else + evbuffer_chain_free(chain); + } + + buf->first = chain; + EVUTIL_ASSERT(remaining <= chain->off); + chain->misalign += remaining; + chain->off -= remaining; + } + + buf->n_del_for_cb += len; + /* Tell someone about changes in this buffer */ + evbuffer_invoke_callbacks_(buf); + +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +/* Reads data from an event buffer and drains the bytes read */ +int +evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen) +{ + ev_ssize_t n; + EVBUFFER_LOCK(buf); + n = evbuffer_copyout_from(buf, NULL, data_out, datlen); + if (n > 0) { + if (evbuffer_drain(buf, n)<0) + n = -1; + } + EVBUFFER_UNLOCK(buf); + return (int)n; +} + +ev_ssize_t +evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen) +{ + return evbuffer_copyout_from(buf, NULL, data_out, datlen); +} + +ev_ssize_t +evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos, + void *data_out, size_t datlen) +{ + /*XXX fails badly on sendfile case. 
*/ + struct evbuffer_chain *chain; + char *data = data_out; + size_t nread; + ev_ssize_t result = 0; + size_t pos_in_chain; + + EVBUFFER_LOCK(buf); + + if (pos) { + if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) { + result = -1; + goto done; + } + chain = pos->internal_.chain; + pos_in_chain = pos->internal_.pos_in_chain; + if (datlen + pos->pos > buf->total_len) + datlen = buf->total_len - pos->pos; + } else { + chain = buf->first; + pos_in_chain = 0; + if (datlen > buf->total_len) + datlen = buf->total_len; + } + + + if (datlen == 0) + goto done; + + if (buf->freeze_start) { + result = -1; + goto done; + } + + nread = datlen; + + while (datlen && datlen >= chain->off - pos_in_chain) { + size_t copylen = chain->off - pos_in_chain; + memcpy(data, + chain->buffer + chain->misalign + pos_in_chain, + copylen); + data += copylen; + datlen -= copylen; + + chain = chain->next; + pos_in_chain = 0; + EVUTIL_ASSERT(chain || datlen==0); + } + + if (datlen) { + EVUTIL_ASSERT(chain); + EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off); + + memcpy(data, chain->buffer + chain->misalign + pos_in_chain, + datlen); + } + + result = nread; +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +/* reads data from the src buffer to the dst buffer, avoids memcpy as + * possible. */ +/* XXXX should return ev_ssize_t */ +int +evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst, + size_t datlen) +{ + /*XXX We should have an option to force this to be zero-copy.*/ + + /*XXX can fail badly on sendfile case. */ + struct evbuffer_chain *chain, *previous; + size_t nread = 0; + int result; + + EVBUFFER_LOCK2(src, dst); + + chain = previous = src->first; + + if (datlen == 0 || dst == src) { + result = 0; + goto done; + } + + if (dst->freeze_end || src->freeze_start) { + result = -1; + goto done; + } + + /* short-cut if there is no more data buffered */ + if (datlen >= src->total_len) { + datlen = src->total_len; + evbuffer_add_buffer(dst, src); + result = (int)datlen; /*XXXX should return ev_ssize_t*/ + goto done; + } + + /* removes chains if possible */ + while (chain->off <= datlen) { + /* We can't remove the last with data from src unless we + * remove all chains, in which case we would have done the if + * block above */ + EVUTIL_ASSERT(chain != *src->last_with_datap); + nread += chain->off; + datlen -= chain->off; + previous = chain; + if (src->last_with_datap == &chain->next) + src->last_with_datap = &src->first; + chain = chain->next; + } + + if (chain != src->first) { + /* we can remove the chain */ + struct evbuffer_chain **chp; + chp = evbuffer_free_trailing_empty_chains(dst); + + if (dst->first == NULL) { + dst->first = src->first; + } else { + *chp = src->first; + } + dst->last = previous; + previous->next = NULL; + src->first = chain; + advance_last_with_data(dst); + + dst->total_len += nread; + dst->n_add_for_cb += nread; + } + + /* we know that there is more data in the src buffer than + * we want to read, so we manually drain the chain */ + evbuffer_add(dst, chain->buffer + chain->misalign, datlen); + chain->misalign += datlen; + chain->off -= datlen; + nread += datlen; + + /* You might think we would want to increment dst->n_add_for_cb + * here too. But evbuffer_add above already took care of that. 
+ */ + src->total_len -= nread; + src->n_del_for_cb += nread; + + if (nread) { + evbuffer_invoke_callbacks_(dst); + evbuffer_invoke_callbacks_(src); + } + result = (int)nread;/*XXXX should change return type */ + +done: + EVBUFFER_UNLOCK2(src, dst); + return result; +} + +unsigned char * +evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size) +{ + struct evbuffer_chain *chain, *next, *tmp, *last_with_data; + unsigned char *buffer, *result = NULL; + ev_ssize_t remaining; + int removed_last_with_data = 0; + int removed_last_with_datap = 0; + + EVBUFFER_LOCK(buf); + + chain = buf->first; + + if (size < 0) + size = buf->total_len; + /* if size > buf->total_len, we cannot guarantee to the user that she + * is going to have a long enough buffer afterwards; so we return + * NULL */ + if (size == 0 || (size_t)size > buf->total_len) + goto done; + + /* No need to pull up anything; the first size bytes are + * already here. */ + if (chain->off >= (size_t)size) { + result = chain->buffer + chain->misalign; + goto done; + } + + /* Make sure that none of the chains we need to copy from is pinned. */ + remaining = size - chain->off; + EVUTIL_ASSERT(remaining >= 0); + for (tmp=chain->next; tmp; tmp=tmp->next) { + if (CHAIN_PINNED(tmp)) + goto done; + if (tmp->off >= (size_t)remaining) + break; + remaining -= tmp->off; + } + + if (CHAIN_PINNED(chain)) { + size_t old_off = chain->off; + if (CHAIN_SPACE_LEN(chain) < size - chain->off) { + /* not enough room at end of chunk. */ + goto done; + } + buffer = CHAIN_SPACE_PTR(chain); + tmp = chain; + tmp->off = size; + size -= old_off; + chain = chain->next; + } else if (chain->buffer_len - chain->misalign >= (size_t)size) { + /* already have enough space in the first chain */ + size_t old_off = chain->off; + buffer = chain->buffer + chain->misalign + chain->off; + tmp = chain; + tmp->off = size; + size -= old_off; + chain = chain->next; + } else { + if ((tmp = evbuffer_chain_new(size)) == NULL) { + event_warn("%s: out of memory", __func__); + goto done; + } + buffer = tmp->buffer; + tmp->off = size; + buf->first = tmp; + } + + /* TODO(niels): deal with buffers that point to NULL like sendfile */ + + /* Copy and free every chunk that will be entirely pulled into tmp */ + last_with_data = *buf->last_with_datap; + for (; chain != NULL && (size_t)size >= chain->off; chain = next) { + next = chain->next; + + memcpy(buffer, chain->buffer + chain->misalign, chain->off); + size -= chain->off; + buffer += chain->off; + if (chain == last_with_data) + removed_last_with_data = 1; + if (&chain->next == buf->last_with_datap) + removed_last_with_datap = 1; + + evbuffer_chain_free(chain); + } + + if (chain != NULL) { + memcpy(buffer, chain->buffer + chain->misalign, size); + chain->misalign += size; + chain->off -= size; + } else { + buf->last = tmp; + } + + tmp->next = chain; + + if (removed_last_with_data) { + buf->last_with_datap = &buf->first; + } else if (removed_last_with_datap) { + if (buf->first->next && buf->first->next->off) + buf->last_with_datap = &buf->first->next; + else + buf->last_with_datap = &buf->first; + } + + result = (tmp->buffer + tmp->misalign); + +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +/* + * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'. + * The returned buffer needs to be freed by the called. 
+ */ +char * +evbuffer_readline(struct evbuffer *buffer) +{ + return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); +} + +static inline ev_ssize_t +evbuffer_strchr(struct evbuffer_ptr *it, const char chr) +{ + struct evbuffer_chain *chain = it->internal_.chain; + size_t i = it->internal_.pos_in_chain; + while (chain != NULL) { + char *buffer = (char *)chain->buffer + chain->misalign; + char *cp = memchr(buffer+i, chr, chain->off-i); + if (cp) { + it->internal_.chain = chain; + it->internal_.pos_in_chain = cp - buffer; + it->pos += (cp - buffer - i); + return it->pos; + } + it->pos += chain->off - i; + i = 0; + chain = chain->next; + } + + return (-1); +} + +static inline char * +find_eol_char(char *s, size_t len) +{ +#define CHUNK_SZ 128 + /* Lots of benchmarking found this approach to be faster in practice + * than doing two memchrs over the whole buffer, doin a memchr on each + * char of the buffer, or trying to emulate memchr by hand. */ + char *s_end, *cr, *lf; + s_end = s+len; + while (s < s_end) { + size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s); + cr = memchr(s, '\r', chunk); + lf = memchr(s, '\n', chunk); + if (cr) { + if (lf && lf < cr) + return lf; + return cr; + } else if (lf) { + return lf; + } + s += CHUNK_SZ; + } + + return NULL; +#undef CHUNK_SZ +} + +static ev_ssize_t +evbuffer_find_eol_char(struct evbuffer_ptr *it) +{ + struct evbuffer_chain *chain = it->internal_.chain; + size_t i = it->internal_.pos_in_chain; + while (chain != NULL) { + char *buffer = (char *)chain->buffer + chain->misalign; + char *cp = find_eol_char(buffer+i, chain->off-i); + if (cp) { + it->internal_.chain = chain; + it->internal_.pos_in_chain = cp - buffer; + it->pos += (cp - buffer) - i; + return it->pos; + } + it->pos += chain->off - i; + i = 0; + chain = chain->next; + } + + return (-1); +} + +static inline size_t +evbuffer_strspn( + struct evbuffer_ptr *ptr, const char *chrset) +{ + size_t count = 0; + struct evbuffer_chain *chain = ptr->internal_.chain; + size_t i = ptr->internal_.pos_in_chain; + + if (!chain) + return 0; + + while (1) { + char *buffer = (char *)chain->buffer + chain->misalign; + for (; i < chain->off; ++i) { + const char *p = chrset; + while (*p) { + if (buffer[i] == *p++) + goto next; + } + ptr->internal_.chain = chain; + ptr->internal_.pos_in_chain = i; + ptr->pos += count; + return count; + next: + ++count; + } + i = 0; + + if (! chain->next) { + ptr->internal_.chain = chain; + ptr->internal_.pos_in_chain = i; + ptr->pos += count; + return count; + } + + chain = chain->next; + } +} + + +static inline int +evbuffer_getchr(struct evbuffer_ptr *it) +{ + struct evbuffer_chain *chain = it->internal_.chain; + size_t off = it->internal_.pos_in_chain; + + if (chain == NULL) + return -1; + + return (unsigned char)chain->buffer[chain->misalign + off]; +} + +struct evbuffer_ptr +evbuffer_search_eol(struct evbuffer *buffer, + struct evbuffer_ptr *start, size_t *eol_len_out, + enum evbuffer_eol_style eol_style) +{ + struct evbuffer_ptr it, it2; + size_t extra_drain = 0; + int ok = 0; + + /* Avoid locking in trivial edge cases */ + if (start && start->internal_.chain == NULL) { + PTR_NOT_FOUND(&it); + if (eol_len_out) + *eol_len_out = extra_drain; + return it; + } + + EVBUFFER_LOCK(buffer); + + if (start) { + memcpy(&it, start, sizeof(it)); + } else { + it.pos = 0; + it.internal_.chain = buffer->first; + it.internal_.pos_in_chain = 0; + } + + /* the eol_style determines our first stop character and how many + * characters we are going to drain afterwards. 
*/ + switch (eol_style) { + case EVBUFFER_EOL_ANY: + if (evbuffer_find_eol_char(&it) < 0) + goto done; + memcpy(&it2, &it, sizeof(it)); + extra_drain = evbuffer_strspn(&it2, "\r\n"); + break; + case EVBUFFER_EOL_CRLF_STRICT: { + it = evbuffer_search(buffer, "\r\n", 2, &it); + if (it.pos < 0) + goto done; + extra_drain = 2; + break; + } + case EVBUFFER_EOL_CRLF: { + ev_ssize_t start_pos = it.pos; + /* Look for a LF ... */ + if (evbuffer_strchr(&it, '\n') < 0) + goto done; + extra_drain = 1; + /* ... optionally preceeded by a CR. */ + if (it.pos == start_pos) + break; /* If the first character is \n, don't back up */ + /* This potentially does an extra linear walk over the first + * few chains. Probably, that's not too expensive unless you + * have a really pathological setup. */ + memcpy(&it2, &it, sizeof(it)); + if (evbuffer_ptr_subtract(buffer, &it2, 1)<0) + break; + if (evbuffer_getchr(&it2) == '\r') { + memcpy(&it, &it2, sizeof(it)); + extra_drain = 2; + } + break; + } + case EVBUFFER_EOL_LF: + if (evbuffer_strchr(&it, '\n') < 0) + goto done; + extra_drain = 1; + break; + case EVBUFFER_EOL_NUL: + if (evbuffer_strchr(&it, '\0') < 0) + goto done; + extra_drain = 1; + break; + default: + goto done; + } + + ok = 1; +done: + EVBUFFER_UNLOCK(buffer); + + if (!ok) + PTR_NOT_FOUND(&it); + if (eol_len_out) + *eol_len_out = extra_drain; + + return it; +} + +char * +evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out, + enum evbuffer_eol_style eol_style) +{ + struct evbuffer_ptr it; + char *line; + size_t n_to_copy=0, extra_drain=0; + char *result = NULL; + + EVBUFFER_LOCK(buffer); + + if (buffer->freeze_start) { + goto done; + } + + it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style); + if (it.pos < 0) + goto done; + n_to_copy = it.pos; + + if ((line = mm_malloc(n_to_copy+1)) == NULL) { + event_warn("%s: out of memory", __func__); + goto done; + } + + evbuffer_remove(buffer, line, n_to_copy); + line[n_to_copy] = '\0'; + + evbuffer_drain(buffer, extra_drain); + result = line; +done: + EVBUFFER_UNLOCK(buffer); + + if (n_read_out) + *n_read_out = result ? n_to_copy : 0; + + return result; +} + +#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096 + +/* Adds data to an event buffer */ + +int +evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen) +{ + struct evbuffer_chain *chain, *tmp; + const unsigned char *data = data_in; + size_t remain, to_alloc; + int result = -1; + + EVBUFFER_LOCK(buf); + + if (buf->freeze_end) { + goto done; + } + /* Prevent buf->total_len overflow */ + if (datlen > EV_SIZE_MAX - buf->total_len) { + goto done; + } + + if (*buf->last_with_datap == NULL) { + chain = buf->last; + } else { + chain = *buf->last_with_datap; + } + + /* If there are no chains allocated for this buffer, allocate one + * big enough to hold all the data. 
*/ + if (chain == NULL) { + chain = evbuffer_chain_new(datlen); + if (!chain) + goto done; + evbuffer_chain_insert(buf, chain); + } + + if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { + /* Always true for mutable buffers */ + EVUTIL_ASSERT(chain->misalign >= 0 && + (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); + remain = chain->buffer_len - (size_t)chain->misalign - chain->off; + if (remain >= datlen) { + /* there's enough space to hold all the data in the + * current last chain */ + memcpy(chain->buffer + chain->misalign + chain->off, + data, datlen); + chain->off += datlen; + buf->total_len += datlen; + buf->n_add_for_cb += datlen; + goto out; + } else if (!CHAIN_PINNED(chain) && + evbuffer_chain_should_realign(chain, datlen)) { + /* we can fit the data into the misalignment */ + evbuffer_chain_align(chain); + + memcpy(chain->buffer + chain->off, data, datlen); + chain->off += datlen; + buf->total_len += datlen; + buf->n_add_for_cb += datlen; + goto out; + } + } else { + /* we cannot write any data to the last chain */ + remain = 0; + } + + /* we need to add another chain */ + to_alloc = chain->buffer_len; + if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2) + to_alloc <<= 1; + if (datlen > to_alloc) + to_alloc = datlen; + tmp = evbuffer_chain_new(to_alloc); + if (tmp == NULL) + goto done; + + if (remain) { + memcpy(chain->buffer + chain->misalign + chain->off, + data, remain); + chain->off += remain; + buf->total_len += remain; + buf->n_add_for_cb += remain; + } + + data += remain; + datlen -= remain; + + memcpy(tmp->buffer, data, datlen); + tmp->off = datlen; + evbuffer_chain_insert(buf, tmp); + buf->n_add_for_cb += datlen; + +out: + evbuffer_invoke_callbacks_(buf); + result = 0; +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +int +evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) +{ + struct evbuffer_chain *chain, *tmp; + int result = -1; + + EVBUFFER_LOCK(buf); + + if (datlen == 0) { + result = 0; + goto done; + } + if (buf->freeze_start) { + goto done; + } + if (datlen > EV_SIZE_MAX - buf->total_len) { + goto done; + } + + chain = buf->first; + + if (chain == NULL) { + chain = evbuffer_chain_new(datlen); + if (!chain) + goto done; + evbuffer_chain_insert(buf, chain); + } + + /* we cannot touch immutable buffers */ + if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { + /* Always true for mutable buffers */ + EVUTIL_ASSERT(chain->misalign >= 0 && + (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); + + /* If this chain is empty, we can treat it as + * 'empty at the beginning' rather than 'empty at the end' */ + if (chain->off == 0) + chain->misalign = chain->buffer_len; + + if ((size_t)chain->misalign >= datlen) { + /* we have enough space to fit everything */ + memcpy(chain->buffer + chain->misalign - datlen, + data, datlen); + chain->off += datlen; + chain->misalign -= datlen; + buf->total_len += datlen; + buf->n_add_for_cb += datlen; + goto out; + } else if (chain->misalign) { + /* we can only fit some of the data. 
*/ + memcpy(chain->buffer, + (char*)data + datlen - chain->misalign, + (size_t)chain->misalign); + chain->off += (size_t)chain->misalign; + buf->total_len += (size_t)chain->misalign; + buf->n_add_for_cb += (size_t)chain->misalign; + datlen -= (size_t)chain->misalign; + chain->misalign = 0; + } + } + + /* we need to add another chain */ + if ((tmp = evbuffer_chain_new(datlen)) == NULL) + goto done; + buf->first = tmp; + if (buf->last_with_datap == &buf->first && chain->off) + buf->last_with_datap = &tmp->next; + + tmp->next = chain; + + tmp->off = datlen; + EVUTIL_ASSERT(datlen <= tmp->buffer_len); + tmp->misalign = tmp->buffer_len - datlen; + + memcpy(tmp->buffer + tmp->misalign, data, datlen); + buf->total_len += datlen; + buf->n_add_for_cb += datlen; + +out: + evbuffer_invoke_callbacks_(buf); + result = 0; +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +/** Helper: realigns the memory in chain->buffer so that misalign is 0. */ +static void +evbuffer_chain_align(struct evbuffer_chain *chain) +{ + EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); + EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); + memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); + chain->misalign = 0; +} + +#define MAX_TO_COPY_IN_EXPAND 4096 +#define MAX_TO_REALIGN_IN_EXPAND 2048 + +/** Helper: return true iff we should realign chain to fit datalen bytes of + data in it. */ +static int +evbuffer_chain_should_realign(struct evbuffer_chain *chain, + size_t datlen) +{ + return chain->buffer_len - chain->off >= datlen && + (chain->off < chain->buffer_len / 2) && + (chain->off <= MAX_TO_REALIGN_IN_EXPAND); +} + +/* Expands the available space in the event buffer to at least datlen, all in + * a single chunk. Return that chunk. */ +static struct evbuffer_chain * +evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) +{ + struct evbuffer_chain *chain, **chainp; + struct evbuffer_chain *result = NULL; + ASSERT_EVBUFFER_LOCKED(buf); + + chainp = buf->last_with_datap; + + /* XXX If *chainp is no longer writeable, but has enough space in its + * misalign, this might be a bad idea: we could still use *chainp, not + * (*chainp)->next. */ + if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) + chainp = &(*chainp)->next; + + /* 'chain' now points to the first chain with writable space (if any) + * We will either use it, realign it, replace it, or resize it. */ + chain = *chainp; + + if (chain == NULL || + (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { + /* We can't use the last_with_data chain at all. Just add a + * new one that's big enough. */ + goto insert_new; + } + + /* If we can fit all the data, then we don't have to do anything */ + if (CHAIN_SPACE_LEN(chain) >= datlen) { + result = chain; + goto ok; + } + + /* If the chain is completely empty, just replace it by adding a new + * empty chain. */ + if (chain->off == 0) { + goto insert_new; + } + + /* If the misalignment plus the remaining space fulfills our data + * needs, we could just force an alignment to happen. Afterwards, we + * have enough space. But only do this if we're saving a lot of space + * and not moving too much data. Otherwise the space savings are + * probably offset by the time lost in copying. 
+ */ + if (evbuffer_chain_should_realign(chain, datlen)) { + evbuffer_chain_align(chain); + result = chain; + goto ok; + } + + /* At this point, we can either resize the last chunk with space in + * it, use the next chunk after it, or If we add a new chunk, we waste + * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we + * resize, we have to copy chain->off bytes. + */ + + /* Would expanding this chunk be affordable and worthwhile? */ + if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || + chain->off > MAX_TO_COPY_IN_EXPAND || + datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { + /* It's not worth resizing this chain. Can the next one be + * used? */ + if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { + /* Yes, we can just use the next chain (which should + * be empty. */ + result = chain->next; + goto ok; + } else { + /* No; append a new chain (which will free all + * terminal empty chains.) */ + goto insert_new; + } + } else { + /* Okay, we're going to try to resize this chain: Not doing so + * would waste at least 1/8 of its current allocation, and we + * can do so without having to copy more than + * MAX_TO_COPY_IN_EXPAND bytes. */ + /* figure out how much space we need */ + size_t length = chain->off + datlen; + struct evbuffer_chain *tmp = evbuffer_chain_new(length); + if (tmp == NULL) + goto err; + + /* copy the data over that we had so far */ + tmp->off = chain->off; + memcpy(tmp->buffer, chain->buffer + chain->misalign, + chain->off); + /* fix up the list */ + EVUTIL_ASSERT(*chainp == chain); + result = *chainp = tmp; + + if (buf->last == chain) + buf->last = tmp; + + tmp->next = chain->next; + evbuffer_chain_free(chain); + goto ok; + } + +insert_new: + result = evbuffer_chain_insert_new(buf, datlen); + if (!result) + goto err; +ok: + EVUTIL_ASSERT(result); + EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); +err: + return result; +} + +/* Make sure that datlen bytes are available for writing in the last n + * chains. Never copies or moves data. */ +int +evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n) +{ + struct evbuffer_chain *chain = buf->last, *tmp, *next; + size_t avail; + int used; + + ASSERT_EVBUFFER_LOCKED(buf); + EVUTIL_ASSERT(n >= 2); + + if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { + /* There is no last chunk, or we can't touch the last chunk. + * Just add a new chunk. */ + chain = evbuffer_chain_new(datlen); + if (chain == NULL) + return (-1); + + evbuffer_chain_insert(buf, chain); + return (0); + } + + used = 0; /* number of chains we're using space in. */ + avail = 0; /* how much space they have. */ + /* How many bytes can we stick at the end of buffer as it is? Iterate + * over the chains at the end of the buffer, tring to see how much + * space we have in the first n. */ + for (chain = *buf->last_with_datap; chain; chain = chain->next) { + if (chain->off) { + size_t space = (size_t) CHAIN_SPACE_LEN(chain); + EVUTIL_ASSERT(chain == *buf->last_with_datap); + if (space) { + avail += space; + ++used; + } + } else { + /* No data in chain; realign it. */ + chain->misalign = 0; + avail += chain->buffer_len; + ++used; + } + if (avail >= datlen) { + /* There is already enough space. Just return */ + return (0); + } + if (used == n) + break; + } + + /* There wasn't enough space in the first n chains with space in + * them. Either add a new chain with enough space, or replace all + * empty chains with one that has enough space, depending on n. 
*/ + if (used < n) { + /* The loop ran off the end of the chains before it hit n + * chains; we can add another. */ + EVUTIL_ASSERT(chain == NULL); + + tmp = evbuffer_chain_new(datlen - avail); + if (tmp == NULL) + return (-1); + + buf->last->next = tmp; + buf->last = tmp; + /* (we would only set last_with_data if we added the first + * chain. But if the buffer had no chains, we would have + * just allocated a new chain earlier) */ + return (0); + } else { + /* Nuke _all_ the empty chains. */ + int rmv_all = 0; /* True iff we removed last_with_data. */ + chain = *buf->last_with_datap; + if (!chain->off) { + EVUTIL_ASSERT(chain == buf->first); + rmv_all = 1; + avail = 0; + } else { + /* can't overflow, since only mutable chains have + * huge misaligns. */ + avail = (size_t) CHAIN_SPACE_LEN(chain); + chain = chain->next; + } + + + for (; chain; chain = next) { + next = chain->next; + EVUTIL_ASSERT(chain->off == 0); + evbuffer_chain_free(chain); + } + EVUTIL_ASSERT(datlen >= avail); + tmp = evbuffer_chain_new(datlen - avail); + if (tmp == NULL) { + if (rmv_all) { + ZERO_CHAIN(buf); + } else { + buf->last = *buf->last_with_datap; + (*buf->last_with_datap)->next = NULL; + } + return (-1); + } + + if (rmv_all) { + buf->first = buf->last = tmp; + buf->last_with_datap = &buf->first; + } else { + (*buf->last_with_datap)->next = tmp; + buf->last = tmp; + } + return (0); + } +} + +int +evbuffer_expand(struct evbuffer *buf, size_t datlen) +{ + struct evbuffer_chain *chain; + + EVBUFFER_LOCK(buf); + chain = evbuffer_expand_singlechain(buf, datlen); + EVBUFFER_UNLOCK(buf); + return chain ? 0 : -1; +} + +/* + * Reads data from a file descriptor into a buffer. + */ + +#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32) +#define USE_IOVEC_IMPL +#endif + +#ifdef USE_IOVEC_IMPL + +#ifdef EVENT__HAVE_SYS_UIO_H +/* number of iovec we use for writev, fragmentation is going to determine + * how much we end up writing */ + +#define DEFAULT_WRITE_IOVEC 128 + +#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC +#define NUM_WRITE_IOVEC UIO_MAXIOV +#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC +#define NUM_WRITE_IOVEC IOV_MAX +#else +#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC +#endif + +#define IOV_TYPE struct iovec +#define IOV_PTR_FIELD iov_base +#define IOV_LEN_FIELD iov_len +#define IOV_LEN_TYPE size_t +#else +#define NUM_WRITE_IOVEC 16 +#define IOV_TYPE WSABUF +#define IOV_PTR_FIELD buf +#define IOV_LEN_FIELD len +#define IOV_LEN_TYPE unsigned long +#endif +#endif +#define NUM_READ_IOVEC 4 + +#define EVBUFFER_MAX_READ 4096 + +/** Helper function to figure out which space to use for reading data into + an evbuffer. Internal use only. + + @param buf The buffer to read into + @param howmuch How much we want to read. + @param vecs An array of two or more iovecs or WSABUFs. + @param n_vecs_avail The length of vecs + @param chainp A pointer to a variable to hold the first chain we're + reading into. + @param exact Boolean: if true, we do not provide more than 'howmuch' + space in the vectors, even if more space is available. + @return The number of buffers we're using. 
+ */ +int +evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, + struct evbuffer_iovec *vecs, int n_vecs_avail, + struct evbuffer_chain ***chainp, int exact) +{ + struct evbuffer_chain *chain; + struct evbuffer_chain **firstchainp; + size_t so_far; + int i; + ASSERT_EVBUFFER_LOCKED(buf); + + if (howmuch < 0) + return -1; + + so_far = 0; + /* Let firstchain be the first chain with any space on it */ + firstchainp = buf->last_with_datap; + EVUTIL_ASSERT(*firstchainp); + if (CHAIN_SPACE_LEN(*firstchainp) == 0) { + firstchainp = &(*firstchainp)->next; + } + + chain = *firstchainp; + EVUTIL_ASSERT(chain); + for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { + size_t avail = (size_t) CHAIN_SPACE_LEN(chain); + if (avail > (howmuch - so_far) && exact) + avail = howmuch - so_far; + vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); + vecs[i].iov_len = avail; + so_far += avail; + chain = chain->next; + } + + *chainp = firstchainp; + return i; +} + +static int +get_n_bytes_readable_on_socket(evutil_socket_t fd) +{ +#if defined(FIONREAD) && defined(_WIN32) + unsigned long lng = EVBUFFER_MAX_READ; + if (ioctlsocket(fd, FIONREAD, &lng) < 0) + return -1; + /* Can overflow, but mostly harmlessly. XXXX */ + return (int)lng; +#elif defined(FIONREAD) + int n = EVBUFFER_MAX_READ; + if (ioctl(fd, FIONREAD, &n) < 0) + return -1; + return n; +#else + return EVBUFFER_MAX_READ; +#endif +} + +/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t + * as howmuch? */ +int +evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) +{ + struct evbuffer_chain **chainp; + int n; + int result; + +#ifdef USE_IOVEC_IMPL + int nvecs, i, remaining; +#else + struct evbuffer_chain *chain; + unsigned char *p; +#endif + + EVBUFFER_LOCK(buf); + + if (buf->freeze_end) { + result = -1; + goto done; + } + + n = get_n_bytes_readable_on_socket(fd); + if (n <= 0 || n > EVBUFFER_MAX_READ) + n = EVBUFFER_MAX_READ; + if (howmuch < 0 || howmuch > n) + howmuch = n; + +#ifdef USE_IOVEC_IMPL + /* Since we can use iovecs, we're willing to use the last + * NUM_READ_IOVEC chains. */ + if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { + result = -1; + goto done; + } else { + IOV_TYPE vecs[NUM_READ_IOVEC]; +#ifdef EVBUFFER_IOVEC_IS_NATIVE_ + nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, + NUM_READ_IOVEC, &chainp, 1); +#else + /* We aren't using the native struct iovec. Therefore, + we are on win32. */ + struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; + nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, + &chainp, 1); + + for (i=0; i < nvecs; ++i) + WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); +#endif + +#ifdef _WIN32 + { + DWORD bytesRead; + DWORD flags=0; + if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { + /* The read failed. It might be a close, + * or it might be an error. */ + if (WSAGetLastError() == WSAECONNABORTED) + n = 0; + else + n = -1; + } else + n = bytesRead; + } +#else + n = readv(fd, vecs, nvecs); +#endif + } + +#else /*!USE_IOVEC_IMPL*/ + /* If we don't have FIONREAD, we might waste some space here */ + /* XXX we _will_ waste some space here if there is any space left + * over on buf->last. 
*/ + if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { + result = -1; + goto done; + } + + /* We can append new data at this point */ + p = chain->buffer + chain->misalign + chain->off; + +#ifndef _WIN32 + n = read(fd, p, howmuch); +#else + n = recv(fd, p, howmuch, 0); +#endif +#endif /* USE_IOVEC_IMPL */ + + if (n == -1) { + result = -1; + goto done; + } + if (n == 0) { + result = 0; + goto done; + } + +#ifdef USE_IOVEC_IMPL + remaining = n; + for (i=0; i < nvecs; ++i) { + /* can't overflow, since only mutable chains have + * huge misaligns. */ + size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); + /* XXXX This is a kludge that can waste space in perverse + * situations. */ + if (space > EVBUFFER_CHAIN_MAX) + space = EVBUFFER_CHAIN_MAX; + if ((ev_ssize_t)space < remaining) { + (*chainp)->off += space; + remaining -= (int)space; + } else { + (*chainp)->off += remaining; + buf->last_with_datap = chainp; + break; + } + chainp = &(*chainp)->next; + } +#else + chain->off += n; + advance_last_with_data(buf); +#endif + buf->total_len += n; + buf->n_add_for_cb += n; + + /* Tell someone about changes in this buffer */ + evbuffer_invoke_callbacks_(buf); + result = n; +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +#ifdef USE_IOVEC_IMPL +static inline int +evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, + ev_ssize_t howmuch) +{ + IOV_TYPE iov[NUM_WRITE_IOVEC]; + struct evbuffer_chain *chain = buffer->first; + int n, i = 0; + + if (howmuch < 0) + return -1; + + ASSERT_EVBUFFER_LOCKED(buffer); + /* XXX make this top out at some maximal data length? if the + * buffer has (say) 1MB in it, split over 128 chains, there's + * no way it all gets written in one go. */ + while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { +#ifdef USE_SENDFILE + /* we cannot write the file info via writev */ + if (chain->flags & EVBUFFER_SENDFILE) + break; +#endif + iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); + if ((size_t)howmuch >= chain->off) { + /* XXXcould be problematic when windows supports mmap*/ + iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; + howmuch -= chain->off; + } else { + /* XXXcould be problematic when windows supports mmap*/ + iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; + break; + } + chain = chain->next; + } + if (! 
i) + return 0; + +#ifdef _WIN32 + { + DWORD bytesSent; + if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) + n = -1; + else + n = bytesSent; + } +#else + n = writev(fd, iov, i); +#endif + return (n); +} +#endif + +#ifdef USE_SENDFILE +static inline int +evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, + ev_ssize_t howmuch) +{ + struct evbuffer_chain *chain = buffer->first; + struct evbuffer_chain_file_segment *info = + EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, + chain); + const int source_fd = info->segment->fd; +#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) + int res; + ev_off_t len = chain->off; +#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) + ev_ssize_t res; + off_t offset = chain->misalign; +#endif + + ASSERT_EVBUFFER_LOCKED(buffer); + +#if defined(SENDFILE_IS_MACOSX) + res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); + if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) + return (-1); + + return (len); +#elif defined(SENDFILE_IS_FREEBSD) + res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); + if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) + return (-1); + + return (len); +#elif defined(SENDFILE_IS_LINUX) + /* TODO(niels): implement splice */ + res = sendfile(dest_fd, source_fd, &offset, chain->off); + if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { + /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ + return (0); + } + return (res); +#elif defined(SENDFILE_IS_SOLARIS) + { + const off_t offset_orig = offset; + res = sendfile(dest_fd, source_fd, &offset, chain->off); + if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { + if (offset - offset_orig) + return offset - offset_orig; + /* if this is EAGAIN or EINTR and no bytes were + * written, return 0 */ + return (0); + } + return (res); + } +#endif +} +#endif + +int +evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, + ev_ssize_t howmuch) +{ + int n = -1; + + EVBUFFER_LOCK(buffer); + + if (buffer->freeze_start) { + goto done; + } + + if (howmuch < 0 || (size_t)howmuch > buffer->total_len) + howmuch = buffer->total_len; + + if (howmuch > 0) { +#ifdef USE_SENDFILE + struct evbuffer_chain *chain = buffer->first; + if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) + n = evbuffer_write_sendfile(buffer, fd, howmuch); + else { +#endif +#ifdef USE_IOVEC_IMPL + n = evbuffer_write_iovec(buffer, fd, howmuch); +#elif defined(_WIN32) + /* XXX(nickm) Don't disable this code until we know if + * the WSARecv code above works. */ + void *p = evbuffer_pullup(buffer, howmuch); + EVUTIL_ASSERT(p || !howmuch); + n = send(fd, p, howmuch, 0); +#else + void *p = evbuffer_pullup(buffer, howmuch); + EVUTIL_ASSERT(p || !howmuch); + n = write(fd, p, howmuch); +#endif +#ifdef USE_SENDFILE + } +#endif + } + + if (n > 0) + evbuffer_drain(buffer, n); + +done: + EVBUFFER_UNLOCK(buffer); + return (n); +} + +int +evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) +{ + return evbuffer_write_atmost(buffer, fd, -1); +} + +unsigned char * +evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len) +{ + unsigned char *search; + struct evbuffer_ptr ptr; + + EVBUFFER_LOCK(buffer); + + ptr = evbuffer_search(buffer, (const char *)what, len, NULL); + if (ptr.pos < 0) { + search = NULL; + } else { + search = evbuffer_pullup(buffer, ptr.pos + len); + if (search) + search += ptr.pos; + } + EVBUFFER_UNLOCK(buffer); + return search; +} + +/* Subract howfar from the position of pos within + * buf. 
Returns 0 on success, -1 on failure. + * + * This isn't exposed yet, because of potential inefficiency issues. + * Maybe it should be. */ +static int +evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, + size_t howfar) +{ + if (pos->pos < 0) + return -1; + if (howfar > (size_t)pos->pos) + return -1; + if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { + pos->internal_.pos_in_chain -= howfar; + pos->pos -= howfar; + return 0; + } else { + const size_t newpos = pos->pos - howfar; + /* Here's the inefficient part: it walks over the + * chains until we hit newpos. */ + return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET); + } +} + +int +evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos, + size_t position, enum evbuffer_ptr_how how) +{ + size_t left = position; + struct evbuffer_chain *chain = NULL; + int result = 0; + + EVBUFFER_LOCK(buf); + + switch (how) { + case EVBUFFER_PTR_SET: + chain = buf->first; + pos->pos = position; + position = 0; + break; + case EVBUFFER_PTR_ADD: + /* this avoids iterating over all previous chains if + we just want to advance the position */ + if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { + EVBUFFER_UNLOCK(buf); + return -1; + } + chain = pos->internal_.chain; + pos->pos += position; + position = pos->internal_.pos_in_chain; + break; + } + + EVUTIL_ASSERT(EV_SIZE_MAX - left >= position); + while (chain && position + left >= chain->off) { + left -= chain->off - position; + chain = chain->next; + position = 0; + } + if (chain) { + pos->internal_.chain = chain; + pos->internal_.pos_in_chain = position + left; + } else if (left == 0) { + /* The first byte in the (nonexistent) chain after the last chain */ + pos->internal_.chain = NULL; + pos->internal_.pos_in_chain = 0; + } else { + PTR_NOT_FOUND(pos); + result = -1; + } + + EVBUFFER_UNLOCK(buf); + + return result; +} + +/** + Compare the bytes in buf at position pos to the len bytes in mem. Return + less than 0, 0, or greater than 0 as memcmp. 
+ */ +static int +evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, + const char *mem, size_t len) +{ + struct evbuffer_chain *chain; + size_t position; + int r; + + ASSERT_EVBUFFER_LOCKED(buf); + + if (pos->pos < 0 || + EV_SIZE_MAX - len < (size_t)pos->pos || + pos->pos + len > buf->total_len) + return -1; + + chain = pos->internal_.chain; + position = pos->internal_.pos_in_chain; + while (len && chain) { + size_t n_comparable; + if (len + position > chain->off) + n_comparable = chain->off - position; + else + n_comparable = len; + r = memcmp(chain->buffer + chain->misalign + position, mem, + n_comparable); + if (r) + return r; + mem += n_comparable; + len -= n_comparable; + position = 0; + chain = chain->next; + } + + return 0; +} + +struct evbuffer_ptr +evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) +{ + return evbuffer_search_range(buffer, what, len, start, NULL); +} + +struct evbuffer_ptr +evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) +{ + struct evbuffer_ptr pos; + struct evbuffer_chain *chain, *last_chain = NULL; + const unsigned char *p; + char first; + + EVBUFFER_LOCK(buffer); + + if (start) { + memcpy(&pos, start, sizeof(pos)); + chain = pos.internal_.chain; + } else { + pos.pos = 0; + chain = pos.internal_.chain = buffer->first; + pos.internal_.pos_in_chain = 0; + } + + if (end) + last_chain = end->internal_.chain; + + if (!len || len > EV_SSIZE_MAX) + goto done; + + first = what[0]; + + while (chain) { + const unsigned char *start_at = + chain->buffer + chain->misalign + + pos.internal_.pos_in_chain; + p = memchr(start_at, first, + chain->off - pos.internal_.pos_in_chain); + if (p) { + pos.pos += p - start_at; + pos.internal_.pos_in_chain += p - start_at; + if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { + if (end && pos.pos + (ev_ssize_t)len > end->pos) + goto not_found; + else + goto done; + } + ++pos.pos; + ++pos.internal_.pos_in_chain; + if (pos.internal_.pos_in_chain == chain->off) { + chain = pos.internal_.chain = chain->next; + pos.internal_.pos_in_chain = 0; + } + } else { + if (chain == last_chain) + goto not_found; + pos.pos += chain->off - pos.internal_.pos_in_chain; + chain = pos.internal_.chain = chain->next; + pos.internal_.pos_in_chain = 0; + } + } + +not_found: + PTR_NOT_FOUND(&pos); +done: + EVBUFFER_UNLOCK(buffer); + return pos; +} + +int +evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, + struct evbuffer_ptr *start_at, + struct evbuffer_iovec *vec, int n_vec) +{ + struct evbuffer_chain *chain; + int idx = 0; + ev_ssize_t len_so_far = 0; + + /* Avoid locking in trivial edge cases */ + if (start_at && start_at->internal_.chain == NULL) + return 0; + + EVBUFFER_LOCK(buffer); + + if (start_at) { + chain = start_at->internal_.chain; + len_so_far = chain->off + - start_at->internal_.pos_in_chain; + idx = 1; + if (n_vec > 0) { + vec[0].iov_base = (void *)(chain->buffer + chain->misalign + + start_at->internal_.pos_in_chain); + vec[0].iov_len = len_so_far; + } + chain = chain->next; + } else { + chain = buffer->first; + } + + if (n_vec == 0 && len < 0) { + /* If no vectors are provided and they asked for "everything", + * pretend they asked for the actual available amount. 
*/ + len = buffer->total_len; + if (start_at) { + len -= start_at->pos; + } + } + + while (chain) { + if (len >= 0 && len_so_far >= len) + break; + if (idx<n_vec) { + vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); + vec[idx].iov_len = chain->off; + } else if (len<0) { + break; + } + ++idx; + len_so_far += chain->off; + chain = chain->next; + } + + EVBUFFER_UNLOCK(buffer); + + return idx; +} + + +int +evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) +{ + char *buffer; + size_t space; + int sz, result = -1; + va_list aq; + struct evbuffer_chain *chain; + + + EVBUFFER_LOCK(buf); + + if (buf->freeze_end) { + goto done; + } + + /* make sure that at least some space is available */ + if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) + goto done; + + for (;;) { +#if 0 + size_t used = chain->misalign + chain->off; + buffer = (char *)chain->buffer + chain->misalign + chain->off; + EVUTIL_ASSERT(chain->buffer_len >= used); + space = chain->buffer_len - used; +#endif + buffer = (char*) CHAIN_SPACE_PTR(chain); + space = (size_t) CHAIN_SPACE_LEN(chain); + +#ifndef va_copy +#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) +#endif + va_copy(aq, ap); + + sz = evutil_vsnprintf(buffer, space, fmt, aq); + + va_end(aq); + + if (sz < 0) + goto done; + if (INT_MAX >= EVBUFFER_CHAIN_MAX && + (size_t)sz >= EVBUFFER_CHAIN_MAX) + goto done; + if ((size_t)sz < space) { + chain->off += sz; + buf->total_len += sz; + buf->n_add_for_cb += sz; + + advance_last_with_data(buf); + evbuffer_invoke_callbacks_(buf); + result = sz; + goto done; + } + if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) + goto done; + } + /* NOTREACHED */ + +done: + EVBUFFER_UNLOCK(buf); + return result; +} + +int +evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) +{ + int res = -1; + va_list ap; + + va_start(ap, fmt); + res = evbuffer_add_vprintf(buf, fmt, ap); + va_end(ap); + + return (res); +} + +int +evbuffer_add_reference(struct evbuffer *outbuf, + const void *data, size_t datlen, + evbuffer_ref_cleanup_cb cleanupfn, void *extra) +{ + struct evbuffer_chain *chain; + struct evbuffer_chain_reference *info; + int result = -1; + + chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); + if (!chain) + return (-1); + chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; + chain->buffer = (unsigned char *)data; + chain->buffer_len = datlen; + chain->off = datlen; + + info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); + info->cleanupfn = cleanupfn; + info->extra = extra; + + EVBUFFER_LOCK(outbuf); + if (outbuf->freeze_end) { + /* don't call chain_free; we do not want to actually invoke + * the cleanup function */ + mm_free(chain); + goto done; + } + evbuffer_chain_insert(outbuf, chain); + outbuf->n_add_for_cb += datlen; + + evbuffer_invoke_callbacks_(outbuf); + + result = 0; +done: + EVBUFFER_UNLOCK(outbuf); + + return result; +} + +/* TODO(niels): we may want to add to automagically convert to mmap, in + * case evbuffer_remove() or evbuffer_pullup() are being used. 
+ */ +struct evbuffer_file_segment * +evbuffer_file_segment_new( + int fd, ev_off_t offset, ev_off_t length, unsigned flags) +{ + struct evbuffer_file_segment *seg = + mm_calloc(sizeof(struct evbuffer_file_segment), 1); + if (!seg) + return NULL; + seg->refcnt = 1; + seg->fd = fd; + seg->flags = flags; + seg->file_offset = offset; + seg->cleanup_cb = NULL; + seg->cleanup_cb_arg = NULL; +#ifdef _WIN32 +#ifndef lseek +#define lseek _lseeki64 +#endif +#ifndef fstat +#define fstat _fstat +#endif +#ifndef stat +#define stat _stat +#endif +#endif + if (length == -1) { + struct stat st; + if (fstat(fd, &st) < 0) + goto err; + length = st.st_size; + } + seg->length = length; + + if (offset < 0 || length < 0 || + ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || + (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) + goto err; + +#if defined(USE_SENDFILE) + if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { + seg->can_sendfile = 1; + goto done; + } +#endif + + if (evbuffer_file_segment_materialize(seg)<0) + goto err; + +#if defined(USE_SENDFILE) +done: +#endif + if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { + EVTHREAD_ALLOC_LOCK(seg->lock, 0); + } + return seg; +err: + mm_free(seg); + return NULL; +} + +#ifdef EVENT__HAVE_MMAP +static long +get_page_size(void) +{ +#ifdef SC_PAGE_SIZE + return sysconf(SC_PAGE_SIZE); +#elif defined(_SC_PAGE_SIZE) + return sysconf(_SC_PAGE_SIZE); +#else + return 1; +#endif +} +#endif + +/* DOCDOC */ +/* Requires lock */ +static int +evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) +{ + const unsigned flags = seg->flags; + const int fd = seg->fd; + const ev_off_t length = seg->length; + const ev_off_t offset = seg->file_offset; + + if (seg->contents) + return 0; /* already materialized */ + +#if defined(EVENT__HAVE_MMAP) + if (!(flags & EVBUF_FS_DISABLE_MMAP)) { + off_t offset_rounded = 0, offset_leftover = 0; + void *mapped; + if (offset) { + /* mmap implementations don't generally like us + * to have an offset that isn't a round */ + long page_size = get_page_size(); + if (page_size == -1) + goto err; + offset_leftover = offset % page_size; + offset_rounded = offset - offset_leftover; + } + mapped = mmap(NULL, length + offset_leftover, + PROT_READ, +#ifdef MAP_NOCACHE + MAP_NOCACHE | /* ??? */ +#endif +#ifdef MAP_FILE + MAP_FILE | +#endif + MAP_PRIVATE, + fd, offset_rounded); + if (mapped == MAP_FAILED) { + event_warn("%s: mmap(%d, %d, %zu) failed", + __func__, fd, 0, (size_t)(offset + length)); + } else { + seg->mapping = mapped; + seg->contents = (char*)mapped+offset_leftover; + seg->mmap_offset = 0; + seg->is_mapping = 1; + goto done; + } + } +#endif +#ifdef _WIN32 + if (!(flags & EVBUF_FS_DISABLE_MMAP)) { + intptr_t h = _get_osfhandle(fd); + HANDLE m; + ev_uint64_t total_size = length+offset; + if ((HANDLE)h == INVALID_HANDLE_VALUE) + goto err; + m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, + (total_size >> 32), total_size & 0xfffffffful, + NULL); + if (m != INVALID_HANDLE_VALUE) { /* Does h leak? 
*/ + seg->mapping_handle = m; + seg->mmap_offset = offset; + seg->is_mapping = 1; + goto done; + } + } +#endif + { + ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; + ev_off_t read_so_far = 0; + char *mem; + int e; + ev_ssize_t n = 0; + if (!(mem = mm_malloc(length))) + goto err; + if (start_pos < 0) { + mm_free(mem); + goto err; + } + if (lseek(fd, offset, SEEK_SET) < 0) { + mm_free(mem); + goto err; + } + while (read_so_far < length) { + n = read(fd, mem+read_so_far, length-read_so_far); + if (n <= 0) + break; + read_so_far += n; + } + + e = errno; + pos = lseek(fd, start_pos, SEEK_SET); + if (n < 0 || (n == 0 && length > read_so_far)) { + mm_free(mem); + errno = e; + goto err; + } else if (pos < 0) { + mm_free(mem); + goto err; + } + + seg->contents = mem; + } + +done: + return 0; +err: + return -1; +} + +void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, + evbuffer_file_segment_cleanup_cb cb, void* arg) +{ + EVUTIL_ASSERT(seg->refcnt > 0); + seg->cleanup_cb = cb; + seg->cleanup_cb_arg = arg; +} + +void +evbuffer_file_segment_free(struct evbuffer_file_segment *seg) +{ + int refcnt; + EVLOCK_LOCK(seg->lock, 0); + refcnt = --seg->refcnt; + EVLOCK_UNLOCK(seg->lock, 0); + if (refcnt > 0) + return; + EVUTIL_ASSERT(refcnt == 0); + + if (seg->is_mapping) { +#ifdef _WIN32 + CloseHandle(seg->mapping_handle); +#elif defined (EVENT__HAVE_MMAP) + off_t offset_leftover; + offset_leftover = seg->file_offset % get_page_size(); + if (munmap(seg->mapping, seg->length + offset_leftover) == -1) + event_warn("%s: munmap failed", __func__); +#endif + } else if (seg->contents) { + mm_free(seg->contents); + } + + if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { + close(seg->fd); + } + + if (seg->cleanup_cb) { + (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg, + seg->flags, seg->cleanup_cb_arg); + seg->cleanup_cb = NULL; + seg->cleanup_cb_arg = NULL; + } + + EVTHREAD_FREE_LOCK(seg->lock, 0); + mm_free(seg); +} + +int +evbuffer_add_file_segment(struct evbuffer *buf, + struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) +{ + struct evbuffer_chain *chain; + struct evbuffer_chain_file_segment *extra; + int can_use_sendfile = 0; + + EVBUFFER_LOCK(buf); + EVLOCK_LOCK(seg->lock, 0); + if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { + can_use_sendfile = 1; + } else { + if (!seg->contents) { + if (evbuffer_file_segment_materialize(seg)<0) { + EVLOCK_UNLOCK(seg->lock, 0); + EVBUFFER_UNLOCK(buf); + return -1; + } + } + } + ++seg->refcnt; + EVLOCK_UNLOCK(seg->lock, 0); + + if (buf->freeze_end) + goto err; + + if (length < 0) { + if (offset > seg->length) + goto err; + length = seg->length - offset; + } + + /* Can we actually add this? 
*/ + if (offset+length > seg->length) + goto err; + + chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); + if (!chain) + goto err; + extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); + + chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; + if (can_use_sendfile && seg->can_sendfile) { + chain->flags |= EVBUFFER_SENDFILE; + chain->misalign = seg->file_offset + offset; + chain->off = length; + chain->buffer_len = chain->misalign + length; + } else if (seg->is_mapping) { +#ifdef _WIN32 + ev_uint64_t total_offset = seg->mmap_offset+offset; + ev_uint64_t offset_rounded=0, offset_remaining=0; + LPVOID data; + if (total_offset) { + SYSTEM_INFO si; + memset(&si, 0, sizeof(si)); /* cargo cult */ + GetSystemInfo(&si); + offset_remaining = total_offset % si.dwAllocationGranularity; + offset_rounded = total_offset - offset_remaining; + } + data = MapViewOfFile( + seg->mapping_handle, + FILE_MAP_READ, + offset_rounded >> 32, + offset_rounded & 0xfffffffful, + length + offset_remaining); + if (data == NULL) { + mm_free(chain); + goto err; + } + chain->buffer = (unsigned char*) data; + chain->buffer_len = length+offset_remaining; + chain->misalign = offset_remaining; + chain->off = length; +#else + chain->buffer = (unsigned char*)(seg->contents + offset); + chain->buffer_len = length; + chain->off = length; +#endif + } else { + chain->buffer = (unsigned char*)(seg->contents + offset); + chain->buffer_len = length; + chain->off = length; + } + + extra->segment = seg; + buf->n_add_for_cb += length; + evbuffer_chain_insert(buf, chain); + + evbuffer_invoke_callbacks_(buf); + + EVBUFFER_UNLOCK(buf); + + return 0; +err: + EVBUFFER_UNLOCK(buf); + evbuffer_file_segment_free(seg); /* Lowers the refcount */ + return -1; +} + +int +evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) +{ + struct evbuffer_file_segment *seg; + unsigned flags = EVBUF_FS_CLOSE_ON_FREE; + int r; + + seg = evbuffer_file_segment_new(fd, offset, length, flags); + if (!seg) + return -1; + r = evbuffer_add_file_segment(buf, seg, 0, length); + if (r == 0) + evbuffer_file_segment_free(seg); + return r; +} + +int +evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) +{ + EVBUFFER_LOCK(buffer); + + if (!LIST_EMPTY(&buffer->callbacks)) + evbuffer_remove_all_callbacks(buffer); + + if (cb) { + struct evbuffer_cb_entry *ent = + evbuffer_add_cb(buffer, NULL, cbarg); + if (!ent) { + EVBUFFER_UNLOCK(buffer); + return -1; + } + ent->cb.cb_obsolete = cb; + ent->flags |= EVBUFFER_CB_OBSOLETE; + } + EVBUFFER_UNLOCK(buffer); + return 0; +} + +struct evbuffer_cb_entry * +evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) +{ + struct evbuffer_cb_entry *e; + if (! 
(e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) + return NULL; + EVBUFFER_LOCK(buffer); + e->cb.cb_func = cb; + e->cbarg = cbarg; + e->flags = EVBUFFER_CB_ENABLED; + LIST_INSERT_HEAD(&buffer->callbacks, e, next); + EVBUFFER_UNLOCK(buffer); + return e; +} + +int +evbuffer_remove_cb_entry(struct evbuffer *buffer, + struct evbuffer_cb_entry *ent) +{ + EVBUFFER_LOCK(buffer); + LIST_REMOVE(ent, next); + EVBUFFER_UNLOCK(buffer); + mm_free(ent); + return 0; +} + +int +evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) +{ + struct evbuffer_cb_entry *cbent; + int result = -1; + EVBUFFER_LOCK(buffer); + LIST_FOREACH(cbent, &buffer->callbacks, next) { + if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { + result = evbuffer_remove_cb_entry(buffer, cbent); + goto done; + } + } +done: + EVBUFFER_UNLOCK(buffer); + return result; +} + +int +evbuffer_cb_set_flags(struct evbuffer *buffer, + struct evbuffer_cb_entry *cb, ev_uint32_t flags) +{ + /* the user isn't allowed to mess with these. */ + flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; + EVBUFFER_LOCK(buffer); + cb->flags |= flags; + EVBUFFER_UNLOCK(buffer); + return 0; +} + +int +evbuffer_cb_clear_flags(struct evbuffer *buffer, + struct evbuffer_cb_entry *cb, ev_uint32_t flags) +{ + /* the user isn't allowed to mess with these. */ + flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; + EVBUFFER_LOCK(buffer); + cb->flags &= ~flags; + EVBUFFER_UNLOCK(buffer); + return 0; +} + +int +evbuffer_freeze(struct evbuffer *buffer, int start) +{ + EVBUFFER_LOCK(buffer); + if (start) + buffer->freeze_start = 1; + else + buffer->freeze_end = 1; + EVBUFFER_UNLOCK(buffer); + return 0; +} + +int +evbuffer_unfreeze(struct evbuffer *buffer, int start) +{ + EVBUFFER_LOCK(buffer); + if (start) + buffer->freeze_start = 0; + else + buffer->freeze_end = 0; + EVBUFFER_UNLOCK(buffer); + return 0; +} + +#if 0 +void +evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) +{ + if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { + cb->size_before_suspend = evbuffer_get_length(buffer); + cb->flags |= EVBUFFER_CB_SUSPENDED; + } +} + +void +evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) +{ + if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { + unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); + size_t sz = cb->size_before_suspend; + cb->flags &= ~(EVBUFFER_CB_SUSPENDED| + EVBUFFER_CB_CALL_ON_UNSUSPEND); + cb->size_before_suspend = 0; + if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { + cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); + } + } +} +#endif + +int +evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, + int max_cbs) +{ + int r = 0; + EVBUFFER_LOCK(buffer); + if (buffer->deferred_cbs) { + if (max_cbs < 1) { + r = -1; + goto done; + } + cbs[0] = &buffer->deferred; + r = 1; + } +done: + EVBUFFER_UNLOCK(buffer); + return r; +} diff --git a/probe-busybox/libevent-2.1.11-stable/buffer_iocp.c b/probe-busybox/libevent-2.1.11-stable/buffer_iocp.c new file mode 100644 index 00000000..2af0c49c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/buffer_iocp.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + @file buffer_iocp.c + + This module implements overlapped read and write functions for evbuffer + objects on Windows. +*/ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include "event2/buffer.h" +#include "event2/buffer_compat.h" +#include "event2/util.h" +#include "event2/thread.h" +#include "util-internal.h" +#include "evthread-internal.h" +#include "evbuffer-internal.h" +#include "iocp-internal.h" +#include "mm-internal.h" + +#include +#include +#include +#include + +#define MAX_WSABUFS 16 + +/** An evbuffer that can handle overlapped IO. */ +struct evbuffer_overlapped { + struct evbuffer buffer; + /** The socket that we're doing overlapped IO on. */ + evutil_socket_t fd; + + /** pending I/O type */ + unsigned read_in_progress : 1; + unsigned write_in_progress : 1; + + /** The first pinned chain in the buffer. */ + struct evbuffer_chain *first_pinned; + + /** How many chains are pinned; how many of the fields in buffers + * are we using. */ + int n_buffers; + WSABUF buffers[MAX_WSABUFS]; +}; + +/** Given an evbuffer, return the correponding evbuffer structure, or NULL if + * the evbuffer isn't overlapped. */ +static inline struct evbuffer_overlapped * +upcast_evbuffer(struct evbuffer *buf) +{ + if (!buf || !buf->is_overlapped) + return NULL; + return EVUTIL_UPCAST(buf, struct evbuffer_overlapped, buffer); +} + +/** Unpin all the chains noted as pinned in 'eo'. */ +static void +pin_release(struct evbuffer_overlapped *eo, unsigned flag) +{ + int i; + struct evbuffer_chain *next, *chain = eo->first_pinned; + + for (i = 0; i < eo->n_buffers; ++i) { + EVUTIL_ASSERT(chain); + next = chain->next; + evbuffer_chain_unpin_(chain, flag); + chain = next; + } +} + +void +evbuffer_commit_read_(struct evbuffer *evbuf, ev_ssize_t nBytes) +{ + struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf); + struct evbuffer_chain **chainp; + size_t remaining, len; + unsigned i; + + EVBUFFER_LOCK(evbuf); + EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress); + EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? 
*/ + + evbuffer_unfreeze(evbuf, 0); + + chainp = evbuf->last_with_datap; + if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R)) + chainp = &(*chainp)->next; + remaining = nBytes; + for (i = 0; remaining > 0 && i < (unsigned)buf->n_buffers; ++i) { + EVUTIL_ASSERT(*chainp); + len = buf->buffers[i].len; + if (remaining < len) + len = remaining; + (*chainp)->off += len; + evbuf->last_with_datap = chainp; + remaining -= len; + chainp = &(*chainp)->next; + } + + pin_release(buf, EVBUFFER_MEM_PINNED_R); + + buf->read_in_progress = 0; + + evbuf->total_len += nBytes; + evbuf->n_add_for_cb += nBytes; + + evbuffer_invoke_callbacks_(evbuf); + + evbuffer_decref_and_unlock_(evbuf); +} + +void +evbuffer_commit_write_(struct evbuffer *evbuf, ev_ssize_t nBytes) +{ + struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf); + + EVBUFFER_LOCK(evbuf); + EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress); + evbuffer_unfreeze(evbuf, 1); + evbuffer_drain(evbuf, nBytes); + pin_release(buf,EVBUFFER_MEM_PINNED_W); + buf->write_in_progress = 0; + evbuffer_decref_and_unlock_(evbuf); +} + +struct evbuffer * +evbuffer_overlapped_new_(evutil_socket_t fd) +{ + struct evbuffer_overlapped *evo; + + evo = mm_calloc(1, sizeof(struct evbuffer_overlapped)); + if (!evo) + return NULL; + + LIST_INIT(&evo->buffer.callbacks); + evo->buffer.refcnt = 1; + evo->buffer.last_with_datap = &evo->buffer.first; + + evo->buffer.is_overlapped = 1; + evo->fd = fd; + + return &evo->buffer; +} + +int +evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t at_most, + struct event_overlapped *ol) +{ + struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf); + int r = -1; + int i; + struct evbuffer_chain *chain; + DWORD bytesSent; + + if (!buf) { + /* No buffer, or it isn't overlapped */ + return -1; + } + + EVBUFFER_LOCK(buf); + EVUTIL_ASSERT(!buf_o->read_in_progress); + if (buf->freeze_start || buf_o->write_in_progress) + goto done; + if (!buf->total_len) { + /* Nothing to write */ + r = 0; + goto done; + } else if (at_most < 0 || (size_t)at_most > buf->total_len) { + at_most = buf->total_len; + } + evbuffer_freeze(buf, 1); + + buf_o->first_pinned = NULL; + buf_o->n_buffers = 0; + memset(buf_o->buffers, 0, sizeof(buf_o->buffers)); + + chain = buf_o->first_pinned = buf->first; + + for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) { + WSABUF *b = &buf_o->buffers[i]; + b->buf = (char*)( chain->buffer + chain->misalign ); + evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W); + + if ((size_t)at_most > chain->off) { + /* XXXX Cast is safe for now, since win32 has no + mmaped chains. But later, we need to have this + add more WSAbufs if chain->off is greater than + ULONG_MAX */ + b->len = (unsigned long)chain->off; + at_most -= chain->off; + } else { + b->len = (unsigned long)at_most; + ++i; + break; + } + } + + buf_o->n_buffers = i; + evbuffer_incref_(buf); + if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0, + &ol->overlapped, NULL)) { + int error = WSAGetLastError(); + if (error != WSA_IO_PENDING) { + /* An actual error. 
*/ + pin_release(buf_o, EVBUFFER_MEM_PINNED_W); + evbuffer_unfreeze(buf, 1); + evbuffer_free(buf); /* decref */ + goto done; + } + } + + buf_o->write_in_progress = 1; + r = 0; +done: + EVBUFFER_UNLOCK(buf); + return r; +} + +int +evbuffer_launch_read_(struct evbuffer *buf, size_t at_most, + struct event_overlapped *ol) +{ + struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf); + int r = -1, i; + int nvecs; + int npin=0; + struct evbuffer_chain *chain=NULL, **chainp; + DWORD bytesRead; + DWORD flags = 0; + struct evbuffer_iovec vecs[MAX_WSABUFS]; + + if (!buf_o) + return -1; + EVBUFFER_LOCK(buf); + EVUTIL_ASSERT(!buf_o->write_in_progress); + if (buf->freeze_end || buf_o->read_in_progress) + goto done; + + buf_o->first_pinned = NULL; + buf_o->n_buffers = 0; + memset(buf_o->buffers, 0, sizeof(buf_o->buffers)); + + if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1) + goto done; + evbuffer_freeze(buf, 0); + + nvecs = evbuffer_read_setup_vecs_(buf, at_most, + vecs, MAX_WSABUFS, &chainp, 1); + for (i=0;ibuffers[i], + &vecs[i]); + } + + buf_o->n_buffers = nvecs; + buf_o->first_pinned = chain = *chainp; + + npin=0; + for ( ; chain; chain = chain->next) { + evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R); + ++npin; + } + EVUTIL_ASSERT(npin == nvecs); + + evbuffer_incref_(buf); + if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags, + &ol->overlapped, NULL)) { + int error = WSAGetLastError(); + if (error != WSA_IO_PENDING) { + /* An actual error. */ + pin_release(buf_o, EVBUFFER_MEM_PINNED_R); + evbuffer_unfreeze(buf, 0); + evbuffer_free(buf); /* decref */ + goto done; + } + } + + buf_o->read_in_progress = 1; + r = 0; +done: + EVBUFFER_UNLOCK(buf); + return r; +} + +evutil_socket_t +evbuffer_overlapped_get_fd_(struct evbuffer *buf) +{ + struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf); + return buf_o ? buf_o->fd : -1; +} + +void +evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd) +{ + struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf); + EVBUFFER_LOCK(buf); + /* XXX is this right?, should it cancel current I/O operations? */ + if (buf_o) + buf_o->fd = fd; + EVBUFFER_UNLOCK(buf); +} diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent-internal.h b/probe-busybox/libevent-2.1.11-stable/bufferevent-internal.h new file mode 100644 index 00000000..87ab9ad9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent-internal.h @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef BUFFEREVENT_INTERNAL_H_INCLUDED_ +#define BUFFEREVENT_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "event2/event_struct.h" +#include "evconfig-private.h" +#include "event2/util.h" +#include "defer-internal.h" +#include "evthread-internal.h" +#include "event2/thread.h" +#include "ratelim-internal.h" +#include "event2/bufferevent_struct.h" + +#include "ipv6-internal.h" +#ifdef _WIN32 +#include +#endif +#ifdef EVENT__HAVE_NETINET_IN_H +#include +#endif +#ifdef EVENT__HAVE_NETINET_IN6_H +#include +#endif + +/* These flags are reasons that we might be declining to actually enable + reading or writing on a bufferevent. + */ + +/* On a all bufferevents, for reading: used when we have read up to the + watermark value. + + On a filtering bufferevent, for writing: used when the underlying + bufferevent's write buffer has been filled up to its watermark + value. +*/ +#define BEV_SUSPEND_WM 0x01 +/* On a base bufferevent: when we have emptied a bandwidth buckets */ +#define BEV_SUSPEND_BW 0x02 +/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */ +#define BEV_SUSPEND_BW_GROUP 0x04 +/* On a socket bufferevent: can't do any operations while we're waiting for + * name lookup to finish. */ +#define BEV_SUSPEND_LOOKUP 0x08 +/* On a base bufferevent, for reading: used when a filter has choked this + * (underlying) bufferevent because it has stopped reading from it. */ +#define BEV_SUSPEND_FILT_READ 0x10 + +typedef ev_uint16_t bufferevent_suspend_flags; + +struct bufferevent_rate_limit_group { + /** List of all members in the group */ + LIST_HEAD(rlim_group_member_list, bufferevent_private) members; + /** Current limits for the group. */ + struct ev_token_bucket rate_limit; + struct ev_token_bucket_cfg rate_limit_cfg; + + /** True iff we don't want to read from any member of the group.until + * the token bucket refills. */ + unsigned read_suspended : 1; + /** True iff we don't want to write from any member of the group.until + * the token bucket refills. */ + unsigned write_suspended : 1; + /** True iff we were unable to suspend one of the bufferevents in the + * group for reading the last time we tried, and we should try + * again. */ + unsigned pending_unsuspend_read : 1; + /** True iff we were unable to suspend one of the bufferevents in the + * group for writing the last time we tried, and we should try + * again. */ + unsigned pending_unsuspend_write : 1; + + /*@{*/ + /** Total number of bytes read or written in this group since last + * reset. */ + ev_uint64_t total_read; + ev_uint64_t total_written; + /*@}*/ + + /** The number of bufferevents in the group. */ + int n_members; + + /** The smallest number of bytes that any member of the group should + * be limited to read or write at a time. */ + ev_ssize_t min_share; + ev_ssize_t configured_min_share; + + /** Timeout event that goes off once a tick, when the bucket is ready + * to refill. 
*/ + struct event master_refill_event; + + /** Seed for weak random number generator. Protected by 'lock' */ + struct evutil_weakrand_state weakrand_seed; + + /** Lock to protect the members of this group. This lock should nest + * within every bufferevent lock: if you are holding this lock, do + * not assume you can lock another bufferevent. */ + void *lock; +}; + +/** Fields for rate-limiting a single bufferevent. */ +struct bufferevent_rate_limit { + /* Linked-list elements for storing this bufferevent_private in a + * group. + * + * Note that this field is supposed to be protected by the group + * lock */ + LIST_ENTRY(bufferevent_private) next_in_group; + /** The rate-limiting group for this bufferevent, or NULL if it is + * only rate-limited on its own. */ + struct bufferevent_rate_limit_group *group; + + /* This bufferevent's current limits. */ + struct ev_token_bucket limit; + /* Pointer to the rate-limit configuration for this bufferevent. + * Can be shared. XXX reference-count this? */ + struct ev_token_bucket_cfg *cfg; + + /* Timeout event used when one this bufferevent's buckets are + * empty. */ + struct event refill_bucket_event; +}; + +/** Parts of the bufferevent structure that are shared among all bufferevent + * types, but not exposed in bufferevent_struct.h. */ +struct bufferevent_private { + /** The underlying bufferevent structure. */ + struct bufferevent bev; + + /** Evbuffer callback to enforce watermarks on input. */ + struct evbuffer_cb_entry *read_watermarks_cb; + + /** If set, we should free the lock when we free the bufferevent. */ + unsigned own_lock : 1; + + /** Flag: set if we have deferred callbacks and a read callback is + * pending. */ + unsigned readcb_pending : 1; + /** Flag: set if we have deferred callbacks and a write callback is + * pending. */ + unsigned writecb_pending : 1; + /** Flag: set if we are currently busy connecting. */ + unsigned connecting : 1; + /** Flag: set if a connect failed prematurely; this is a hack for + * getting around the bufferevent abstraction. */ + unsigned connection_refused : 1; + /** Set to the events pending if we have deferred callbacks and + * an events callback is pending. */ + short eventcb_pending; + + /** If set, read is suspended until one or more conditions are over. + * The actual value here is a bitfield of those conditions; see the + * BEV_SUSPEND_* flags above. */ + bufferevent_suspend_flags read_suspended; + + /** If set, writing is suspended until one or more conditions are over. + * The actual value here is a bitfield of those conditions; see the + * BEV_SUSPEND_* flags above. */ + bufferevent_suspend_flags write_suspended; + + /** Set to the current socket errno if we have deferred callbacks and + * an events callback is pending. */ + int errno_pending; + + /** The DNS error code for bufferevent_socket_connect_hostname */ + int dns_error; + + /** Used to implement deferred callbacks */ + struct event_callback deferred; + + /** The options this bufferevent was constructed with */ + enum bufferevent_options options; + + /** Current reference count for this bufferevent. */ + int refcnt; + + /** Lock for this bufferevent. Shared by the inbuf and the outbuf. + * If NULL, locking is disabled. */ + void *lock; + + /** No matter how big our bucket gets, don't try to read more than this + * much in a single read operation. */ + ev_ssize_t max_single_read; + + /** No matter how big our bucket gets, don't try to write more than this + * much in a single write operation. 
*/ + ev_ssize_t max_single_write; + + /** Rate-limiting information for this bufferevent */ + struct bufferevent_rate_limit *rate_limiting; + + /* Saved conn_addr, to extract IP address from it. + * + * Because some servers may reset/close connection without waiting clients, + * in that case we can't extract IP address even in close_cb. + * So we need to save it, just after we connected to remote server, or + * after resolving (to avoid extra dns requests during retrying, since UDP + * is slow) */ + union { + struct sockaddr_in6 in6; + struct sockaddr_in in; + } conn_address; + + struct evdns_getaddrinfo_request *dns_request; +}; + +/** Possible operations for a control callback. */ +enum bufferevent_ctrl_op { + BEV_CTRL_SET_FD, + BEV_CTRL_GET_FD, + BEV_CTRL_GET_UNDERLYING, + BEV_CTRL_CANCEL_ALL +}; + +/** Possible data types for a control callback */ +union bufferevent_ctrl_data { + void *ptr; + evutil_socket_t fd; +}; + +/** + Implementation table for a bufferevent: holds function pointers and other + information to make the various bufferevent types work. +*/ +struct bufferevent_ops { + /** The name of the bufferevent's type. */ + const char *type; + /** At what offset into the implementation type will we find a + bufferevent structure? + + Example: if the type is implemented as + struct bufferevent_x { + int extra_data; + struct bufferevent bev; + } + then mem_offset should be offsetof(struct bufferevent_x, bev) + */ + off_t mem_offset; + + /** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does + not need to adjust the 'enabled' field. Returns 0 on success, -1 + on failure. + */ + int (*enable)(struct bufferevent *, short); + + /** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does + not need to adjust the 'enabled' field. Returns 0 on success, -1 + on failure. + */ + int (*disable)(struct bufferevent *, short); + + /** Detatches the bufferevent from related data structures. Called as + * soon as its reference count reaches 0. */ + void (*unlink)(struct bufferevent *); + + /** Free any storage and deallocate any extra data or structures used + in this implementation. Called when the bufferevent is + finalized. + */ + void (*destruct)(struct bufferevent *); + + /** Called when the timeouts on the bufferevent have changed.*/ + int (*adj_timeouts)(struct bufferevent *); + + /** Called to flush data. */ + int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode); + + /** Called to access miscellaneous fields. */ + int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *); + +}; + +extern const struct bufferevent_ops bufferevent_ops_socket; +extern const struct bufferevent_ops bufferevent_ops_filter; +extern const struct bufferevent_ops bufferevent_ops_pair; + +#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket) +#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter) +#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair) + +#if defined(EVENT__HAVE_OPENSSL) +extern const struct bufferevent_ops bufferevent_ops_openssl; +#define BEV_IS_OPENSSL(bevp) ((bevp)->be_ops == &bufferevent_ops_openssl) +#else +#define BEV_IS_OPENSSL(bevp) 0 +#endif + +#ifdef _WIN32 +extern const struct bufferevent_ops bufferevent_ops_async; +#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async) +#else +#define BEV_IS_ASYNC(bevp) 0 +#endif + +/** Initialize the shared parts of a bufferevent. 
*/ +EVENT2_EXPORT_SYMBOL +int bufferevent_init_common_(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options); + +/** For internal use: temporarily stop all reads on bufev, until the conditions + * in 'what' are over. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); +/** For internal use: clear the conditions 'what' on bufev, and re-enable + * reading if there are no conditions left. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); + +/** For internal use: temporarily stop all writes on bufev, until the conditions + * in 'what' are over. */ +void bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); +/** For internal use: clear the conditions 'what' on bufev, and re-enable + * writing if there are no conditions left. */ +void bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); + +#define bufferevent_wm_suspend_read(b) \ + bufferevent_suspend_read_((b), BEV_SUSPEND_WM) +#define bufferevent_wm_unsuspend_read(b) \ + bufferevent_unsuspend_read_((b), BEV_SUSPEND_WM) + +/* + Disable a bufferevent. Equivalent to bufferevent_disable(), but + first resets 'connecting' flag to force EV_WRITE down for sure. + + XXXX this method will go away in the future; try not to add new users. + See comment in evhttp_connection_reset_() for discussion. + + @param bufev the bufferevent to be disabled + @param event any combination of EV_READ | EV_WRITE. + @return 0 if successful, or -1 if an error occurred + @see bufferevent_disable() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_disable_hard_(struct bufferevent *bufev, short event); + +/** Internal: Set up locking on a bufferevent. If lock is set, use it. + * Otherwise, use a new lock. */ +EVENT2_EXPORT_SYMBOL +int bufferevent_enable_locking_(struct bufferevent *bufev, void *lock); +/** Internal: backwards compat macro for the now public function + * Increment the reference count on bufev. */ +#define bufferevent_incref_(bufev) bufferevent_incref(bufev) +/** Internal: Lock bufev and increase its reference count. + * unlocking it otherwise. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_incref_and_lock_(struct bufferevent *bufev); +/** Internal: backwards compat macro for the now public function + * Decrement the reference count on bufev. Returns 1 if it freed + * the bufferevent.*/ +#define bufferevent_decref_(bufev) bufferevent_decref(bufev) + +/** Internal: Drop the reference count on bufev, freeing as necessary, and + * unlocking it otherwise. Returns 1 if it freed the bufferevent. */ +EVENT2_EXPORT_SYMBOL +int bufferevent_decref_and_unlock_(struct bufferevent *bufev); + +/** Internal: If callbacks are deferred and we have a read callback, schedule + * a readcb. Otherwise just run the readcb. Ignores watermarks. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_run_readcb_(struct bufferevent *bufev, int options); +/** Internal: If callbacks are deferred and we have a write callback, schedule + * a writecb. Otherwise just run the writecb. Ignores watermarks. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_run_writecb_(struct bufferevent *bufev, int options); +/** Internal: If callbacks are deferred and we have an eventcb, schedule + * it to run with events "what". Otherwise just run the eventcb. + * See bufferevent_trigger_event for meaning of "options". 
*/ +EVENT2_EXPORT_SYMBOL +void bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options); + +/** Internal: Run or schedule (if deferred or options contain + * BEV_TRIG_DEFER_CALLBACKS) I/O callbacks specified in iotype. + * Must already hold the bufev lock. Honors watermarks unless + * BEV_TRIG_IGNORE_WATERMARKS is in options. */ +static inline void bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options); + +/* Making this inline since all of the common-case calls to this function in + * libevent use constant arguments. */ +static inline void +bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options) +{ + if ((iotype & EV_READ) && ((options & BEV_TRIG_IGNORE_WATERMARKS) || + evbuffer_get_length(bufev->input) >= bufev->wm_read.low)) + bufferevent_run_readcb_(bufev, options); + if ((iotype & EV_WRITE) && ((options & BEV_TRIG_IGNORE_WATERMARKS) || + evbuffer_get_length(bufev->output) <= bufev->wm_write.low)) + bufferevent_run_writecb_(bufev, options); +} + +/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in + * which case add ev with no timeout. */ +EVENT2_EXPORT_SYMBOL +int bufferevent_add_event_(struct event *ev, const struct timeval *tv); + +/* ========= + * These next functions implement timeouts for bufferevents that aren't doing + * anything else with ev_read and ev_write, to handle timeouts. + * ========= */ +/** Internal use: Set up the ev_read and ev_write callbacks so that + * the other "generic_timeout" functions will work on it. Call this from + * the constructor function. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev); +/** Internal use: Add or delete the generic timeout events as appropriate. + * (If an event is enabled and a timeout is set, we add the event. Otherwise + * we delete it.) Call this from anything that changes the timeout values, + * that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */ +EVENT2_EXPORT_SYMBOL +int bufferevent_generic_adj_timeouts_(struct bufferevent *bev); +EVENT2_EXPORT_SYMBOL +int bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev); + +EVENT2_EXPORT_SYMBOL +enum bufferevent_options bufferevent_get_options_(struct bufferevent *bev); + +EVENT2_EXPORT_SYMBOL +const struct sockaddr* +bufferevent_socket_get_conn_address_(struct bufferevent *bev); + +EVENT2_EXPORT_SYMBOL +void +bufferevent_socket_set_conn_address_fd_(struct bufferevent *bev, evutil_socket_t fd); + +EVENT2_EXPORT_SYMBOL +void +bufferevent_socket_set_conn_address_(struct bufferevent *bev, struct sockaddr *addr, size_t addrlen); + + +/** Internal use: We have just successfully read data into an inbuf, so + * reset the read timeout (if any). */ +#define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \ + do { \ + if (evutil_timerisset(&(bev)->timeout_read)) \ + event_add(&(bev)->ev_read, &(bev)->timeout_read); \ + } while (0) +/** Internal use: We have just successfully written data from an inbuf, so + * reset the read timeout (if any). */ +#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \ + do { \ + if (evutil_timerisset(&(bev)->timeout_write)) \ + event_add(&(bev)->ev_write, &(bev)->timeout_write); \ + } while (0) +#define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \ + event_del(&(bev)->ev_read) +#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \ + event_del(&(bev)->ev_write) + + +/** Internal: Given a bufferevent, return its corresponding + * bufferevent_private. 
*/ +#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev) + +#ifdef EVENT__DISABLE_THREAD_SUPPORT +#define BEV_LOCK(b) EVUTIL_NIL_STMT_ +#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_ +#else +/** Internal: Grab the lock (if any) on a bufferevent */ +#define BEV_LOCK(b) do { \ + struct bufferevent_private *locking = BEV_UPCAST(b); \ + EVLOCK_LOCK(locking->lock, 0); \ + } while (0) + +/** Internal: Release the lock (if any) on a bufferevent */ +#define BEV_UNLOCK(b) do { \ + struct bufferevent_private *locking = BEV_UPCAST(b); \ + EVLOCK_UNLOCK(locking->lock, 0); \ + } while (0) +#endif + + +/* ==== For rate-limiting. */ + +EVENT2_EXPORT_SYMBOL +int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, + ev_ssize_t bytes); +EVENT2_EXPORT_SYMBOL +int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, + ev_ssize_t bytes); +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev); +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev); + +int bufferevent_ratelim_init_(struct bufferevent_private *bev); + +#ifdef __cplusplus +} +#endif + + +#endif /* BUFFEREVENT_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent.c b/probe-busybox/libevent-2.1.11-stable/bufferevent.c new file mode 100644 index 00000000..08c0486c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent.c @@ -0,0 +1,1036 @@ +/* + * Copyright (c) 2002-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#include +#include +#include +#include +#ifdef EVENT__HAVE_STDARG_H +#include +#endif + +#ifdef _WIN32 +#include +#endif + +#include "event2/util.h" +#include "event2/buffer.h" +#include "event2/buffer_compat.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_struct.h" +#include "event2/bufferevent_compat.h" +#include "event2/event.h" +#include "event-internal.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "bufferevent-internal.h" +#include "evbuffer-internal.h" +#include "util-internal.h" + +static void bufferevent_cancel_all_(struct bufferevent *bev); +static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_); + +void +bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + BEV_LOCK(bufev); + if (!bufev_private->read_suspended) + bufev->be_ops->disable(bufev, EV_READ); + bufev_private->read_suspended |= what; + BEV_UNLOCK(bufev); +} + +void +bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + BEV_LOCK(bufev); + bufev_private->read_suspended &= ~what; + if (!bufev_private->read_suspended && (bufev->enabled & EV_READ)) + bufev->be_ops->enable(bufev, EV_READ); + BEV_UNLOCK(bufev); +} + +void +bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + BEV_LOCK(bufev); + if (!bufev_private->write_suspended) + bufev->be_ops->disable(bufev, EV_WRITE); + bufev_private->write_suspended |= what; + BEV_UNLOCK(bufev); +} + +void +bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + BEV_LOCK(bufev); + bufev_private->write_suspended &= ~what; + if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE)) + bufev->be_ops->enable(bufev, EV_WRITE); + BEV_UNLOCK(bufev); +} + +/** + * Sometimes bufferevent's implementation can overrun high watermarks + * (one of examples is openssl) and in this case if the read callback + * will not handle enough data do over condition above the read + * callback will never be called again (due to suspend above). + * + * To avoid this we are scheduling read callback again here, but only + * from the user callback to avoid multiple scheduling: + * - when the data had been added to it + * - when the data had been drained from it (user specified read callback) + */ +static void bufferevent_inbuf_wm_check(struct bufferevent *bev) +{ + if (!bev->wm_read.high) + return; + if (!(bev->enabled & EV_READ)) + return; + if (evbuffer_get_length(bev->input) < bev->wm_read.high) + return; + + bufferevent_trigger(bev, EV_READ, BEV_OPT_DEFER_CALLBACKS); +} + +/* Callback to implement watermarks on the input buffer. Only enabled + * if the watermark is set. 
*/ +static void +bufferevent_inbuf_wm_cb(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + struct bufferevent *bufev = arg; + size_t size; + + size = evbuffer_get_length(buf); + + if (size >= bufev->wm_read.high) + bufferevent_wm_suspend_read(bufev); + else + bufferevent_wm_unsuspend_read(bufev); +} + +static void +bufferevent_run_deferred_callbacks_locked(struct event_callback *cb, void *arg) +{ + struct bufferevent_private *bufev_private = arg; + struct bufferevent *bufev = &bufev_private->bev; + + BEV_LOCK(bufev); + if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) && + bufev->errorcb) { + /* The "connected" happened before any reads or writes, so + send it first. */ + bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED; + bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg); + } + if (bufev_private->readcb_pending && bufev->readcb) { + bufev_private->readcb_pending = 0; + bufev->readcb(bufev, bufev->cbarg); + bufferevent_inbuf_wm_check(bufev); + } + if (bufev_private->writecb_pending && bufev->writecb) { + bufev_private->writecb_pending = 0; + bufev->writecb(bufev, bufev->cbarg); + } + if (bufev_private->eventcb_pending && bufev->errorcb) { + short what = bufev_private->eventcb_pending; + int err = bufev_private->errno_pending; + bufev_private->eventcb_pending = 0; + bufev_private->errno_pending = 0; + EVUTIL_SET_SOCKET_ERROR(err); + bufev->errorcb(bufev, what, bufev->cbarg); + } + bufferevent_decref_and_unlock_(bufev); +} + +static void +bufferevent_run_deferred_callbacks_unlocked(struct event_callback *cb, void *arg) +{ + struct bufferevent_private *bufev_private = arg; + struct bufferevent *bufev = &bufev_private->bev; + + BEV_LOCK(bufev); +#define UNLOCKED(stmt) \ + do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0) + + if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) && + bufev->errorcb) { + /* The "connected" happened before any reads or writes, so + send it first. 
*/ + bufferevent_event_cb errorcb = bufev->errorcb; + void *cbarg = bufev->cbarg; + bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED; + UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg)); + } + if (bufev_private->readcb_pending && bufev->readcb) { + bufferevent_data_cb readcb = bufev->readcb; + void *cbarg = bufev->cbarg; + bufev_private->readcb_pending = 0; + UNLOCKED(readcb(bufev, cbarg)); + bufferevent_inbuf_wm_check(bufev); + } + if (bufev_private->writecb_pending && bufev->writecb) { + bufferevent_data_cb writecb = bufev->writecb; + void *cbarg = bufev->cbarg; + bufev_private->writecb_pending = 0; + UNLOCKED(writecb(bufev, cbarg)); + } + if (bufev_private->eventcb_pending && bufev->errorcb) { + bufferevent_event_cb errorcb = bufev->errorcb; + void *cbarg = bufev->cbarg; + short what = bufev_private->eventcb_pending; + int err = bufev_private->errno_pending; + bufev_private->eventcb_pending = 0; + bufev_private->errno_pending = 0; + EVUTIL_SET_SOCKET_ERROR(err); + UNLOCKED(errorcb(bufev,what,cbarg)); + } + bufferevent_decref_and_unlock_(bufev); +#undef UNLOCKED +} + +#define SCHEDULE_DEFERRED(bevp) \ + do { \ + if (event_deferred_cb_schedule_( \ + (bevp)->bev.ev_base, \ + &(bevp)->deferred)) \ + bufferevent_incref_(&(bevp)->bev); \ + } while (0) + + +void +bufferevent_run_readcb_(struct bufferevent *bufev, int options) +{ + /* Requires that we hold the lock and a reference */ + struct bufferevent_private *p = BEV_UPCAST(bufev); + if (bufev->readcb == NULL) + return; + if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) { + p->readcb_pending = 1; + SCHEDULE_DEFERRED(p); + } else { + bufev->readcb(bufev, bufev->cbarg); + bufferevent_inbuf_wm_check(bufev); + } +} + +void +bufferevent_run_writecb_(struct bufferevent *bufev, int options) +{ + /* Requires that we hold the lock and a reference */ + struct bufferevent_private *p = BEV_UPCAST(bufev); + if (bufev->writecb == NULL) + return; + if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) { + p->writecb_pending = 1; + SCHEDULE_DEFERRED(p); + } else { + bufev->writecb(bufev, bufev->cbarg); + } +} + +#define BEV_TRIG_ALL_OPTS ( \ + BEV_TRIG_IGNORE_WATERMARKS| \ + BEV_TRIG_DEFER_CALLBACKS \ + ) + +void +bufferevent_trigger(struct bufferevent *bufev, short iotype, int options) +{ + bufferevent_incref_and_lock_(bufev); + bufferevent_trigger_nolock_(bufev, iotype, options&BEV_TRIG_ALL_OPTS); + bufferevent_decref_and_unlock_(bufev); +} + +void +bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options) +{ + /* Requires that we hold the lock and a reference */ + struct bufferevent_private *p = BEV_UPCAST(bufev); + if (bufev->errorcb == NULL) + return; + if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) { + p->eventcb_pending |= what; + p->errno_pending = EVUTIL_SOCKET_ERROR(); + SCHEDULE_DEFERRED(p); + } else { + bufev->errorcb(bufev, what, bufev->cbarg); + } +} + +void +bufferevent_trigger_event(struct bufferevent *bufev, short what, int options) +{ + bufferevent_incref_and_lock_(bufev); + bufferevent_run_eventcb_(bufev, what, options&BEV_TRIG_ALL_OPTS); + bufferevent_decref_and_unlock_(bufev); +} + +int +bufferevent_init_common_(struct bufferevent_private *bufev_private, + struct event_base *base, + const struct bufferevent_ops *ops, + enum bufferevent_options options) +{ + struct bufferevent *bufev = &bufev_private->bev; + + if (!bufev->input) { + if ((bufev->input = evbuffer_new()) == NULL) + goto err; + } + + if (!bufev->output) { + if ((bufev->output = evbuffer_new()) == NULL) + goto err; + } + + 
bufev_private->refcnt = 1; + bufev->ev_base = base; + + /* Disable timeouts. */ + evutil_timerclear(&bufev->timeout_read); + evutil_timerclear(&bufev->timeout_write); + + bufev->be_ops = ops; + + if (bufferevent_ratelim_init_(bufev_private)) + goto err; + + /* + * Set to EV_WRITE so that using bufferevent_write is going to + * trigger a callback. Reading needs to be explicitly enabled + * because otherwise no data will be available. + */ + bufev->enabled = EV_WRITE; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (options & BEV_OPT_THREADSAFE) { + if (bufferevent_enable_locking_(bufev, NULL) < 0) + goto err; + } +#endif + if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS)) + == BEV_OPT_UNLOCK_CALLBACKS) { + event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS"); + goto err; + } + if (options & BEV_OPT_UNLOCK_CALLBACKS) + event_deferred_cb_init_( + &bufev_private->deferred, + event_base_get_npriorities(base) / 2, + bufferevent_run_deferred_callbacks_unlocked, + bufev_private); + else + event_deferred_cb_init_( + &bufev_private->deferred, + event_base_get_npriorities(base) / 2, + bufferevent_run_deferred_callbacks_locked, + bufev_private); + + bufev_private->options = options; + + evbuffer_set_parent_(bufev->input, bufev); + evbuffer_set_parent_(bufev->output, bufev); + + return 0; + +err: + if (bufev->input) { + evbuffer_free(bufev->input); + bufev->input = NULL; + } + if (bufev->output) { + evbuffer_free(bufev->output); + bufev->output = NULL; + } + return -1; +} + +void +bufferevent_setcb(struct bufferevent *bufev, + bufferevent_data_cb readcb, bufferevent_data_cb writecb, + bufferevent_event_cb eventcb, void *cbarg) +{ + BEV_LOCK(bufev); + + bufev->readcb = readcb; + bufev->writecb = writecb; + bufev->errorcb = eventcb; + + bufev->cbarg = cbarg; + BEV_UNLOCK(bufev); +} + +void +bufferevent_getcb(struct bufferevent *bufev, + bufferevent_data_cb *readcb_ptr, + bufferevent_data_cb *writecb_ptr, + bufferevent_event_cb *eventcb_ptr, + void **cbarg_ptr) +{ + BEV_LOCK(bufev); + if (readcb_ptr) + *readcb_ptr = bufev->readcb; + if (writecb_ptr) + *writecb_ptr = bufev->writecb; + if (eventcb_ptr) + *eventcb_ptr = bufev->errorcb; + if (cbarg_ptr) + *cbarg_ptr = bufev->cbarg; + + BEV_UNLOCK(bufev); +} + +struct evbuffer * +bufferevent_get_input(struct bufferevent *bufev) +{ + return bufev->input; +} + +struct evbuffer * +bufferevent_get_output(struct bufferevent *bufev) +{ + return bufev->output; +} + +struct event_base * +bufferevent_get_base(struct bufferevent *bufev) +{ + return bufev->ev_base; +} + +int +bufferevent_get_priority(const struct bufferevent *bufev) +{ + if (event_initialized(&bufev->ev_read)) { + return event_get_priority(&bufev->ev_read); + } else { + return event_base_get_npriorities(bufev->ev_base) / 2; + } +} + +int +bufferevent_write(struct bufferevent *bufev, const void *data, size_t size) +{ + if (evbuffer_add(bufev->output, data, size) == -1) + return (-1); + + return 0; +} + +int +bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf) +{ + if (evbuffer_add_buffer(bufev->output, buf) == -1) + return (-1); + + return 0; +} + +size_t +bufferevent_read(struct bufferevent *bufev, void *data, size_t size) +{ + return (evbuffer_remove(bufev->input, data, size)); +} + +int +bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf) +{ + return (evbuffer_add_buffer(buf, bufev->input)); +} + +int +bufferevent_enable(struct bufferevent *bufev, short event) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + short 
impl_events = event; + int r = 0; + + bufferevent_incref_and_lock_(bufev); + if (bufev_private->read_suspended) + impl_events &= ~EV_READ; + if (bufev_private->write_suspended) + impl_events &= ~EV_WRITE; + + bufev->enabled |= event; + + if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0) + r = -1; + if (r) + event_debug(("%s: cannot enable 0x%hx on %p", __func__, event, bufev)); + + bufferevent_decref_and_unlock_(bufev); + return r; +} + +int +bufferevent_set_timeouts(struct bufferevent *bufev, + const struct timeval *tv_read, + const struct timeval *tv_write) +{ + int r = 0; + BEV_LOCK(bufev); + if (tv_read) { + bufev->timeout_read = *tv_read; + } else { + evutil_timerclear(&bufev->timeout_read); + } + if (tv_write) { + bufev->timeout_write = *tv_write; + } else { + evutil_timerclear(&bufev->timeout_write); + } + + if (bufev->be_ops->adj_timeouts) + r = bufev->be_ops->adj_timeouts(bufev); + BEV_UNLOCK(bufev); + + return r; +} + + +/* Obsolete; use bufferevent_set_timeouts */ +void +bufferevent_settimeout(struct bufferevent *bufev, + int timeout_read, int timeout_write) +{ + struct timeval tv_read, tv_write; + struct timeval *ptv_read = NULL, *ptv_write = NULL; + + memset(&tv_read, 0, sizeof(tv_read)); + memset(&tv_write, 0, sizeof(tv_write)); + + if (timeout_read) { + tv_read.tv_sec = timeout_read; + ptv_read = &tv_read; + } + if (timeout_write) { + tv_write.tv_sec = timeout_write; + ptv_write = &tv_write; + } + + bufferevent_set_timeouts(bufev, ptv_read, ptv_write); +} + + +int +bufferevent_disable_hard_(struct bufferevent *bufev, short event) +{ + int r = 0; + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + + BEV_LOCK(bufev); + bufev->enabled &= ~event; + + bufev_private->connecting = 0; + if (bufev->be_ops->disable(bufev, event) < 0) + r = -1; + + BEV_UNLOCK(bufev); + return r; +} + +int +bufferevent_disable(struct bufferevent *bufev, short event) +{ + int r = 0; + + BEV_LOCK(bufev); + bufev->enabled &= ~event; + + if (bufev->be_ops->disable(bufev, event) < 0) + r = -1; + if (r) + event_debug(("%s: cannot disable 0x%hx on %p", __func__, event, bufev)); + + BEV_UNLOCK(bufev); + return r; +} + +/* + * Sets the water marks + */ + +void +bufferevent_setwatermark(struct bufferevent *bufev, short events, + size_t lowmark, size_t highmark) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + + BEV_LOCK(bufev); + if (events & EV_WRITE) { + bufev->wm_write.low = lowmark; + bufev->wm_write.high = highmark; + } + + if (events & EV_READ) { + bufev->wm_read.low = lowmark; + bufev->wm_read.high = highmark; + + if (highmark) { + /* There is now a new high-water mark for read. + enable the callback if needed, and see if we should + suspend/bufferevent_wm_unsuspend. */ + + if (bufev_private->read_watermarks_cb == NULL) { + bufev_private->read_watermarks_cb = + evbuffer_add_cb(bufev->input, + bufferevent_inbuf_wm_cb, + bufev); + } + evbuffer_cb_set_flags(bufev->input, + bufev_private->read_watermarks_cb, + EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER); + + if (evbuffer_get_length(bufev->input) >= highmark) + bufferevent_wm_suspend_read(bufev); + else if (evbuffer_get_length(bufev->input) < highmark) + bufferevent_wm_unsuspend_read(bufev); + } else { + /* There is now no high-water mark for read. 
*/ + if (bufev_private->read_watermarks_cb) + evbuffer_cb_clear_flags(bufev->input, + bufev_private->read_watermarks_cb, + EVBUFFER_CB_ENABLED); + bufferevent_wm_unsuspend_read(bufev); + } + } + BEV_UNLOCK(bufev); +} + +int +bufferevent_getwatermark(struct bufferevent *bufev, short events, + size_t *lowmark, size_t *highmark) +{ + if (events == EV_WRITE) { + BEV_LOCK(bufev); + if (lowmark) + *lowmark = bufev->wm_write.low; + if (highmark) + *highmark = bufev->wm_write.high; + BEV_UNLOCK(bufev); + return 0; + } + + if (events == EV_READ) { + BEV_LOCK(bufev); + if (lowmark) + *lowmark = bufev->wm_read.low; + if (highmark) + *highmark = bufev->wm_read.high; + BEV_UNLOCK(bufev); + return 0; + } + return -1; +} + +int +bufferevent_flush(struct bufferevent *bufev, + short iotype, + enum bufferevent_flush_mode mode) +{ + int r = -1; + BEV_LOCK(bufev); + if (bufev->be_ops->flush) + r = bufev->be_ops->flush(bufev, iotype, mode); + BEV_UNLOCK(bufev); + return r; +} + +void +bufferevent_incref_and_lock_(struct bufferevent *bufev) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + BEV_LOCK(bufev); + ++bufev_private->refcnt; +} + +#if 0 +static void +bufferevent_transfer_lock_ownership_(struct bufferevent *donor, + struct bufferevent *recipient) +{ + struct bufferevent_private *d = BEV_UPCAST(donor); + struct bufferevent_private *r = BEV_UPCAST(recipient); + if (d->lock != r->lock) + return; + if (r->own_lock) + return; + if (d->own_lock) { + d->own_lock = 0; + r->own_lock = 1; + } +} +#endif + +int +bufferevent_decref_and_unlock_(struct bufferevent *bufev) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + int n_cbs = 0; +#define MAX_CBS 16 + struct event_callback *cbs[MAX_CBS]; + + EVUTIL_ASSERT(bufev_private->refcnt > 0); + + if (--bufev_private->refcnt) { + BEV_UNLOCK(bufev); + return 0; + } + + if (bufev->be_ops->unlink) + bufev->be_ops->unlink(bufev); + + /* Okay, we're out of references. Let's finalize this once all the + * callbacks are done running. */ + cbs[0] = &bufev->ev_read.ev_evcallback; + cbs[1] = &bufev->ev_write.ev_evcallback; + cbs[2] = &bufev_private->deferred; + n_cbs = 3; + if (bufev_private->rate_limiting) { + struct event *e = &bufev_private->rate_limiting->refill_bucket_event; + if (event_initialized(e)) + cbs[n_cbs++] = &e->ev_evcallback; + } + n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs); + n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs); + + event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs, + bufferevent_finalize_cb_); + +#undef MAX_CBS + BEV_UNLOCK(bufev); + + return 1; +} + +static void +bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_) +{ + struct bufferevent *bufev = arg_; + struct bufferevent *underlying; + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + + BEV_LOCK(bufev); + underlying = bufferevent_get_underlying(bufev); + + /* Clean up the shared info */ + if (bufev->be_ops->destruct) + bufev->be_ops->destruct(bufev); + + /* XXX what happens if refcnt for these buffers is > 1? + * The buffers can share a lock with this bufferevent object, + * but the lock might be destroyed below. 
*/ + /* evbuffer will free the callbacks */ + evbuffer_free(bufev->input); + evbuffer_free(bufev->output); + + if (bufev_private->rate_limiting) { + if (bufev_private->rate_limiting->group) + bufferevent_remove_from_rate_limit_group_internal_(bufev,0); + mm_free(bufev_private->rate_limiting); + bufev_private->rate_limiting = NULL; + } + + + BEV_UNLOCK(bufev); + + if (bufev_private->own_lock) + EVTHREAD_FREE_LOCK(bufev_private->lock, + EVTHREAD_LOCKTYPE_RECURSIVE); + + /* Free the actual allocated memory. */ + mm_free(((char*)bufev) - bufev->be_ops->mem_offset); + + /* Release the reference to underlying now that we no longer need the + * reference to it. We wait this long mainly in case our lock is + * shared with underlying. + * + * The 'destruct' function will also drop a reference to underlying + * if BEV_OPT_CLOSE_ON_FREE is set. + * + * XXX Should we/can we just refcount evbuffer/bufferevent locks? + * It would probably save us some headaches. + */ + if (underlying) + bufferevent_decref_(underlying); +} + +int +bufferevent_decref(struct bufferevent *bufev) +{ + BEV_LOCK(bufev); + return bufferevent_decref_and_unlock_(bufev); +} + +void +bufferevent_free(struct bufferevent *bufev) +{ + BEV_LOCK(bufev); + bufferevent_setcb(bufev, NULL, NULL, NULL, NULL); + bufferevent_cancel_all_(bufev); + bufferevent_decref_and_unlock_(bufev); +} + +void +bufferevent_incref(struct bufferevent *bufev) +{ + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + + /* XXX: now that this function is public, we might want to + * - return the count from this function + * - create a new function to atomically grab the current refcount + */ + BEV_LOCK(bufev); + ++bufev_private->refcnt; + BEV_UNLOCK(bufev); +} + +int +bufferevent_enable_locking_(struct bufferevent *bufev, void *lock) +{ +#ifdef EVENT__DISABLE_THREAD_SUPPORT + return -1; +#else + struct bufferevent *underlying; + + if (BEV_UPCAST(bufev)->lock) + return -1; + underlying = bufferevent_get_underlying(bufev); + + if (!lock && underlying && BEV_UPCAST(underlying)->lock) { + lock = BEV_UPCAST(underlying)->lock; + BEV_UPCAST(bufev)->lock = lock; + BEV_UPCAST(bufev)->own_lock = 0; + } else if (!lock) { + EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE); + if (!lock) + return -1; + BEV_UPCAST(bufev)->lock = lock; + BEV_UPCAST(bufev)->own_lock = 1; + } else { + BEV_UPCAST(bufev)->lock = lock; + BEV_UPCAST(bufev)->own_lock = 0; + } + evbuffer_enable_locking(bufev->input, lock); + evbuffer_enable_locking(bufev->output, lock); + + if (underlying && !BEV_UPCAST(underlying)->lock) + bufferevent_enable_locking_(underlying, lock); + + return 0; +#endif +} + +int +bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd) +{ + union bufferevent_ctrl_data d; + int res = -1; + d.fd = fd; + BEV_LOCK(bev); + if (bev->be_ops->ctrl) + res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d); + if (res) + event_debug(("%s: cannot set fd for %p to "EV_SOCK_FMT, __func__, bev, fd)); + BEV_UNLOCK(bev); + return res; +} + +evutil_socket_t +bufferevent_getfd(struct bufferevent *bev) +{ + union bufferevent_ctrl_data d; + int res = -1; + d.fd = -1; + BEV_LOCK(bev); + if (bev->be_ops->ctrl) + res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d); + if (res) + event_debug(("%s: cannot get fd for %p", __func__, bev)); + BEV_UNLOCK(bev); + return (res<0) ? 
-1 : d.fd; +} + +enum bufferevent_options +bufferevent_get_options_(struct bufferevent *bev) +{ + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + enum bufferevent_options options; + + BEV_LOCK(bev); + options = bev_p->options; + BEV_UNLOCK(bev); + return options; +} + + +static void +bufferevent_cancel_all_(struct bufferevent *bev) +{ + union bufferevent_ctrl_data d; + memset(&d, 0, sizeof(d)); + BEV_LOCK(bev); + if (bev->be_ops->ctrl) + bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d); + BEV_UNLOCK(bev); +} + +short +bufferevent_get_enabled(struct bufferevent *bufev) +{ + short r; + BEV_LOCK(bufev); + r = bufev->enabled; + BEV_UNLOCK(bufev); + return r; +} + +struct bufferevent * +bufferevent_get_underlying(struct bufferevent *bev) +{ + union bufferevent_ctrl_data d; + int res = -1; + d.ptr = NULL; + BEV_LOCK(bev); + if (bev->be_ops->ctrl) + res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d); + BEV_UNLOCK(bev); + return (res<0) ? NULL : d.ptr; +} + +static void +bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx) +{ + struct bufferevent *bev = ctx; + bufferevent_incref_and_lock_(bev); + bufferevent_disable(bev, EV_READ); + bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0); + bufferevent_decref_and_unlock_(bev); +} +static void +bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx) +{ + struct bufferevent *bev = ctx; + bufferevent_incref_and_lock_(bev); + bufferevent_disable(bev, EV_WRITE); + bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0); + bufferevent_decref_and_unlock_(bev); +} + +void +bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev) +{ + event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE, + bufferevent_generic_read_timeout_cb, bev); + event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE, + bufferevent_generic_write_timeout_cb, bev); +} + +int +bufferevent_generic_adj_timeouts_(struct bufferevent *bev) +{ + const short enabled = bev->enabled; + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + int r1=0, r2=0; + if ((enabled & EV_READ) && !bev_p->read_suspended && + evutil_timerisset(&bev->timeout_read)) + r1 = event_add(&bev->ev_read, &bev->timeout_read); + else + r1 = event_del(&bev->ev_read); + + if ((enabled & EV_WRITE) && !bev_p->write_suspended && + evutil_timerisset(&bev->timeout_write) && + evbuffer_get_length(bev->output)) + r2 = event_add(&bev->ev_write, &bev->timeout_write); + else + r2 = event_del(&bev->ev_write); + if (r1 < 0 || r2 < 0) + return -1; + return 0; +} + +int +bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev) +{ + int r = 0; + if (event_pending(&bev->ev_read, EV_READ, NULL)) { + if (evutil_timerisset(&bev->timeout_read)) { + if (bufferevent_add_event_(&bev->ev_read, &bev->timeout_read) < 0) + r = -1; + } else { + event_remove_timer(&bev->ev_read); + } + } + if (event_pending(&bev->ev_write, EV_WRITE, NULL)) { + if (evutil_timerisset(&bev->timeout_write)) { + if (bufferevent_add_event_(&bev->ev_write, &bev->timeout_write) < 0) + r = -1; + } else { + event_remove_timer(&bev->ev_write); + } + } + return r; +} + +int +bufferevent_add_event_(struct event *ev, const struct timeval *tv) +{ + if (!evutil_timerisset(tv)) + return event_add(ev, NULL); + else + return event_add(ev, tv); +} + +/* For use by user programs only; internally, we should be calling + either bufferevent_incref_and_lock_(), or BEV_LOCK. 
*/ +void +bufferevent_lock(struct bufferevent *bev) +{ + bufferevent_incref_and_lock_(bev); +} + +void +bufferevent_unlock(struct bufferevent *bev) +{ + bufferevent_decref_and_unlock_(bev); +} diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_async.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_async.c new file mode 100644 index 00000000..40c7c5e8 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_async.c @@ -0,0 +1,706 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#include +#include +#include +#include +#ifdef EVENT__HAVE_STDARG_H +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif + +#ifdef _WIN32 +#include +#include +#include +#endif + +#include + +#include "event2/util.h" +#include "event2/bufferevent.h" +#include "event2/buffer.h" +#include "event2/bufferevent_struct.h" +#include "event2/event.h" +#include "event2/util.h" +#include "event-internal.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "bufferevent-internal.h" +#include "util-internal.h" +#include "iocp-internal.h" + +#ifndef SO_UPDATE_CONNECT_CONTEXT +/* Mingw is sometimes missing this */ +#define SO_UPDATE_CONNECT_CONTEXT 0x7010 +#endif + +/* prototypes */ +static int be_async_enable(struct bufferevent *, short); +static int be_async_disable(struct bufferevent *, short); +static void be_async_destruct(struct bufferevent *); +static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode); +static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *); + +struct bufferevent_async { + struct bufferevent_private bev; + struct event_overlapped connect_overlapped; + struct event_overlapped read_overlapped; + struct event_overlapped write_overlapped; + size_t read_in_progress; + size_t write_in_progress; + unsigned ok : 1; + unsigned read_added : 1; + unsigned write_added : 1; +}; + +const struct bufferevent_ops bufferevent_ops_async = { + "socket_async", + evutil_offsetof(struct bufferevent_async, bev.bev), + be_async_enable, + be_async_disable, + NULL, /* Unlink */ + be_async_destruct, + bufferevent_generic_adj_timeouts_, + be_async_flush, + be_async_ctrl, +}; + +static inline void +be_async_run_eventcb(struct bufferevent *bev, short what, int options) +{ bufferevent_run_eventcb_(bev, what, options|BEV_TRIG_DEFER_CALLBACKS); } + +static inline void +be_async_trigger_nolock(struct bufferevent *bev, short what, int options) +{ bufferevent_trigger_nolock_(bev, what, options|BEV_TRIG_DEFER_CALLBACKS); } + +static inline int +fatal_error(int err) +{ + switch (err) { + /* We may have already associated this fd with a port. + * Let's hope it's this port, and that the error code + * for doing this neer changes. 
*/ + case ERROR_INVALID_PARAMETER: + return 0; + } + return 1; +} + +static inline struct bufferevent_async * +upcast(struct bufferevent *bev) +{ + struct bufferevent_async *bev_a; + if (!BEV_IS_ASYNC(bev)) + return NULL; + bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev); + return bev_a; +} + +static inline struct bufferevent_async * +upcast_connect(struct event_overlapped *eo) +{ + struct bufferevent_async *bev_a; + bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped); + EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev)); + return bev_a; +} + +static inline struct bufferevent_async * +upcast_read(struct event_overlapped *eo) +{ + struct bufferevent_async *bev_a; + bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped); + EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev)); + return bev_a; +} + +static inline struct bufferevent_async * +upcast_write(struct event_overlapped *eo) +{ + struct bufferevent_async *bev_a; + bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped); + EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev)); + return bev_a; +} + +static void +bev_async_del_write(struct bufferevent_async *beva) +{ + struct bufferevent *bev = &beva->bev.bev; + + if (beva->write_added) { + beva->write_added = 0; + event_base_del_virtual_(bev->ev_base); + } +} + +static void +bev_async_del_read(struct bufferevent_async *beva) +{ + struct bufferevent *bev = &beva->bev.bev; + + if (beva->read_added) { + beva->read_added = 0; + event_base_del_virtual_(bev->ev_base); + } +} + +static void +bev_async_add_write(struct bufferevent_async *beva) +{ + struct bufferevent *bev = &beva->bev.bev; + + if (!beva->write_added) { + beva->write_added = 1; + event_base_add_virtual_(bev->ev_base); + } +} + +static void +bev_async_add_read(struct bufferevent_async *beva) +{ + struct bufferevent *bev = &beva->bev.bev; + + if (!beva->read_added) { + beva->read_added = 1; + event_base_add_virtual_(bev->ev_base); + } +} + +static void +bev_async_consider_writing(struct bufferevent_async *beva) +{ + size_t at_most; + int limit; + struct bufferevent *bev = &beva->bev.bev; + + /* Don't write if there's a write in progress, or we do not + * want to write, or when there's nothing left to write. */ + if (beva->write_in_progress || beva->bev.connecting) + return; + if (!beva->ok || !(bev->enabled&EV_WRITE) || + !evbuffer_get_length(bev->output)) { + bev_async_del_write(beva); + return; + } + + at_most = evbuffer_get_length(bev->output); + + /* This is safe so long as bufferevent_get_write_max never returns + * more than INT_MAX. That's true for now. XXXX */ + limit = (int)bufferevent_get_write_max_(&beva->bev); + if (at_most >= (size_t)limit && limit >= 0) + at_most = limit; + + if (beva->bev.write_suspended) { + bev_async_del_write(beva); + return; + } + + /* XXXX doesn't respect low-water mark very well. */ + bufferevent_incref_(bev); + if (evbuffer_launch_write_(bev->output, at_most, + &beva->write_overlapped)) { + bufferevent_decref_(bev); + beva->ok = 0; + be_async_run_eventcb(bev, BEV_EVENT_ERROR, 0); + } else { + beva->write_in_progress = at_most; + bufferevent_decrement_write_buckets_(&beva->bev, at_most); + bev_async_add_write(beva); + } +} + +static void +bev_async_consider_reading(struct bufferevent_async *beva) +{ + size_t cur_size; + size_t read_high; + size_t at_most; + int limit; + struct bufferevent *bev = &beva->bev.bev; + + /* Don't read if there is a read in progress, or we do not + * want to read. 
*/ + if (beva->read_in_progress || beva->bev.connecting) + return; + if (!beva->ok || !(bev->enabled&EV_READ)) { + bev_async_del_read(beva); + return; + } + + /* Don't read if we're full */ + cur_size = evbuffer_get_length(bev->input); + read_high = bev->wm_read.high; + if (read_high) { + if (cur_size >= read_high) { + bev_async_del_read(beva); + return; + } + at_most = read_high - cur_size; + } else { + at_most = 16384; /* FIXME totally magic. */ + } + + /* XXXX This over-commits. */ + /* XXXX see also not above on cast on bufferevent_get_write_max_() */ + limit = (int)bufferevent_get_read_max_(&beva->bev); + if (at_most >= (size_t)limit && limit >= 0) + at_most = limit; + + if (beva->bev.read_suspended) { + bev_async_del_read(beva); + return; + } + + bufferevent_incref_(bev); + if (evbuffer_launch_read_(bev->input, at_most, &beva->read_overlapped)) { + beva->ok = 0; + be_async_run_eventcb(bev, BEV_EVENT_ERROR, 0); + bufferevent_decref_(bev); + } else { + beva->read_in_progress = at_most; + bufferevent_decrement_read_buckets_(&beva->bev, at_most); + bev_async_add_read(beva); + } + + return; +} + +static void +be_async_outbuf_callback(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + struct bufferevent *bev = arg; + struct bufferevent_async *bev_async = upcast(bev); + + /* If we added data to the outbuf and were not writing before, + * we may want to write now. */ + + bufferevent_incref_and_lock_(bev); + + if (cbinfo->n_added) + bev_async_consider_writing(bev_async); + + bufferevent_decref_and_unlock_(bev); +} + +static void +be_async_inbuf_callback(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + struct bufferevent *bev = arg; + struct bufferevent_async *bev_async = upcast(bev); + + /* If we drained data from the inbuf and were not reading before, + * we may want to read now */ + + bufferevent_incref_and_lock_(bev); + + if (cbinfo->n_deleted) + bev_async_consider_reading(bev_async); + + bufferevent_decref_and_unlock_(bev); +} + +static int +be_async_enable(struct bufferevent *buf, short what) +{ + struct bufferevent_async *bev_async = upcast(buf); + + if (!bev_async->ok) + return -1; + + if (bev_async->bev.connecting) { + /* Don't launch anything during connection attempts. */ + return 0; + } + + if (what & EV_READ) + BEV_RESET_GENERIC_READ_TIMEOUT(buf); + if (what & EV_WRITE) + BEV_RESET_GENERIC_WRITE_TIMEOUT(buf); + + /* If we newly enable reading or writing, and we aren't reading or + writing already, consider launching a new read or write. */ + + if (what & EV_READ) + bev_async_consider_reading(bev_async); + if (what & EV_WRITE) + bev_async_consider_writing(bev_async); + return 0; +} + +static int +be_async_disable(struct bufferevent *bev, short what) +{ + struct bufferevent_async *bev_async = upcast(bev); + /* XXXX If we disable reading or writing, we may want to consider + * canceling any in-progress read or write operation, though it might + * not work. 
*/ + + if (what & EV_READ) { + BEV_DEL_GENERIC_READ_TIMEOUT(bev); + bev_async_del_read(bev_async); + } + if (what & EV_WRITE) { + BEV_DEL_GENERIC_WRITE_TIMEOUT(bev); + bev_async_del_write(bev_async); + } + + return 0; +} + +static void +be_async_destruct(struct bufferevent *bev) +{ + struct bufferevent_async *bev_async = upcast(bev); + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + evutil_socket_t fd; + + EVUTIL_ASSERT(!upcast(bev)->write_in_progress && + !upcast(bev)->read_in_progress); + + bev_async_del_read(bev_async); + bev_async_del_write(bev_async); + + fd = evbuffer_overlapped_get_fd_(bev->input); + if (fd != (evutil_socket_t)EVUTIL_INVALID_SOCKET && + (bev_p->options & BEV_OPT_CLOSE_ON_FREE)) { + evutil_closesocket(fd); + evbuffer_overlapped_set_fd_(bev->input, EVUTIL_INVALID_SOCKET); + } +} + +/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so + * we use WSAGetOverlappedResult to translate. */ +static void +bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo) +{ + DWORD bytes, flags; + evutil_socket_t fd; + + fd = evbuffer_overlapped_get_fd_(bev->input); + WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags); +} + +static int +be_async_flush(struct bufferevent *bev, short what, + enum bufferevent_flush_mode mode) +{ + return 0; +} + +static void +connect_complete(struct event_overlapped *eo, ev_uintptr_t key, + ev_ssize_t nbytes, int ok) +{ + struct bufferevent_async *bev_a = upcast_connect(eo); + struct bufferevent *bev = &bev_a->bev.bev; + evutil_socket_t sock; + + BEV_LOCK(bev); + + EVUTIL_ASSERT(bev_a->bev.connecting); + bev_a->bev.connecting = 0; + sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input); + /* XXXX Handle error? */ + setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0); + + if (ok) + bufferevent_async_set_connected_(bev); + else + bev_async_set_wsa_error(bev, eo); + + be_async_run_eventcb(bev, ok ? 
BEV_EVENT_CONNECTED : BEV_EVENT_ERROR, 0); + + event_base_del_virtual_(bev->ev_base); + + bufferevent_decref_and_unlock_(bev); +} + +static void +read_complete(struct event_overlapped *eo, ev_uintptr_t key, + ev_ssize_t nbytes, int ok) +{ + struct bufferevent_async *bev_a = upcast_read(eo); + struct bufferevent *bev = &bev_a->bev.bev; + short what = BEV_EVENT_READING; + ev_ssize_t amount_unread; + BEV_LOCK(bev); + EVUTIL_ASSERT(bev_a->read_in_progress); + + amount_unread = bev_a->read_in_progress - nbytes; + evbuffer_commit_read_(bev->input, nbytes); + bev_a->read_in_progress = 0; + if (amount_unread) + bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread); + + if (!ok) + bev_async_set_wsa_error(bev, eo); + + if (bev_a->ok) { + if (ok && nbytes) { + BEV_RESET_GENERIC_READ_TIMEOUT(bev); + be_async_trigger_nolock(bev, EV_READ, 0); + bev_async_consider_reading(bev_a); + } else if (!ok) { + what |= BEV_EVENT_ERROR; + bev_a->ok = 0; + be_async_run_eventcb(bev, what, 0); + } else if (!nbytes) { + what |= BEV_EVENT_EOF; + bev_a->ok = 0; + be_async_run_eventcb(bev, what, 0); + } + } + + bufferevent_decref_and_unlock_(bev); +} + +static void +write_complete(struct event_overlapped *eo, ev_uintptr_t key, + ev_ssize_t nbytes, int ok) +{ + struct bufferevent_async *bev_a = upcast_write(eo); + struct bufferevent *bev = &bev_a->bev.bev; + short what = BEV_EVENT_WRITING; + ev_ssize_t amount_unwritten; + + BEV_LOCK(bev); + EVUTIL_ASSERT(bev_a->write_in_progress); + + amount_unwritten = bev_a->write_in_progress - nbytes; + evbuffer_commit_write_(bev->output, nbytes); + bev_a->write_in_progress = 0; + + if (amount_unwritten) + bufferevent_decrement_write_buckets_(&bev_a->bev, + -amount_unwritten); + + + if (!ok) + bev_async_set_wsa_error(bev, eo); + + if (bev_a->ok) { + if (ok && nbytes) { + BEV_RESET_GENERIC_WRITE_TIMEOUT(bev); + be_async_trigger_nolock(bev, EV_WRITE, 0); + bev_async_consider_writing(bev_a); + } else if (!ok) { + what |= BEV_EVENT_ERROR; + bev_a->ok = 0; + be_async_run_eventcb(bev, what, 0); + } else if (!nbytes) { + what |= BEV_EVENT_EOF; + bev_a->ok = 0; + be_async_run_eventcb(bev, what, 0); + } + } + + bufferevent_decref_and_unlock_(bev); +} + +struct bufferevent * +bufferevent_async_new_(struct event_base *base, + evutil_socket_t fd, int options) +{ + struct bufferevent_async *bev_a; + struct bufferevent *bev; + struct event_iocp_port *iocp; + + options |= BEV_OPT_THREADSAFE; + + if (!(iocp = event_base_get_iocp_(base))) + return NULL; + + if (fd >= 0 && event_iocp_port_associate_(iocp, fd, 1)<0) { + if (fatal_error(GetLastError())) + return NULL; + } + + if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async)))) + return NULL; + + bev = &bev_a->bev.bev; + if (!(bev->input = evbuffer_overlapped_new_(fd))) { + mm_free(bev_a); + return NULL; + } + if (!(bev->output = evbuffer_overlapped_new_(fd))) { + evbuffer_free(bev->input); + mm_free(bev_a); + return NULL; + } + + if (bufferevent_init_common_(&bev_a->bev, base, &bufferevent_ops_async, + options)<0) + goto err; + + evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev); + evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev); + + event_overlapped_init_(&bev_a->connect_overlapped, connect_complete); + event_overlapped_init_(&bev_a->read_overlapped, read_complete); + event_overlapped_init_(&bev_a->write_overlapped, write_complete); + + bufferevent_init_generic_timeout_cbs_(bev); + + bev_a->ok = fd >= 0; + + return bev; +err: + bufferevent_free(&bev_a->bev.bev); + return NULL; +} + +void 
+bufferevent_async_set_connected_(struct bufferevent *bev) +{ + struct bufferevent_async *bev_async = upcast(bev); + bev_async->ok = 1; + /* Now's a good time to consider reading/writing */ + be_async_enable(bev, bev->enabled); +} + +int +bufferevent_async_can_connect_(struct bufferevent *bev) +{ + const struct win32_extension_fns *ext = + event_get_win32_extension_fns_(); + + if (BEV_IS_ASYNC(bev) && + event_base_get_iocp_(bev->ev_base) && + ext && ext->ConnectEx) + return 1; + + return 0; +} + +int +bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd, + const struct sockaddr *sa, int socklen) +{ + BOOL rc; + struct bufferevent_async *bev_async = upcast(bev); + struct sockaddr_storage ss; + const struct win32_extension_fns *ext = + event_get_win32_extension_fns_(); + + EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL); + + /* ConnectEx() requires that the socket be bound to an address + * with bind() before using, otherwise it will fail. We attempt + * to issue a bind() here, taking into account that the error + * code is set to WSAEINVAL when the socket is already bound. */ + memset(&ss, 0, sizeof(ss)); + if (sa->sa_family == AF_INET) { + struct sockaddr_in *sin = (struct sockaddr_in *)&ss; + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = INADDR_ANY; + } else if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; + sin6->sin6_family = AF_INET6; + sin6->sin6_addr = in6addr_any; + } else { + /* Well, the user will have to bind() */ + return -1; + } + if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 && + WSAGetLastError() != WSAEINVAL) + return -1; + + event_base_add_virtual_(bev->ev_base); + bufferevent_incref_(bev); + rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL, + &bev_async->connect_overlapped.overlapped); + if (rc || WSAGetLastError() == ERROR_IO_PENDING) + return 0; + + event_base_del_virtual_(bev->ev_base); + bufferevent_decref_(bev); + + return -1; +} + +static int +be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op, + union bufferevent_ctrl_data *data) +{ + switch (op) { + case BEV_CTRL_GET_FD: + data->fd = evbuffer_overlapped_get_fd_(bev->input); + return 0; + case BEV_CTRL_SET_FD: { + struct bufferevent_async *bev_a = upcast(bev); + struct event_iocp_port *iocp; + + if (data->fd == evbuffer_overlapped_get_fd_(bev->input)) + return 0; + if (!(iocp = event_base_get_iocp_(bev->ev_base))) + return -1; + if (event_iocp_port_associate_(iocp, data->fd, 1) < 0) { + if (fatal_error(GetLastError())) + return -1; + } + evbuffer_overlapped_set_fd_(bev->input, data->fd); + evbuffer_overlapped_set_fd_(bev->output, data->fd); + bev_a->ok = data->fd >= 0; + return 0; + } + case BEV_CTRL_CANCEL_ALL: { + struct bufferevent_async *bev_a = upcast(bev); + evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input); + if (fd != (evutil_socket_t)EVUTIL_INVALID_SOCKET && + (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) { + closesocket(fd); + evbuffer_overlapped_set_fd_(bev->input, EVUTIL_INVALID_SOCKET); + } + bev_a->ok = 0; + return 0; + } + case BEV_CTRL_GET_UNDERLYING: + default: + return -1; + } +} + + diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_filter.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_filter.c new file mode 100644 index 00000000..a7bdeddd --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_filter.c @@ -0,0 +1,625 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * Copyright (c) 2002-2006 Niels Provos + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#include "event2/event-config.h"
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+
+/* prototypes */
+static int be_filter_enable(struct bufferevent *, short);
+static int be_filter_disable(struct bufferevent *, short);
+static void be_filter_unlink(struct bufferevent *);
+static void be_filter_destruct(struct bufferevent *);
+
+static void be_filter_readcb(struct bufferevent *, void *);
+static void be_filter_writecb(struct bufferevent *, void *);
+static void be_filter_eventcb(struct bufferevent *, short, void *);
+static int be_filter_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode);
+static int be_filter_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+static void bufferevent_filtered_inbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo, void *arg);
+
+static void bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *info, void *arg);
+
+struct bufferevent_filtered {
+ struct bufferevent_private bev;
+
+ /** The bufferevent that we read/write filtered data from/to. */
+ struct bufferevent *underlying;
+ /** A callback on our inbuf to notice when somebody removes data */
+ struct evbuffer_cb_entry *inbuf_cb;
+ /** A callback on our outbuf to notice when somebody adds data */
+ struct evbuffer_cb_entry *outbuf_cb;
+ /** True iff we have received an EOF callback from the underlying
+ * bufferevent. */
+ unsigned got_eof;
+
+ /** Function to free context when we're done.
*/ + void (*free_context)(void *); + /** Input filter */ + bufferevent_filter_cb process_in; + /** Output filter */ + bufferevent_filter_cb process_out; + /** User-supplied argument to the filters. */ + void *context; +}; + +const struct bufferevent_ops bufferevent_ops_filter = { + "filter", + evutil_offsetof(struct bufferevent_filtered, bev.bev), + be_filter_enable, + be_filter_disable, + be_filter_unlink, + be_filter_destruct, + bufferevent_generic_adj_timeouts_, + be_filter_flush, + be_filter_ctrl, +}; + +/* Given a bufferevent that's really the bev filter of a bufferevent_filtered, + * return that bufferevent_filtered. Returns NULL otherwise.*/ +static inline struct bufferevent_filtered * +upcast(struct bufferevent *bev) +{ + struct bufferevent_filtered *bev_f; + if (!BEV_IS_FILTER(bev)) + return NULL; + bev_f = (void*)( ((char*)bev) - + evutil_offsetof(struct bufferevent_filtered, bev.bev)); + EVUTIL_ASSERT(BEV_IS_FILTER(&bev_f->bev.bev)); + return bev_f; +} + +#define downcast(bev_f) (&(bev_f)->bev.bev) + +/** Return 1 iff bevf's underlying bufferevent's output buffer is at or + * over its high watermark such that we should not write to it in a given + * flush mode. */ +static int +be_underlying_writebuf_full(struct bufferevent_filtered *bevf, + enum bufferevent_flush_mode state) +{ + struct bufferevent *u = bevf->underlying; + return state == BEV_NORMAL && + u->wm_write.high && + evbuffer_get_length(u->output) >= u->wm_write.high; +} + +/** Return 1 if our input buffer is at or over its high watermark such that we + * should not write to it in a given flush mode. */ +static int +be_readbuf_full(struct bufferevent_filtered *bevf, + enum bufferevent_flush_mode state) +{ + struct bufferevent *bufev = downcast(bevf); + return state == BEV_NORMAL && + bufev->wm_read.high && + evbuffer_get_length(bufev->input) >= bufev->wm_read.high; +} + + +/* Filter to use when we're created with a NULL filter. 
*/ +static enum bufferevent_filter_result +be_null_filter(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t lim, + enum bufferevent_flush_mode state, void *ctx) +{ + (void)state; + if (evbuffer_remove_buffer(src, dst, lim) >= 0) + return BEV_OK; + else + return BEV_ERROR; +} + +struct bufferevent * +bufferevent_filter_new(struct bufferevent *underlying, + bufferevent_filter_cb input_filter, + bufferevent_filter_cb output_filter, + int options, + void (*free_context)(void *), + void *ctx) +{ + struct bufferevent_filtered *bufev_f; + int tmp_options = options & ~BEV_OPT_THREADSAFE; + + if (!underlying) + return NULL; + + if (!input_filter) + input_filter = be_null_filter; + if (!output_filter) + output_filter = be_null_filter; + + bufev_f = mm_calloc(1, sizeof(struct bufferevent_filtered)); + if (!bufev_f) + return NULL; + + if (bufferevent_init_common_(&bufev_f->bev, underlying->ev_base, + &bufferevent_ops_filter, tmp_options) < 0) { + mm_free(bufev_f); + return NULL; + } + if (options & BEV_OPT_THREADSAFE) { + bufferevent_enable_locking_(downcast(bufev_f), NULL); + } + + bufev_f->underlying = underlying; + + bufev_f->process_in = input_filter; + bufev_f->process_out = output_filter; + bufev_f->free_context = free_context; + bufev_f->context = ctx; + + bufferevent_setcb(bufev_f->underlying, + be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f); + + bufev_f->inbuf_cb = evbuffer_add_cb(downcast(bufev_f)->input, + bufferevent_filtered_inbuf_cb, bufev_f); + evbuffer_cb_clear_flags(downcast(bufev_f)->input, bufev_f->inbuf_cb, + EVBUFFER_CB_ENABLED); + + bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output, + bufferevent_filtered_outbuf_cb, bufev_f); + + bufferevent_init_generic_timeout_cbs_(downcast(bufev_f)); + bufferevent_incref_(underlying); + + bufferevent_enable(underlying, EV_READ|EV_WRITE); + bufferevent_suspend_read_(underlying, BEV_SUSPEND_FILT_READ); + + return downcast(bufev_f); +} + +static void +be_filter_unlink(struct bufferevent *bev) +{ + struct bufferevent_filtered *bevf = upcast(bev); + EVUTIL_ASSERT(bevf); + + if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) { + /* Yes, there is also a decref in bufferevent_decref_. + * That decref corresponds to the incref when we set + * underlying for the first time. This decref is an + * extra one to remove the last reference. 
+ */ + if (BEV_UPCAST(bevf->underlying)->refcnt < 2) { + event_warnx("BEV_OPT_CLOSE_ON_FREE set on an " + "bufferevent with too few references"); + } else { + bufferevent_free(bevf->underlying); + } + } else { + if (bevf->underlying) { + if (bevf->underlying->errorcb == be_filter_eventcb) + bufferevent_setcb(bevf->underlying, + NULL, NULL, NULL, NULL); + bufferevent_unsuspend_read_(bevf->underlying, + BEV_SUSPEND_FILT_READ); + } + } +} + +static void +be_filter_destruct(struct bufferevent *bev) +{ + struct bufferevent_filtered *bevf = upcast(bev); + EVUTIL_ASSERT(bevf); + if (bevf->free_context) + bevf->free_context(bevf->context); + + if (bevf->inbuf_cb) + evbuffer_remove_cb_entry(bev->input, bevf->inbuf_cb); + + if (bevf->outbuf_cb) + evbuffer_remove_cb_entry(bev->output, bevf->outbuf_cb); +} + +static int +be_filter_enable(struct bufferevent *bev, short event) +{ + struct bufferevent_filtered *bevf = upcast(bev); + if (event & EV_WRITE) + BEV_RESET_GENERIC_WRITE_TIMEOUT(bev); + + if (event & EV_READ) { + BEV_RESET_GENERIC_READ_TIMEOUT(bev); + bufferevent_unsuspend_read_(bevf->underlying, + BEV_SUSPEND_FILT_READ); + } + return 0; +} + +static int +be_filter_disable(struct bufferevent *bev, short event) +{ + struct bufferevent_filtered *bevf = upcast(bev); + if (event & EV_WRITE) + BEV_DEL_GENERIC_WRITE_TIMEOUT(bev); + if (event & EV_READ) { + BEV_DEL_GENERIC_READ_TIMEOUT(bev); + bufferevent_suspend_read_(bevf->underlying, + BEV_SUSPEND_FILT_READ); + } + return 0; +} + +static enum bufferevent_filter_result +be_filter_process_input(struct bufferevent_filtered *bevf, + enum bufferevent_flush_mode state, + int *processed_out) +{ + enum bufferevent_filter_result res; + struct bufferevent *bev = downcast(bevf); + + if (state == BEV_NORMAL) { + /* If we're in 'normal' mode, don't urge data on the filter + * unless we're reading data and under our high-water mark.*/ + if (!(bev->enabled & EV_READ) || + be_readbuf_full(bevf, state)) + return BEV_OK; + } + + do { + ev_ssize_t limit = -1; + if (state == BEV_NORMAL && bev->wm_read.high) + limit = bev->wm_read.high - + evbuffer_get_length(bev->input); + + res = bevf->process_in(bevf->underlying->input, + bev->input, limit, state, bevf->context); + + if (res == BEV_OK) + *processed_out = 1; + } while (res == BEV_OK && + (bev->enabled & EV_READ) && + evbuffer_get_length(bevf->underlying->input) && + !be_readbuf_full(bevf, state)); + + if (*processed_out) + BEV_RESET_GENERIC_READ_TIMEOUT(bev); + + return res; +} + + +static enum bufferevent_filter_result +be_filter_process_output(struct bufferevent_filtered *bevf, + enum bufferevent_flush_mode state, + int *processed_out) +{ + /* Requires references and lock: might call writecb */ + enum bufferevent_filter_result res = BEV_OK; + struct bufferevent *bufev = downcast(bevf); + int again = 0; + + if (state == BEV_NORMAL) { + /* If we're in 'normal' mode, don't urge data on the + * filter unless we're writing data, and the underlying + * bufferevent is accepting data, and we have data to + * give the filter. If we're in 'flush' or 'finish', + * call the filter no matter what. */ + if (!(bufev->enabled & EV_WRITE) || + be_underlying_writebuf_full(bevf, state) || + !evbuffer_get_length(bufev->output)) + return BEV_OK; + } + + /* disable the callback that calls this function + when the user adds to the output buffer. 
*/ + evbuffer_cb_clear_flags(bufev->output, bevf->outbuf_cb, + EVBUFFER_CB_ENABLED); + + do { + int processed = 0; + again = 0; + + do { + ev_ssize_t limit = -1; + if (state == BEV_NORMAL && + bevf->underlying->wm_write.high) + limit = bevf->underlying->wm_write.high - + evbuffer_get_length(bevf->underlying->output); + + res = bevf->process_out(downcast(bevf)->output, + bevf->underlying->output, + limit, + state, + bevf->context); + + if (res == BEV_OK) + processed = *processed_out = 1; + } while (/* Stop if the filter wasn't successful...*/ + res == BEV_OK && + /* Or if we aren't writing any more. */ + (bufev->enabled & EV_WRITE) && + /* Of if we have nothing more to write and we are + * not flushing. */ + evbuffer_get_length(bufev->output) && + /* Or if we have filled the underlying output buffer. */ + !be_underlying_writebuf_full(bevf,state)); + + if (processed) { + /* call the write callback.*/ + bufferevent_trigger_nolock_(bufev, EV_WRITE, 0); + + if (res == BEV_OK && + (bufev->enabled & EV_WRITE) && + evbuffer_get_length(bufev->output) && + !be_underlying_writebuf_full(bevf, state)) { + again = 1; + } + } + } while (again); + + /* reenable the outbuf_cb */ + evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb, + EVBUFFER_CB_ENABLED); + + if (*processed_out) + BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev); + + return res; +} + +/* Called when the size of our outbuf changes. */ +static void +bufferevent_filtered_outbuf_cb(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, void *arg) +{ + struct bufferevent_filtered *bevf = arg; + struct bufferevent *bev = downcast(bevf); + + if (cbinfo->n_added) { + int processed_any = 0; + /* Somebody added more data to the output buffer. Try to + * process it, if we should. */ + bufferevent_incref_and_lock_(bev); + be_filter_process_output(bevf, BEV_NORMAL, &processed_any); + bufferevent_decref_and_unlock_(bev); + } +} + +static void +be_filter_read_nolock_(struct bufferevent *underlying, void *me_) +{ + struct bufferevent_filtered *bevf = me_; + enum bufferevent_filter_result res; + enum bufferevent_flush_mode state; + struct bufferevent *bufev = downcast(bevf); + struct bufferevent_private *bufev_private = BEV_UPCAST(bufev); + int processed_any = 0; + + // It's possible our refcount is 0 at this point if another thread free'd our filterevent + EVUTIL_ASSERT(bufev_private->refcnt >= 0); + + // If our refcount is > 0 + if (bufev_private->refcnt > 0) { + + if (bevf->got_eof) + state = BEV_FINISHED; + else + state = BEV_NORMAL; + + /* XXXX use return value */ + res = be_filter_process_input(bevf, state, &processed_any); + (void)res; + + /* XXX This should be in process_input, not here. There are + * other places that can call process-input, and they should + * force readcb calls as needed. */ + if (processed_any) { + bufferevent_trigger_nolock_(bufev, EV_READ, 0); + if (evbuffer_get_length(underlying->input) > 0 && + be_readbuf_full(bevf, state)) { + /* data left in underlying buffer and filter input buffer + * hit its read high watermark. + * Schedule callback to avoid data gets stuck in underlying + * input buffer. + */ + evbuffer_cb_set_flags(bufev->input, bevf->inbuf_cb, + EVBUFFER_CB_ENABLED); + } + } + } +} + +/* Called when the size of our inbuf changes. 
*/ +static void +bufferevent_filtered_inbuf_cb(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, void *arg) +{ + struct bufferevent_filtered *bevf = arg; + enum bufferevent_flush_mode state; + struct bufferevent *bev = downcast(bevf); + + BEV_LOCK(bev); + + if (bevf->got_eof) + state = BEV_FINISHED; + else + state = BEV_NORMAL; + + + if (!be_readbuf_full(bevf, state)) { + /* opportunity to read data which was left in underlying + * input buffer because filter input buffer hit read + * high watermark. + */ + evbuffer_cb_clear_flags(bev->input, bevf->inbuf_cb, + EVBUFFER_CB_ENABLED); + if (evbuffer_get_length(bevf->underlying->input) > 0) + be_filter_read_nolock_(bevf->underlying, bevf); + } + + BEV_UNLOCK(bev); +} + +/* Called when the underlying socket has read. */ +static void +be_filter_readcb(struct bufferevent *underlying, void *me_) +{ + struct bufferevent_filtered *bevf = me_; + struct bufferevent *bev = downcast(bevf); + + BEV_LOCK(bev); + + be_filter_read_nolock_(underlying, me_); + + BEV_UNLOCK(bev); +} + +/* Called when the underlying socket has drained enough that we can write to + it. */ +static void +be_filter_writecb(struct bufferevent *underlying, void *me_) +{ + struct bufferevent_filtered *bevf = me_; + struct bufferevent *bev = downcast(bevf); + struct bufferevent_private *bufev_private = BEV_UPCAST(bev); + int processed_any = 0; + + BEV_LOCK(bev); + + // It's possible our refcount is 0 at this point if another thread free'd our filterevent + EVUTIL_ASSERT(bufev_private->refcnt >= 0); + + // If our refcount is > 0 + if (bufev_private->refcnt > 0) { + be_filter_process_output(bevf, BEV_NORMAL, &processed_any); + } + + BEV_UNLOCK(bev); +} + +/* Called when the underlying socket has given us an error */ +static void +be_filter_eventcb(struct bufferevent *underlying, short what, void *me_) +{ + struct bufferevent_filtered *bevf = me_; + struct bufferevent *bev = downcast(bevf); + struct bufferevent_private *bufev_private = BEV_UPCAST(bev); + + BEV_LOCK(bev); + + // It's possible our refcount is 0 at this point if another thread free'd our filterevent + EVUTIL_ASSERT(bufev_private->refcnt >= 0); + + // If our refcount is > 0 + if (bufev_private->refcnt > 0) { + + /* All we can really to is tell our own eventcb. */ + bufferevent_run_eventcb_(bev, what, 0); + } + + BEV_UNLOCK(bev); +} + +static int +be_filter_flush(struct bufferevent *bufev, + short iotype, enum bufferevent_flush_mode mode) +{ + struct bufferevent_filtered *bevf = upcast(bufev); + int processed_any = 0; + EVUTIL_ASSERT(bevf); + + bufferevent_incref_and_lock_(bufev); + + if (iotype & EV_READ) { + be_filter_process_input(bevf, mode, &processed_any); + } + if (iotype & EV_WRITE) { + be_filter_process_output(bevf, mode, &processed_any); + } + /* XXX check the return value? */ + /* XXX does this want to recursively call lower-level flushes? 
 */
+ bufferevent_flush(bevf->underlying, iotype, mode);
+
+ bufferevent_decref_and_unlock_(bufev);
+
+ return processed_any;
+}
+
+static int
+be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ struct bufferevent_filtered *bevf;
+ switch (op) {
+ case BEV_CTRL_GET_UNDERLYING:
+ bevf = upcast(bev);
+ data->ptr = bevf->underlying;
+ return 0;
+ case BEV_CTRL_SET_FD:
+ case BEV_CTRL_GET_FD:
+ bevf = upcast(bev);
+
+ if (bevf->underlying &&
+ bevf->underlying->be_ops &&
+ bevf->underlying->be_ops->ctrl) {
+ return (bevf->underlying->be_ops->ctrl)(bevf->underlying, op, data);
+ }
+ EVUTIL_FALLTHROUGH;
+
+ case BEV_CTRL_CANCEL_ALL:
+ EVUTIL_FALLTHROUGH;
+ default:
+ return -1;
+ }
+
+ return -1;
+}
diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_openssl.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_openssl.c
new file mode 100644
index 00000000..b51b834b
--- /dev/null
+++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_openssl.c
@@ -0,0 +1,1524 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_ssl.h"
+#include "event2/buffer.h"
+#include "event2/event.h"
+
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "log-internal.h"
+
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include "openssl-compat.h"
+
+/*
+ * Define an OpenSSL bio that targets a bufferevent.
+ */
+
+/* --------------------
+ A BIO is an OpenSSL abstraction that handles reading and writing data.
The + library will happily speak SSL over anything that implements a BIO + interface. + + Here we define a BIO implementation that directs its output to a + bufferevent. We'll want to use this only when none of OpenSSL's built-in + IO mechanisms work for us. + -------------------- */ + +/* every BIO type needs its own integer type value. */ +#define BIO_TYPE_LIBEVENT 57 +/* ???? Arguably, we should set BIO_TYPE_FILTER or BIO_TYPE_SOURCE_SINK on + * this. */ + +#if 0 +static void +print_err(int val) +{ + int err; + printf("Error was %d\n", val); + + while ((err = ERR_get_error())) { + const char *msg = (const char*)ERR_reason_error_string(err); + const char *lib = (const char*)ERR_lib_error_string(err); + const char *func = (const char*)ERR_func_error_string(err); + + printf("%s in %s %s\n", msg, lib, func); + } +} +#else +#define print_err(v) ((void)0) +#endif + +/* Called to initialize a new BIO */ +static int +bio_bufferevent_new(BIO *b) +{ + BIO_set_init(b, 0); + BIO_set_data(b, NULL); /* We'll be putting the bufferevent in this field.*/ + return 1; +} + +/* Called to uninitialize the BIO. */ +static int +bio_bufferevent_free(BIO *b) +{ + if (!b) + return 0; + if (BIO_get_shutdown(b)) { + if (BIO_get_init(b) && BIO_get_data(b)) + bufferevent_free(BIO_get_data(b)); + BIO_free(b); + } + return 1; +} + +/* Called to extract data from the BIO. */ +static int +bio_bufferevent_read(BIO *b, char *out, int outlen) +{ + int r = 0; + struct evbuffer *input; + + BIO_clear_retry_flags(b); + + if (!out) + return 0; + if (!BIO_get_data(b)) + return -1; + + input = bufferevent_get_input(BIO_get_data(b)); + if (evbuffer_get_length(input) == 0) { + /* If there's no data to read, say so. */ + BIO_set_retry_read(b); + return -1; + } else { + r = evbuffer_remove(input, out, outlen); + } + + return r; +} + +/* Called to write data into the BIO */ +static int +bio_bufferevent_write(BIO *b, const char *in, int inlen) +{ + struct bufferevent *bufev = BIO_get_data(b); + struct evbuffer *output; + size_t outlen; + + BIO_clear_retry_flags(b); + + if (!BIO_get_data(b)) + return -1; + + output = bufferevent_get_output(bufev); + outlen = evbuffer_get_length(output); + + /* Copy only as much data onto the output buffer as can fit under the + * high-water mark. */ + if (bufev->wm_write.high && bufev->wm_write.high <= (outlen+inlen)) { + if (bufev->wm_write.high <= outlen) { + /* If no data can fit, we'll need to retry later. */ + BIO_set_retry_write(b); + return -1; + } + inlen = bufev->wm_write.high - outlen; + } + + EVUTIL_ASSERT(inlen > 0); + evbuffer_add(output, in, inlen); + return inlen; +} + +/* Called to handle various requests */ +static long +bio_bufferevent_ctrl(BIO *b, int cmd, long num, void *ptr) +{ + struct bufferevent *bufev = BIO_get_data(b); + long ret = 1; + + switch (cmd) { + case BIO_CTRL_GET_CLOSE: + ret = BIO_get_shutdown(b); + break; + case BIO_CTRL_SET_CLOSE: + BIO_set_shutdown(b, (int)num); + break; + case BIO_CTRL_PENDING: + ret = evbuffer_get_length(bufferevent_get_input(bufev)) != 0; + break; + case BIO_CTRL_WPENDING: + ret = evbuffer_get_length(bufferevent_get_output(bufev)) != 0; + break; + /* XXXX These two are given a special-case treatment because + * of cargo-cultism. I should come up with a better reason. 
*/ + case BIO_CTRL_DUP: + case BIO_CTRL_FLUSH: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/* Called to write a string to the BIO */ +static int +bio_bufferevent_puts(BIO *b, const char *s) +{ + return bio_bufferevent_write(b, s, strlen(s)); +} + +/* Method table for the bufferevent BIO */ +static BIO_METHOD *methods_bufferevent; + +/* Return the method table for the bufferevents BIO */ +static BIO_METHOD * +BIO_s_bufferevent(void) +{ + if (methods_bufferevent == NULL) { + methods_bufferevent = BIO_meth_new(BIO_TYPE_LIBEVENT, "bufferevent"); + if (methods_bufferevent == NULL) + return NULL; + BIO_meth_set_write(methods_bufferevent, bio_bufferevent_write); + BIO_meth_set_read(methods_bufferevent, bio_bufferevent_read); + BIO_meth_set_puts(methods_bufferevent, bio_bufferevent_puts); + BIO_meth_set_ctrl(methods_bufferevent, bio_bufferevent_ctrl); + BIO_meth_set_create(methods_bufferevent, bio_bufferevent_new); + BIO_meth_set_destroy(methods_bufferevent, bio_bufferevent_free); + } + return methods_bufferevent; +} + +/* Create a new BIO to wrap communication around a bufferevent. If close_flag + * is true, the bufferevent will be freed when the BIO is closed. */ +static BIO * +BIO_new_bufferevent(struct bufferevent *bufferevent) +{ + BIO *result; + if (!bufferevent) + return NULL; + if (!(result = BIO_new(BIO_s_bufferevent()))) + return NULL; + BIO_set_init(result, 1); + BIO_set_data(result, bufferevent); + /* We don't tell the BIO to close the bufferevent; we do it ourselves on + * be_openssl_destruct() */ + BIO_set_shutdown(result, 0); + return result; +} + +/* -------------------- + Now, here's the OpenSSL-based implementation of bufferevent. + + The implementation comes in two flavors: one that connects its SSL object + to an underlying bufferevent using a BIO_bufferevent, and one that has the + SSL object connect to a socket directly. The latter should generally be + faster, except on Windows, where your best bet is using a + bufferevent_async. + + (OpenSSL supports many other BIO types, too. But we can't use any unless + we have a good way to get notified when they become readable/writable.) + -------------------- */ + +struct bio_data_counts { + unsigned long n_written; + unsigned long n_read; +}; + +struct bufferevent_openssl { + /* Shared fields with common bufferevent implementation code. + If we were set up with an underlying bufferevent, we use the + events here as timers only. If we have an SSL, then we use + the events as socket events. + */ + struct bufferevent_private bev; + /* An underlying bufferevent that we're directing our output to. + If it's NULL, then we're connected to an fd, not an evbuffer. */ + struct bufferevent *underlying; + /* The SSL object doing our encryption. */ + SSL *ssl; + + /* A callback that's invoked when data arrives on our outbuf so we + know to write data to the SSL. */ + struct evbuffer_cb_entry *outbuf_cb; + + /* A count of how much data the bios have read/written total. Used + for rate-limiting. */ + struct bio_data_counts counts; + + /* If this value is greater than 0, then the last SSL_write blocked, + * and we need to try it again with this many bytes. */ + ev_ssize_t last_write; + +#define NUM_ERRORS 3 + ev_uint32_t errors[NUM_ERRORS]; + + /* When we next get available space, we should say "read" instead of + "write". This can happen if there's a renegotiation during a read + operation. */ + unsigned read_blocked_on_write : 1; + /* When we next get data, we should say "write" instead of "read". 
*/ + unsigned write_blocked_on_read : 1; + /* Treat TCP close before SSL close on SSL >= v3 as clean EOF. */ + unsigned allow_dirty_shutdown : 1; + /* XXX */ + unsigned n_errors : 2; + + /* Are we currently connecting, accepting, or doing IO? */ + unsigned state : 2; + /* If we reset fd, we sould reset state too */ + unsigned old_state : 2; +}; + +static int be_openssl_enable(struct bufferevent *, short); +static int be_openssl_disable(struct bufferevent *, short); +static void be_openssl_unlink(struct bufferevent *); +static void be_openssl_destruct(struct bufferevent *); +static int be_openssl_adj_timeouts(struct bufferevent *); +static int be_openssl_flush(struct bufferevent *bufev, + short iotype, enum bufferevent_flush_mode mode); +static int be_openssl_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *); + +const struct bufferevent_ops bufferevent_ops_openssl = { + "ssl", + evutil_offsetof(struct bufferevent_openssl, bev.bev), + be_openssl_enable, + be_openssl_disable, + be_openssl_unlink, + be_openssl_destruct, + be_openssl_adj_timeouts, + be_openssl_flush, + be_openssl_ctrl, +}; + +/* Given a bufferevent, return a pointer to the bufferevent_openssl that + * contains it, if any. */ +static inline struct bufferevent_openssl * +upcast(struct bufferevent *bev) +{ + struct bufferevent_openssl *bev_o; + if (!BEV_IS_OPENSSL(bev)) + return NULL; + bev_o = (void*)( ((char*)bev) - + evutil_offsetof(struct bufferevent_openssl, bev.bev)); + EVUTIL_ASSERT(BEV_IS_OPENSSL(&bev_o->bev.bev)); + return bev_o; +} + +static inline void +put_error(struct bufferevent_openssl *bev_ssl, unsigned long err) +{ + if (bev_ssl->n_errors == NUM_ERRORS) + return; + /* The error type according to openssl is "unsigned long", but + openssl never uses more than 32 bits of it. It _can't_ use more + than 32 bits of it, since it needs to report errors on systems + where long is only 32 bits. + */ + bev_ssl->errors[bev_ssl->n_errors++] = (ev_uint32_t) err; +} + +/* Have the base communications channel (either the underlying bufferevent or + * ev_read and ev_write) start reading. Take the read-blocked-on-write flag + * into account. */ +static int +start_reading(struct bufferevent_openssl *bev_ssl) +{ + if (bev_ssl->underlying) { + bufferevent_unsuspend_read_(bev_ssl->underlying, + BEV_SUSPEND_FILT_READ); + return 0; + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + int r; + r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read); + if (r == 0 && bev_ssl->read_blocked_on_write) + r = bufferevent_add_event_(&bev->ev_write, + &bev->timeout_write); + return r; + } +} + +/* Have the base communications channel (either the underlying bufferevent or + * ev_read and ev_write) start writing. Take the write-blocked-on-read flag + * into account. 
*/ +static int +start_writing(struct bufferevent_openssl *bev_ssl) +{ + int r = 0; + if (bev_ssl->underlying) { + if (bev_ssl->write_blocked_on_read) { + bufferevent_unsuspend_read_(bev_ssl->underlying, + BEV_SUSPEND_FILT_READ); + } + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write); + if (!r && bev_ssl->write_blocked_on_read) + r = bufferevent_add_event_(&bev->ev_read, + &bev->timeout_read); + } + return r; +} + +static void +stop_reading(struct bufferevent_openssl *bev_ssl) +{ + if (bev_ssl->write_blocked_on_read) + return; + if (bev_ssl->underlying) { + bufferevent_suspend_read_(bev_ssl->underlying, + BEV_SUSPEND_FILT_READ); + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + event_del(&bev->ev_read); + } +} + +static void +stop_writing(struct bufferevent_openssl *bev_ssl) +{ + if (bev_ssl->read_blocked_on_write) + return; + if (bev_ssl->underlying) { + bufferevent_unsuspend_read_(bev_ssl->underlying, + BEV_SUSPEND_FILT_READ); + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + event_del(&bev->ev_write); + } +} + +static int +set_rbow(struct bufferevent_openssl *bev_ssl) +{ + if (!bev_ssl->underlying) + stop_reading(bev_ssl); + bev_ssl->read_blocked_on_write = 1; + return start_writing(bev_ssl); +} + +static int +set_wbor(struct bufferevent_openssl *bev_ssl) +{ + if (!bev_ssl->underlying) + stop_writing(bev_ssl); + bev_ssl->write_blocked_on_read = 1; + return start_reading(bev_ssl); +} + +static int +clear_rbow(struct bufferevent_openssl *bev_ssl) +{ + struct bufferevent *bev = &bev_ssl->bev.bev; + int r = 0; + bev_ssl->read_blocked_on_write = 0; + if (!(bev->enabled & EV_WRITE)) + stop_writing(bev_ssl); + if (bev->enabled & EV_READ) + r = start_reading(bev_ssl); + return r; +} + + +static int +clear_wbor(struct bufferevent_openssl *bev_ssl) +{ + struct bufferevent *bev = &bev_ssl->bev.bev; + int r = 0; + bev_ssl->write_blocked_on_read = 0; + if (!(bev->enabled & EV_READ)) + stop_reading(bev_ssl); + if (bev->enabled & EV_WRITE) + r = start_writing(bev_ssl); + return r; +} + +static void +conn_closed(struct bufferevent_openssl *bev_ssl, int when, int errcode, int ret) +{ + int event = BEV_EVENT_ERROR; + int dirty_shutdown = 0; + unsigned long err; + + switch (errcode) { + case SSL_ERROR_ZERO_RETURN: + /* Possibly a clean shutdown. */ + if (SSL_get_shutdown(bev_ssl->ssl) & SSL_RECEIVED_SHUTDOWN) + event = BEV_EVENT_EOF; + else + dirty_shutdown = 1; + break; + case SSL_ERROR_SYSCALL: + /* IO error; possibly a dirty shutdown. */ + if ((ret == 0 || ret == -1) && ERR_peek_error() == 0) + dirty_shutdown = 1; + put_error(bev_ssl, errcode); + break; + case SSL_ERROR_SSL: + /* Protocol error. */ + put_error(bev_ssl, errcode); + break; + case SSL_ERROR_WANT_X509_LOOKUP: + /* XXXX handle this. */ + put_error(bev_ssl, errcode); + break; + case SSL_ERROR_NONE: + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_WRITE: + case SSL_ERROR_WANT_CONNECT: + case SSL_ERROR_WANT_ACCEPT: + default: + /* should be impossible; treat as normal error. 
*/ + event_warnx("BUG: Unexpected OpenSSL error code %d", errcode); + break; + } + + while ((err = ERR_get_error())) { + put_error(bev_ssl, err); + } + + if (dirty_shutdown && bev_ssl->allow_dirty_shutdown) + event = BEV_EVENT_EOF; + + stop_reading(bev_ssl); + stop_writing(bev_ssl); + + /* when is BEV_EVENT_{READING|WRITING} */ + event = when | event; + bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0); +} + +static void +init_bio_counts(struct bufferevent_openssl *bev_ssl) +{ + BIO *rbio, *wbio; + + wbio = SSL_get_wbio(bev_ssl->ssl); + bev_ssl->counts.n_written = wbio ? BIO_number_written(wbio) : 0; + rbio = SSL_get_rbio(bev_ssl->ssl); + bev_ssl->counts.n_read = rbio ? BIO_number_read(rbio) : 0; +} + +static inline void +decrement_buckets(struct bufferevent_openssl *bev_ssl) +{ + unsigned long num_w = BIO_number_written(SSL_get_wbio(bev_ssl->ssl)); + unsigned long num_r = BIO_number_read(SSL_get_rbio(bev_ssl->ssl)); + /* These next two subtractions can wrap around. That's okay. */ + unsigned long w = num_w - bev_ssl->counts.n_written; + unsigned long r = num_r - bev_ssl->counts.n_read; + if (w) + bufferevent_decrement_write_buckets_(&bev_ssl->bev, w); + if (r) + bufferevent_decrement_read_buckets_(&bev_ssl->bev, r); + bev_ssl->counts.n_written = num_w; + bev_ssl->counts.n_read = num_r; +} + +#define OP_MADE_PROGRESS 1 +#define OP_BLOCKED 2 +#define OP_ERR 4 + +/* Return a bitmask of OP_MADE_PROGRESS (if we read anything); OP_BLOCKED (if + we're now blocked); and OP_ERR (if an error occurred). */ +static int +do_read(struct bufferevent_openssl *bev_ssl, int n_to_read) { + /* Requires lock */ + struct bufferevent *bev = &bev_ssl->bev.bev; + struct evbuffer *input = bev->input; + int r, n, i, n_used = 0, atmost; + struct evbuffer_iovec space[2]; + int result = 0; + + if (bev_ssl->bev.read_suspended) + return 0; + + atmost = bufferevent_get_read_max_(&bev_ssl->bev); + if (n_to_read > atmost) + n_to_read = atmost; + + n = evbuffer_reserve_space(input, n_to_read, space, 2); + if (n < 0) + return OP_ERR; + + for (i=0; ibev.read_suspended) + break; + ERR_clear_error(); + r = SSL_read(bev_ssl->ssl, space[i].iov_base, space[i].iov_len); + if (r>0) { + result |= OP_MADE_PROGRESS; + if (bev_ssl->read_blocked_on_write) + if (clear_rbow(bev_ssl) < 0) + return OP_ERR | result; + ++n_used; + space[i].iov_len = r; + decrement_buckets(bev_ssl); + } else { + int err = SSL_get_error(bev_ssl->ssl, r); + print_err(err); + switch (err) { + case SSL_ERROR_WANT_READ: + /* Can't read until underlying has more data. */ + if (bev_ssl->read_blocked_on_write) + if (clear_rbow(bev_ssl) < 0) + return OP_ERR | result; + break; + case SSL_ERROR_WANT_WRITE: + /* This read operation requires a write, and the + * underlying is full */ + if (!bev_ssl->read_blocked_on_write) + if (set_rbow(bev_ssl) < 0) + return OP_ERR | result; + break; + default: + conn_closed(bev_ssl, BEV_EVENT_READING, err, r); + break; + } + result |= OP_BLOCKED; + break; /* out of the loop */ + } + } + + if (n_used) { + evbuffer_commit_space(input, space, n_used); + if (bev_ssl->underlying) + BEV_RESET_GENERIC_READ_TIMEOUT(bev); + } + + return result; +} + +/* Return a bitmask of OP_MADE_PROGRESS (if we wrote anything); OP_BLOCKED (if + we're now blocked); and OP_ERR (if an error occurred). 
*/ +static int +do_write(struct bufferevent_openssl *bev_ssl, int atmost) +{ + int i, r, n, n_written = 0; + struct bufferevent *bev = &bev_ssl->bev.bev; + struct evbuffer *output = bev->output; + struct evbuffer_iovec space[8]; + int result = 0; + + if (bev_ssl->last_write > 0) + atmost = bev_ssl->last_write; + else + atmost = bufferevent_get_write_max_(&bev_ssl->bev); + + n = evbuffer_peek(output, atmost, NULL, space, 8); + if (n < 0) + return OP_ERR | result; + + if (n > 8) + n = 8; + for (i=0; i < n; ++i) { + if (bev_ssl->bev.write_suspended) + break; + + /* SSL_write will (reasonably) return 0 if we tell it to + send 0 data. Skip this case so we don't interpret the + result as an error */ + if (space[i].iov_len == 0) + continue; + + ERR_clear_error(); + r = SSL_write(bev_ssl->ssl, space[i].iov_base, + space[i].iov_len); + if (r > 0) { + result |= OP_MADE_PROGRESS; + if (bev_ssl->write_blocked_on_read) + if (clear_wbor(bev_ssl) < 0) + return OP_ERR | result; + n_written += r; + bev_ssl->last_write = -1; + decrement_buckets(bev_ssl); + } else { + int err = SSL_get_error(bev_ssl->ssl, r); + print_err(err); + switch (err) { + case SSL_ERROR_WANT_WRITE: + /* Can't read until underlying has more data. */ + if (bev_ssl->write_blocked_on_read) + if (clear_wbor(bev_ssl) < 0) + return OP_ERR | result; + bev_ssl->last_write = space[i].iov_len; + break; + case SSL_ERROR_WANT_READ: + /* This read operation requires a write, and the + * underlying is full */ + if (!bev_ssl->write_blocked_on_read) + if (set_wbor(bev_ssl) < 0) + return OP_ERR | result; + bev_ssl->last_write = space[i].iov_len; + break; + default: + conn_closed(bev_ssl, BEV_EVENT_WRITING, err, r); + bev_ssl->last_write = -1; + break; + } + result |= OP_BLOCKED; + break; + } + } + if (n_written) { + evbuffer_drain(output, n_written); + if (bev_ssl->underlying) + BEV_RESET_GENERIC_WRITE_TIMEOUT(bev); + + bufferevent_trigger_nolock_(bev, EV_WRITE, BEV_OPT_DEFER_CALLBACKS); + } + return result; +} + +#define WRITE_FRAME 15000 + +#define READ_DEFAULT 4096 + +/* Try to figure out how many bytes to read; return 0 if we shouldn't be + * reading. */ +static int +bytes_to_read(struct bufferevent_openssl *bev) +{ + struct evbuffer *input = bev->bev.bev.input; + struct event_watermark *wm = &bev->bev.bev.wm_read; + int result = READ_DEFAULT; + ev_ssize_t limit; + /* XXX 99% of this is generic code that nearly all bufferevents will + * want. */ + + if (bev->write_blocked_on_read) { + return 0; + } + + if (! (bev->bev.bev.enabled & EV_READ)) { + return 0; + } + + if (bev->bev.read_suspended) { + return 0; + } + + if (wm->high) { + if (evbuffer_get_length(input) >= wm->high) { + return 0; + } + + result = wm->high - evbuffer_get_length(input); + } else { + result = READ_DEFAULT; + } + + /* Respect the rate limit */ + limit = bufferevent_get_read_max_(&bev->bev); + if (result > limit) { + result = limit; + } + + return result; +} + + +/* Things look readable. If write is blocked on read, write till it isn't. + * Read from the underlying buffer until we block or we hit our high-water + * mark. 
+ */ +static void +consider_reading(struct bufferevent_openssl *bev_ssl) +{ + int r; + int n_to_read; + int all_result_flags = 0; + + while (bev_ssl->write_blocked_on_read) { + r = do_write(bev_ssl, WRITE_FRAME); + if (r & (OP_BLOCKED|OP_ERR)) + break; + } + if (bev_ssl->write_blocked_on_read) + return; + + n_to_read = bytes_to_read(bev_ssl); + + while (n_to_read) { + r = do_read(bev_ssl, n_to_read); + all_result_flags |= r; + + if (r & (OP_BLOCKED|OP_ERR)) + break; + + if (bev_ssl->bev.read_suspended) + break; + + /* Read all pending data. This won't hit the network + * again, and will (most importantly) put us in a state + * where we don't need to read anything else until the + * socket is readable again. It'll potentially make us + * overrun our read high-watermark (somewhat + * regrettable). The damage to the rate-limit has + * already been done, since OpenSSL went and read a + * whole SSL record anyway. */ + n_to_read = SSL_pending(bev_ssl->ssl); + + /* XXX This if statement is actually a bad bug, added to avoid + * XXX a worse bug. + * + * The bad bug: It can potentially cause resource unfairness + * by reading too much data from the underlying bufferevent; + * it can potentially cause read looping if the underlying + * bufferevent is a bufferevent_pair and deferred callbacks + * aren't used. + * + * The worse bug: If we didn't do this, then we would + * potentially not read any more from bev_ssl->underlying + * until more data arrived there, which could lead to us + * waiting forever. + */ + if (!n_to_read && bev_ssl->underlying) + n_to_read = bytes_to_read(bev_ssl); + } + + if (all_result_flags & OP_MADE_PROGRESS) { + struct bufferevent *bev = &bev_ssl->bev.bev; + + bufferevent_trigger_nolock_(bev, EV_READ, 0); + } + + if (!bev_ssl->underlying) { + /* Should be redundant, but let's avoid busy-looping */ + if (bev_ssl->bev.read_suspended || + !(bev_ssl->bev.bev.enabled & EV_READ)) { + event_del(&bev_ssl->bev.bev.ev_read); + } + } +} + +static void +consider_writing(struct bufferevent_openssl *bev_ssl) +{ + int r; + struct evbuffer *output = bev_ssl->bev.bev.output; + struct evbuffer *target = NULL; + struct event_watermark *wm = NULL; + + while (bev_ssl->read_blocked_on_write) { + r = do_read(bev_ssl, 1024); /* XXXX 1024 is a hack */ + if (r & OP_MADE_PROGRESS) { + struct bufferevent *bev = &bev_ssl->bev.bev; + + bufferevent_trigger_nolock_(bev, EV_READ, 0); + } + if (r & (OP_ERR|OP_BLOCKED)) + break; + } + if (bev_ssl->read_blocked_on_write) + return; + if (bev_ssl->underlying) { + target = bev_ssl->underlying->output; + wm = &bev_ssl->underlying->wm_write; + } + while ((bev_ssl->bev.bev.enabled & EV_WRITE) && + (! bev_ssl->bev.write_suspended) && + evbuffer_get_length(output) && + (!target || (! 
wm->high || evbuffer_get_length(target) < wm->high))) { + int n_to_write; + if (wm && wm->high) + n_to_write = wm->high - evbuffer_get_length(target); + else + n_to_write = WRITE_FRAME; + r = do_write(bev_ssl, n_to_write); + if (r & (OP_BLOCKED|OP_ERR)) + break; + } + + if (!bev_ssl->underlying) { + if (evbuffer_get_length(output) == 0) { + event_del(&bev_ssl->bev.bev.ev_write); + } else if (bev_ssl->bev.write_suspended || + !(bev_ssl->bev.bev.enabled & EV_WRITE)) { + /* Should be redundant, but let's avoid busy-looping */ + event_del(&bev_ssl->bev.bev.ev_write); + } + } +} + +static void +be_openssl_readcb(struct bufferevent *bev_base, void *ctx) +{ + struct bufferevent_openssl *bev_ssl = ctx; + consider_reading(bev_ssl); +} + +static void +be_openssl_writecb(struct bufferevent *bev_base, void *ctx) +{ + struct bufferevent_openssl *bev_ssl = ctx; + consider_writing(bev_ssl); +} + +static void +be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx) +{ + struct bufferevent_openssl *bev_ssl = ctx; + int event = 0; + + if (what & BEV_EVENT_EOF) { + if (bev_ssl->allow_dirty_shutdown) + event = BEV_EVENT_EOF; + else + event = BEV_EVENT_ERROR; + } else if (what & BEV_EVENT_TIMEOUT) { + /* We sure didn't set this. Propagate it to the user. */ + event = what; + } else if (what & BEV_EVENT_ERROR) { + /* An error occurred on the connection. Propagate it to the user. */ + event = what; + } else if (what & BEV_EVENT_CONNECTED) { + /* Ignore it. We're saying SSL_connect() already, which will + eat it. */ + } + if (event) + bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0); +} + +static void +be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr) +{ + struct bufferevent_openssl *bev_ssl = ptr; + bufferevent_incref_and_lock_(&bev_ssl->bev.bev); + if (what == EV_TIMEOUT) { + bufferevent_run_eventcb_(&bev_ssl->bev.bev, + BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0); + } else { + consider_reading(bev_ssl); + } + bufferevent_decref_and_unlock_(&bev_ssl->bev.bev); +} + +static void +be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr) +{ + struct bufferevent_openssl *bev_ssl = ptr; + bufferevent_incref_and_lock_(&bev_ssl->bev.bev); + if (what == EV_TIMEOUT) { + bufferevent_run_eventcb_(&bev_ssl->bev.bev, + BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0); + } else { + consider_writing(bev_ssl); + } + bufferevent_decref_and_unlock_(&bev_ssl->bev.bev); +} + +static evutil_socket_t +be_openssl_auto_fd(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd) +{ + if (!bev_ssl->underlying) { + struct bufferevent *bev = &bev_ssl->bev.bev; + if (event_initialized(&bev->ev_read) && fd < 0) { + fd = event_get_fd(&bev->ev_read); + } + } + return fd; +} + +static int +set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd) +{ + if (bev_ssl->underlying) { + bufferevent_setcb(bev_ssl->underlying, + be_openssl_readcb, be_openssl_writecb, be_openssl_eventcb, + bev_ssl); + return 0; + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + int rpending=0, wpending=0, r1=0, r2=0; + + if (event_initialized(&bev->ev_read)) { + rpending = event_pending(&bev->ev_read, EV_READ, NULL); + wpending = event_pending(&bev->ev_write, EV_WRITE, NULL); + + event_del(&bev->ev_read); + event_del(&bev->ev_write); + } + + event_assign(&bev->ev_read, bev->ev_base, fd, + EV_READ|EV_PERSIST|EV_FINALIZE, + be_openssl_readeventcb, bev_ssl); + event_assign(&bev->ev_write, bev->ev_base, fd, + EV_WRITE|EV_PERSIST|EV_FINALIZE, + be_openssl_writeeventcb, bev_ssl); + + if (rpending) + r1 = 
bufferevent_add_event_(&bev->ev_read, &bev->timeout_read); + if (wpending) + r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write); + + return (r1<0 || r2<0) ? -1 : 0; + } +} + +static int +do_handshake(struct bufferevent_openssl *bev_ssl) +{ + int r; + + switch (bev_ssl->state) { + default: + case BUFFEREVENT_SSL_OPEN: + EVUTIL_ASSERT(0); + return -1; + case BUFFEREVENT_SSL_CONNECTING: + case BUFFEREVENT_SSL_ACCEPTING: + ERR_clear_error(); + r = SSL_do_handshake(bev_ssl->ssl); + break; + } + decrement_buckets(bev_ssl); + + if (r==1) { + evutil_socket_t fd = event_get_fd(&bev_ssl->bev.bev.ev_read); + /* We're done! */ + bev_ssl->state = BUFFEREVENT_SSL_OPEN; + set_open_callbacks(bev_ssl, fd); /* XXXX handle failure */ + /* Call do_read and do_write as needed */ + bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled); + bufferevent_run_eventcb_(&bev_ssl->bev.bev, + BEV_EVENT_CONNECTED, 0); + return 1; + } else { + int err = SSL_get_error(bev_ssl->ssl, r); + print_err(err); + switch (err) { + case SSL_ERROR_WANT_WRITE: + stop_reading(bev_ssl); + return start_writing(bev_ssl); + case SSL_ERROR_WANT_READ: + stop_writing(bev_ssl); + return start_reading(bev_ssl); + default: + conn_closed(bev_ssl, BEV_EVENT_READING, err, r); + return -1; + } + } +} + +static void +be_openssl_handshakecb(struct bufferevent *bev_base, void *ctx) +{ + struct bufferevent_openssl *bev_ssl = ctx; + do_handshake(bev_ssl);/* XXX handle failure */ +} + +static void +be_openssl_handshakeeventcb(evutil_socket_t fd, short what, void *ptr) +{ + struct bufferevent_openssl *bev_ssl = ptr; + + bufferevent_incref_and_lock_(&bev_ssl->bev.bev); + if (what & EV_TIMEOUT) { + bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT, 0); + } else + do_handshake(bev_ssl);/* XXX handle failure */ + bufferevent_decref_and_unlock_(&bev_ssl->bev.bev); +} + +static int +set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd) +{ + if (bev_ssl->underlying) { + bufferevent_setcb(bev_ssl->underlying, + be_openssl_handshakecb, be_openssl_handshakecb, + be_openssl_eventcb, + bev_ssl); + + if (fd < 0) + return 0; + + if (bufferevent_setfd(bev_ssl->underlying, fd)) + return 1; + + return do_handshake(bev_ssl); + } else { + struct bufferevent *bev = &bev_ssl->bev.bev; + + if (event_initialized(&bev->ev_read)) { + event_del(&bev->ev_read); + event_del(&bev->ev_write); + } + + event_assign(&bev->ev_read, bev->ev_base, fd, + EV_READ|EV_PERSIST|EV_FINALIZE, + be_openssl_handshakeeventcb, bev_ssl); + event_assign(&bev->ev_write, bev->ev_base, fd, + EV_WRITE|EV_PERSIST|EV_FINALIZE, + be_openssl_handshakeeventcb, bev_ssl); + if (fd >= 0) + bufferevent_enable(bev, bev->enabled); + return 0; + } +} + +int +bufferevent_ssl_renegotiate(struct bufferevent *bev) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + if (!bev_ssl) + return -1; + if (SSL_renegotiate(bev_ssl->ssl) < 0) + return -1; + bev_ssl->state = BUFFEREVENT_SSL_CONNECTING; + if (set_handshake_callbacks(bev_ssl, be_openssl_auto_fd(bev_ssl, -1)) < 0) + return -1; + if (!bev_ssl->underlying) + return do_handshake(bev_ssl); + return 0; +} + +static void +be_openssl_outbuf_cb(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, void *arg) +{ + struct bufferevent_openssl *bev_ssl = arg; + int r = 0; + /* XXX need to hold a reference here. 
*/ + + if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN) { + if (cbinfo->orig_size == 0) + r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write, + &bev_ssl->bev.bev.timeout_write); + + if (bev_ssl->underlying) + consider_writing(bev_ssl); + } + /* XXX Handle r < 0 */ + (void)r; +} + + +static int +be_openssl_enable(struct bufferevent *bev, short events) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + int r1 = 0, r2 = 0; + + if (events & EV_READ) + r1 = start_reading(bev_ssl); + if (events & EV_WRITE) + r2 = start_writing(bev_ssl); + + if (bev_ssl->underlying) { + if (events & EV_READ) + BEV_RESET_GENERIC_READ_TIMEOUT(bev); + if (events & EV_WRITE) + BEV_RESET_GENERIC_WRITE_TIMEOUT(bev); + + if (events & EV_READ) + consider_reading(bev_ssl); + if (events & EV_WRITE) + consider_writing(bev_ssl); + } + return (r1 < 0 || r2 < 0) ? -1 : 0; +} + +static int +be_openssl_disable(struct bufferevent *bev, short events) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + + if (events & EV_READ) + stop_reading(bev_ssl); + if (events & EV_WRITE) + stop_writing(bev_ssl); + + if (bev_ssl->underlying) { + if (events & EV_READ) + BEV_DEL_GENERIC_READ_TIMEOUT(bev); + if (events & EV_WRITE) + BEV_DEL_GENERIC_WRITE_TIMEOUT(bev); + } + return 0; +} + +static void +be_openssl_unlink(struct bufferevent *bev) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + + if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) { + if (bev_ssl->underlying) { + if (BEV_UPCAST(bev_ssl->underlying)->refcnt < 2) { + event_warnx("BEV_OPT_CLOSE_ON_FREE set on an " + "bufferevent with too few references"); + } else { + bufferevent_free(bev_ssl->underlying); + /* We still have a reference to it, via our + * BIO. So we don't drop this. */ + // bev_ssl->underlying = NULL; + } + } + } else { + if (bev_ssl->underlying) { + if (bev_ssl->underlying->errorcb == be_openssl_eventcb) + bufferevent_setcb(bev_ssl->underlying, + NULL,NULL,NULL,NULL); + bufferevent_unsuspend_read_(bev_ssl->underlying, + BEV_SUSPEND_FILT_READ); + } + } +} + +static void +be_openssl_destruct(struct bufferevent *bev) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + + if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) { + if (! bev_ssl->underlying) { + evutil_socket_t fd = EVUTIL_INVALID_SOCKET; + BIO *bio = SSL_get_wbio(bev_ssl->ssl); + if (bio) + fd = BIO_get_fd(bio, NULL); + if (fd >= 0) + evutil_closesocket(fd); + } + SSL_free(bev_ssl->ssl); + } +} + +static int +be_openssl_adj_timeouts(struct bufferevent *bev) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + + if (bev_ssl->underlying) { + return bufferevent_generic_adj_timeouts_(bev); + } else { + return bufferevent_generic_adj_existing_timeouts_(bev); + } +} + +static int +be_openssl_flush(struct bufferevent *bufev, + short iotype, enum bufferevent_flush_mode mode) +{ + /* XXXX Implement this. 
*/ + return 0; +} + +static int +be_openssl_set_fd(struct bufferevent_openssl *bev_ssl, + enum bufferevent_ssl_state state, evutil_socket_t fd) +{ + bev_ssl->state = state; + + switch (state) { + case BUFFEREVENT_SSL_ACCEPTING: + if (!SSL_clear(bev_ssl->ssl)) + return -1; + SSL_set_accept_state(bev_ssl->ssl); + if (set_handshake_callbacks(bev_ssl, fd) < 0) + return -1; + break; + case BUFFEREVENT_SSL_CONNECTING: + if (!SSL_clear(bev_ssl->ssl)) + return -1; + SSL_set_connect_state(bev_ssl->ssl); + if (set_handshake_callbacks(bev_ssl, fd) < 0) + return -1; + break; + case BUFFEREVENT_SSL_OPEN: + if (set_open_callbacks(bev_ssl, fd) < 0) + return -1; + break; + default: + return -1; + } + + return 0; +} + +static int +be_openssl_ctrl(struct bufferevent *bev, + enum bufferevent_ctrl_op op, union bufferevent_ctrl_data *data) +{ + struct bufferevent_openssl *bev_ssl = upcast(bev); + switch (op) { + case BEV_CTRL_SET_FD: + if (!bev_ssl->underlying) { + BIO *bio; + bio = BIO_new_socket((int)data->fd, 0); + SSL_set_bio(bev_ssl->ssl, bio, bio); + } else { + BIO *bio; + if (!(bio = BIO_new_bufferevent(bev_ssl->underlying))) + return -1; + SSL_set_bio(bev_ssl->ssl, bio, bio); + } + + return be_openssl_set_fd(bev_ssl, bev_ssl->old_state, data->fd); + case BEV_CTRL_GET_FD: + if (bev_ssl->underlying) { + data->fd = event_get_fd(&bev_ssl->underlying->ev_read); + } else { + data->fd = event_get_fd(&bev->ev_read); + } + return 0; + case BEV_CTRL_GET_UNDERLYING: + data->ptr = bev_ssl->underlying; + return 0; + case BEV_CTRL_CANCEL_ALL: + default: + return -1; + } +} + +SSL * +bufferevent_openssl_get_ssl(struct bufferevent *bufev) +{ + struct bufferevent_openssl *bev_ssl = upcast(bufev); + if (!bev_ssl) + return NULL; + return bev_ssl->ssl; +} + +static struct bufferevent * +bufferevent_openssl_new_impl(struct event_base *base, + struct bufferevent *underlying, + evutil_socket_t fd, + SSL *ssl, + enum bufferevent_ssl_state state, + int options) +{ + struct bufferevent_openssl *bev_ssl = NULL; + struct bufferevent_private *bev_p = NULL; + int tmp_options = options & ~BEV_OPT_THREADSAFE; + + /* Only one can be set. */ + if (underlying != NULL && fd >= 0) + goto err; + + if (!(bev_ssl = mm_calloc(1, sizeof(struct bufferevent_openssl)))) + goto err; + + bev_p = &bev_ssl->bev; + + if (bufferevent_init_common_(bev_p, base, + &bufferevent_ops_openssl, tmp_options) < 0) + goto err; + + /* Don't explode if we decide to realloc a chunk we're writing from in + * the output buffer. 
*/ + SSL_set_mode(ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); + + bev_ssl->underlying = underlying; + bev_ssl->ssl = ssl; + + bev_ssl->outbuf_cb = evbuffer_add_cb(bev_p->bev.output, + be_openssl_outbuf_cb, bev_ssl); + + if (options & BEV_OPT_THREADSAFE) + bufferevent_enable_locking_(&bev_ssl->bev.bev, NULL); + + if (underlying) { + bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev); + bufferevent_incref_(underlying); + } + + bev_ssl->old_state = state; + bev_ssl->last_write = -1; + + init_bio_counts(bev_ssl); + + fd = be_openssl_auto_fd(bev_ssl, fd); + if (be_openssl_set_fd(bev_ssl, state, fd)) + goto err; + + if (underlying) { + bufferevent_setwatermark(underlying, EV_READ, 0, 0); + bufferevent_enable(underlying, EV_READ|EV_WRITE); + if (state == BUFFEREVENT_SSL_OPEN) + bufferevent_suspend_read_(underlying, + BEV_SUSPEND_FILT_READ); + } + + return &bev_ssl->bev.bev; +err: + if (options & BEV_OPT_CLOSE_ON_FREE) + SSL_free(ssl); + if (bev_ssl) { + bev_ssl->ssl = NULL; + bufferevent_free(&bev_ssl->bev.bev); + } + return NULL; +} + +struct bufferevent * +bufferevent_openssl_filter_new(struct event_base *base, + struct bufferevent *underlying, + SSL *ssl, + enum bufferevent_ssl_state state, + int options) +{ + BIO *bio; + struct bufferevent *bev; + + if (!underlying) + goto err; + if (!(bio = BIO_new_bufferevent(underlying))) + goto err; + + SSL_set_bio(ssl, bio, bio); + + bev = bufferevent_openssl_new_impl( + base, underlying, -1, ssl, state, options); + return bev; + +err: + if (options & BEV_OPT_CLOSE_ON_FREE) + SSL_free(ssl); + return NULL; +} + +struct bufferevent * +bufferevent_openssl_socket_new(struct event_base *base, + evutil_socket_t fd, + SSL *ssl, + enum bufferevent_ssl_state state, + int options) +{ + /* Does the SSL already have an fd? */ + BIO *bio = SSL_get_wbio(ssl); + long have_fd = -1; + + if (bio) + have_fd = BIO_get_fd(bio, NULL); + + if (have_fd >= 0) { + /* The SSL is already configured with an fd. */ + if (fd < 0) { + /* We should learn the fd from the SSL. */ + fd = (evutil_socket_t) have_fd; + } else if (have_fd == (long)fd) { + /* We already know the fd from the SSL; do nothing */ + } else { + /* We specified an fd different from that of the SSL. + This is probably an error on our part. Fail. */ + goto err; + } + BIO_set_close(bio, 0); + } else { + /* The SSL isn't configured with a BIO with an fd. */ + if (fd >= 0) { + /* ... and we have an fd we want to use. */ + bio = BIO_new_socket((int)fd, 0); + SSL_set_bio(ssl, bio, bio); + } else { + /* Leave the fd unset. 
*/ + } + } + + return bufferevent_openssl_new_impl( + base, NULL, fd, ssl, state, options); + +err: + if (options & BEV_OPT_CLOSE_ON_FREE) + SSL_free(ssl); + return NULL; +} + +int +bufferevent_openssl_get_allow_dirty_shutdown(struct bufferevent *bev) +{ + int allow_dirty_shutdown = -1; + struct bufferevent_openssl *bev_ssl; + BEV_LOCK(bev); + bev_ssl = upcast(bev); + if (bev_ssl) + allow_dirty_shutdown = bev_ssl->allow_dirty_shutdown; + BEV_UNLOCK(bev); + return allow_dirty_shutdown; +} + +void +bufferevent_openssl_set_allow_dirty_shutdown(struct bufferevent *bev, + int allow_dirty_shutdown) +{ + struct bufferevent_openssl *bev_ssl; + BEV_LOCK(bev); + bev_ssl = upcast(bev); + if (bev_ssl) + bev_ssl->allow_dirty_shutdown = !!allow_dirty_shutdown; + BEV_UNLOCK(bev); +} + +unsigned long +bufferevent_get_openssl_error(struct bufferevent *bev) +{ + unsigned long err = 0; + struct bufferevent_openssl *bev_ssl; + BEV_LOCK(bev); + bev_ssl = upcast(bev); + if (bev_ssl && bev_ssl->n_errors) { + err = bev_ssl->errors[--bev_ssl->n_errors]; + } + BEV_UNLOCK(bev); + return err; +} diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_pair.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_pair.c new file mode 100644 index 00000000..f88cd751 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_pair.c @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifdef _WIN32 +#include +#endif + +#include "event2/util.h" +#include "event2/buffer.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_struct.h" +#include "event2/event.h" +#include "defer-internal.h" +#include "bufferevent-internal.h" +#include "mm-internal.h" +#include "util-internal.h" + +struct bufferevent_pair { + struct bufferevent_private bev; + struct bufferevent_pair *partner; + /* For ->destruct() lock checking */ + struct bufferevent_pair *unlinked_partner; +}; + + +/* Given a bufferevent that's really a bev part of a bufferevent_pair, + * return that bufferevent_filtered. 
Returns NULL otherwise.*/ +static inline struct bufferevent_pair * +upcast(struct bufferevent *bev) +{ + struct bufferevent_pair *bev_p; + if (!BEV_IS_PAIR(bev)) + return NULL; + bev_p = EVUTIL_UPCAST(bev, struct bufferevent_pair, bev.bev); + EVUTIL_ASSERT(BEV_IS_PAIR(&bev_p->bev.bev)); + return bev_p; +} + +#define downcast(bev_pair) (&(bev_pair)->bev.bev) + +static inline void +incref_and_lock(struct bufferevent *b) +{ + struct bufferevent_pair *bevp; + bufferevent_incref_and_lock_(b); + bevp = upcast(b); + if (bevp->partner) + bufferevent_incref_and_lock_(downcast(bevp->partner)); +} + +static inline void +decref_and_unlock(struct bufferevent *b) +{ + struct bufferevent_pair *bevp = upcast(b); + if (bevp->partner) + bufferevent_decref_and_unlock_(downcast(bevp->partner)); + bufferevent_decref_and_unlock_(b); +} + +/* XXX Handle close */ + +static void be_pair_outbuf_cb(struct evbuffer *, + const struct evbuffer_cb_info *, void *); + +static struct bufferevent_pair * +bufferevent_pair_elt_new(struct event_base *base, + int options) +{ + struct bufferevent_pair *bufev; + if (! (bufev = mm_calloc(1, sizeof(struct bufferevent_pair)))) + return NULL; + if (bufferevent_init_common_(&bufev->bev, base, &bufferevent_ops_pair, + options)) { + mm_free(bufev); + return NULL; + } + if (!evbuffer_add_cb(bufev->bev.bev.output, be_pair_outbuf_cb, bufev)) { + bufferevent_free(downcast(bufev)); + return NULL; + } + + bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev); + + return bufev; +} + +int +bufferevent_pair_new(struct event_base *base, int options, + struct bufferevent *pair[2]) +{ + struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL; + int tmp_options; + + options |= BEV_OPT_DEFER_CALLBACKS; + tmp_options = options & ~BEV_OPT_THREADSAFE; + + bufev1 = bufferevent_pair_elt_new(base, options); + if (!bufev1) + return -1; + bufev2 = bufferevent_pair_elt_new(base, tmp_options); + if (!bufev2) { + bufferevent_free(downcast(bufev1)); + return -1; + } + + if (options & BEV_OPT_THREADSAFE) { + /*XXXX check return */ + bufferevent_enable_locking_(downcast(bufev2), bufev1->bev.lock); + } + + bufev1->partner = bufev2; + bufev2->partner = bufev1; + + evbuffer_freeze(downcast(bufev1)->input, 0); + evbuffer_freeze(downcast(bufev1)->output, 1); + evbuffer_freeze(downcast(bufev2)->input, 0); + evbuffer_freeze(downcast(bufev2)->output, 1); + + pair[0] = downcast(bufev1); + pair[1] = downcast(bufev2); + + return 0; +} + +static void +be_pair_transfer(struct bufferevent *src, struct bufferevent *dst, + int ignore_wm) +{ + size_t dst_size; + size_t n; + + evbuffer_unfreeze(src->output, 1); + evbuffer_unfreeze(dst->input, 0); + + if (dst->wm_read.high) { + dst_size = evbuffer_get_length(dst->input); + if (dst_size < dst->wm_read.high) { + n = dst->wm_read.high - dst_size; + evbuffer_remove_buffer(src->output, dst->input, n); + } else { + if (!ignore_wm) + goto done; + n = evbuffer_get_length(src->output); + evbuffer_add_buffer(dst->input, src->output); + } + } else { + n = evbuffer_get_length(src->output); + evbuffer_add_buffer(dst->input, src->output); + } + + if (n) { + BEV_RESET_GENERIC_READ_TIMEOUT(dst); + + if (evbuffer_get_length(dst->output)) + BEV_RESET_GENERIC_WRITE_TIMEOUT(dst); + else + BEV_DEL_GENERIC_WRITE_TIMEOUT(dst); + } + + bufferevent_trigger_nolock_(dst, EV_READ, 0); + bufferevent_trigger_nolock_(src, EV_WRITE, 0); +done: + evbuffer_freeze(src->output, 1); + evbuffer_freeze(dst->input, 0); +} + +static inline int +be_pair_wants_to_talk(struct bufferevent_pair *src, + struct bufferevent_pair 
*dst) +{ + return (downcast(src)->enabled & EV_WRITE) && + (downcast(dst)->enabled & EV_READ) && + !dst->bev.read_suspended && + evbuffer_get_length(downcast(src)->output); +} + +static void +be_pair_outbuf_cb(struct evbuffer *outbuf, + const struct evbuffer_cb_info *info, void *arg) +{ + struct bufferevent_pair *bev_pair = arg; + struct bufferevent_pair *partner = bev_pair->partner; + + incref_and_lock(downcast(bev_pair)); + + if (info->n_added > info->n_deleted && partner) { + /* We got more data. If the other side's reading, then + hand it over. */ + if (be_pair_wants_to_talk(bev_pair, partner)) { + be_pair_transfer(downcast(bev_pair), downcast(partner), 0); + } + } + + decref_and_unlock(downcast(bev_pair)); +} + +static int +be_pair_enable(struct bufferevent *bufev, short events) +{ + struct bufferevent_pair *bev_p = upcast(bufev); + struct bufferevent_pair *partner = bev_p->partner; + + incref_and_lock(bufev); + + if (events & EV_READ) { + BEV_RESET_GENERIC_READ_TIMEOUT(bufev); + } + if ((events & EV_WRITE) && evbuffer_get_length(bufev->output)) + BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev); + + /* We're starting to read! Does the other side have anything to write?*/ + if ((events & EV_READ) && partner && + be_pair_wants_to_talk(partner, bev_p)) { + be_pair_transfer(downcast(partner), bufev, 0); + } + /* We're starting to write! Does the other side want to read? */ + if ((events & EV_WRITE) && partner && + be_pair_wants_to_talk(bev_p, partner)) { + be_pair_transfer(bufev, downcast(partner), 0); + } + decref_and_unlock(bufev); + return 0; +} + +static int +be_pair_disable(struct bufferevent *bev, short events) +{ + if (events & EV_READ) { + BEV_DEL_GENERIC_READ_TIMEOUT(bev); + } + if (events & EV_WRITE) { + BEV_DEL_GENERIC_WRITE_TIMEOUT(bev); + } + return 0; +} + +static void +be_pair_unlink(struct bufferevent *bev) +{ + struct bufferevent_pair *bev_p = upcast(bev); + + if (bev_p->partner) { + bev_p->unlinked_partner = bev_p->partner; + bev_p->partner->partner = NULL; + bev_p->partner = NULL; + } +} + +/* Free *shared* lock in the latest be (since we share it between two of them). */ +static void +be_pair_destruct(struct bufferevent *bev) +{ + struct bufferevent_pair *bev_p = upcast(bev); + + /* Transfer ownership of the lock into partner, otherwise we will use + * already free'd lock during freeing second bev, see next example: + * + * bev1->own_lock = 1 + * bev2->own_lock = 0 + * bev2->lock = bev1->lock + * + * bufferevent_free(bev1) # refcnt == 0 -> unlink + * bufferevent_free(bev2) # refcnt == 0 -> unlink + * + * event_base_free() -> finilizers -> EVTHREAD_FREE_LOCK(bev1->lock) + * -> BEV_LOCK(bev2->lock) <-- already freed + * + * Where bev1 == pair[0], bev2 == pair[1]. 
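As a point of reference for how the pair transport above is typically driven from application code, here is a minimal usage sketch of the public API (illustrative only, not part of the vendored file; the callback and helper names are invented for the example):

    #include <event2/bufferevent.h>
    #include <event2/buffer.h>
    #include <event2/event.h>

    /* Illustrative: data written on one side of the pair is handed to the
     * other side by be_pair_transfer() and delivered via deferred callbacks. */
    static void pair_readcb(struct bufferevent *bev, void *ctx)
    {
            char buf[64];
            size_t n = bufferevent_read(bev, buf, sizeof(buf));
            /* `n` bytes arrived from the partner bufferevent. */
            (void)n;
    }

    static int setup_pair(struct event_base *base)
    {
            struct bufferevent *pair[2];

            if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) < 0)
                    return -1;
            bufferevent_setcb(pair[0], pair_readcb, NULL, NULL, NULL);
            bufferevent_enable(pair[0], EV_READ);
            bufferevent_enable(pair[1], EV_WRITE);
            /* This write ends up in pair[0]'s input buffer. */
            bufferevent_write(pair[1], "ping", 4);
            return 0;
    }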
+ */ + if (bev_p->unlinked_partner && bev_p->bev.own_lock) { + bev_p->unlinked_partner->bev.own_lock = 1; + bev_p->bev.own_lock = 0; + } + bev_p->unlinked_partner = NULL; +} + +static int +be_pair_flush(struct bufferevent *bev, short iotype, + enum bufferevent_flush_mode mode) +{ + struct bufferevent_pair *bev_p = upcast(bev); + struct bufferevent *partner; + + if (!bev_p->partner) + return -1; + + if (mode == BEV_NORMAL) + return 0; + + incref_and_lock(bev); + + partner = downcast(bev_p->partner); + + if ((iotype & EV_READ) != 0) + be_pair_transfer(partner, bev, 1); + + if ((iotype & EV_WRITE) != 0) + be_pair_transfer(bev, partner, 1); + + if (mode == BEV_FINISHED) { + short what = BEV_EVENT_EOF; + if (iotype & EV_READ) + what |= BEV_EVENT_WRITING; + if (iotype & EV_WRITE) + what |= BEV_EVENT_READING; + bufferevent_run_eventcb_(partner, what, 0); + } + decref_and_unlock(bev); + return 0; +} + +struct bufferevent * +bufferevent_pair_get_partner(struct bufferevent *bev) +{ + struct bufferevent_pair *bev_p; + struct bufferevent *partner = NULL; + bev_p = upcast(bev); + if (! bev_p) + return NULL; + + incref_and_lock(bev); + if (bev_p->partner) + partner = downcast(bev_p->partner); + decref_and_unlock(bev); + return partner; +} + +const struct bufferevent_ops bufferevent_ops_pair = { + "pair_elt", + evutil_offsetof(struct bufferevent_pair, bev.bev), + be_pair_enable, + be_pair_disable, + be_pair_unlink, + be_pair_destruct, + bufferevent_generic_adj_timeouts_, + be_pair_flush, + NULL, /* ctrl */ +}; diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_ratelim.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_ratelim.c new file mode 100644 index 00000000..25874968 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_ratelim.c @@ -0,0 +1,1089 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * Copyright (c) 2002-2006 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "evconfig-private.h" + +#include +#include +#include +#include + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/util.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_struct.h" +#include "event2/buffer.h" + +#include "ratelim-internal.h" + +#include "bufferevent-internal.h" +#include "mm-internal.h" +#include "util-internal.h" +#include "event-internal.h" + +int +ev_token_bucket_init_(struct ev_token_bucket *bucket, + const struct ev_token_bucket_cfg *cfg, + ev_uint32_t current_tick, + int reinitialize) +{ + if (reinitialize) { + /* on reinitialization, we only clip downwards, since we've + already used who-knows-how-much bandwidth this tick. We + leave "last_updated" as it is; the next update will add the + appropriate amount of bandwidth to the bucket. + */ + if (bucket->read_limit > (ev_int64_t) cfg->read_maximum) + bucket->read_limit = cfg->read_maximum; + if (bucket->write_limit > (ev_int64_t) cfg->write_maximum) + bucket->write_limit = cfg->write_maximum; + } else { + bucket->read_limit = cfg->read_rate; + bucket->write_limit = cfg->write_rate; + bucket->last_updated = current_tick; + } + return 0; +} + +int +ev_token_bucket_update_(struct ev_token_bucket *bucket, + const struct ev_token_bucket_cfg *cfg, + ev_uint32_t current_tick) +{ + /* It's okay if the tick number overflows, since we'll just + * wrap around when we do the unsigned substraction. */ + unsigned n_ticks = current_tick - bucket->last_updated; + + /* Make sure some ticks actually happened, and that time didn't + * roll back. */ + if (n_ticks == 0 || n_ticks > INT_MAX) + return 0; + + /* Naively, we would say + bucket->limit += n_ticks * cfg->rate; + + if (bucket->limit > cfg->maximum) + bucket->limit = cfg->maximum; + + But we're worried about overflow, so we do it like this: + */ + + if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate) + bucket->read_limit = cfg->read_maximum; + else + bucket->read_limit += n_ticks * cfg->read_rate; + + + if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate) + bucket->write_limit = cfg->write_maximum; + else + bucket->write_limit += n_ticks * cfg->write_rate; + + + bucket->last_updated = current_tick; + + return 1; +} + +static inline void +bufferevent_update_buckets(struct bufferevent_private *bev) +{ + /* Must hold lock on bev. */ + struct timeval now; + unsigned tick; + event_base_gettimeofday_cached(bev->bev.ev_base, &now); + tick = ev_token_bucket_get_tick_(&now, bev->rate_limiting->cfg); + if (tick != bev->rate_limiting->limit.last_updated) + ev_token_bucket_update_(&bev->rate_limiting->limit, + bev->rate_limiting->cfg, tick); +} + +ev_uint32_t +ev_token_bucket_get_tick_(const struct timeval *tv, + const struct ev_token_bucket_cfg *cfg) +{ + /* This computation uses two multiplies and a divide. We could do + * fewer if we knew that the tick length was an integer number of + * seconds, or if we knew it divided evenly into a second. We should + * investigate that more. + */ + + /* We cast to an ev_uint64_t first, since we don't want to overflow + * before we do the final divide. */ + ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000; + return (unsigned)(msec / cfg->msec_per_tick); +} + +struct ev_token_bucket_cfg * +ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst, + size_t write_rate, size_t write_burst, + const struct timeval *tick_len) +{ + struct ev_token_bucket_cfg *r; + struct timeval g; + if (! 
tick_len) { + g.tv_sec = 1; + g.tv_usec = 0; + tick_len = &g; + } + if (read_rate > read_burst || write_rate > write_burst || + read_rate < 1 || write_rate < 1) + return NULL; + if (read_rate > EV_RATE_LIMIT_MAX || + write_rate > EV_RATE_LIMIT_MAX || + read_burst > EV_RATE_LIMIT_MAX || + write_burst > EV_RATE_LIMIT_MAX) + return NULL; + r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg)); + if (!r) + return NULL; + r->read_rate = read_rate; + r->write_rate = write_rate; + r->read_maximum = read_burst; + r->write_maximum = write_burst; + memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval)); + r->msec_per_tick = (tick_len->tv_sec * 1000) + + (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000; + return r; +} + +void +ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg) +{ + mm_free(cfg); +} + +/* Default values for max_single_read & max_single_write variables. */ +#define MAX_SINGLE_READ_DEFAULT 16384 +#define MAX_SINGLE_WRITE_DEFAULT 16384 + +#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0) +#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0) + +static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g); +static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g); +static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g); +static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g); + +/** Helper: figure out the maximum amount we should write if is_write, or + the maximum amount we should read if is_read. Return that maximum, or + 0 if our bucket is wholly exhausted. + */ +static inline ev_ssize_t +bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write) +{ + /* needs lock on bev. */ + ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read; + +#define LIM(x) \ + (is_write ? (x).write_limit : (x).read_limit) + +#define GROUP_SUSPENDED(g) \ + (is_write ? (g)->write_suspended : (g)->read_suspended) + + /* Sets max_so_far to MIN(x, max_so_far) */ +#define CLAMPTO(x) \ + do { \ + if (max_so_far > (x)) \ + max_so_far = (x); \ + } while (0); + + if (!bev->rate_limiting) + return max_so_far; + + /* If rate-limiting is enabled at all, update the appropriate + bucket, and take the smaller of our rate limit and the group + rate limit. + */ + + if (bev->rate_limiting->cfg) { + bufferevent_update_buckets(bev); + max_so_far = LIM(bev->rate_limiting->limit); + } + if (bev->rate_limiting->group) { + struct bufferevent_rate_limit_group *g = + bev->rate_limiting->group; + ev_ssize_t share; + LOCK_GROUP(g); + if (GROUP_SUSPENDED(g)) { + /* We can get here if we failed to lock this + * particular bufferevent while suspending the whole + * group. */ + if (is_write) + bufferevent_suspend_write_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + else + bufferevent_suspend_read_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + share = 0; + } else { + /* XXXX probably we should divide among the active + * members, not the total members. 
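For orientation, the configuration objects handled above are created and attached through libevent's public rate-limiting calls; a minimal sketch, assuming a bufferevent `bev` created elsewhere (the rates and burst sizes are arbitrary example values):

    #include <event2/bufferevent.h>
    #include <event2/event.h>

    /* Illustrative: cap a bufferevent at ~64 KiB per one-second tick in each
     * direction, allowing a 256 KiB burst. */
    static int limit_bufferevent(struct bufferevent *bev)
    {
            struct timeval tick = { 1, 0 };     /* one token-bucket tick = 1 s */
            struct ev_token_bucket_cfg *cfg;

            cfg = ev_token_bucket_cfg_new(65536, 262144,   /* read rate / burst */
                65536, 262144,                             /* write rate / burst */
                &tick);
            if (!cfg)
                    return -1;
            /* The bufferevent keeps referring to cfg (it is not reference
             * counted above), so only free it after the bufferevent is gone. */
            return bufferevent_set_rate_limit(bev, cfg);
    }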
*/ + share = LIM(g->rate_limit) / g->n_members; + if (share < g->min_share) + share = g->min_share; + } + UNLOCK_GROUP(g); + CLAMPTO(share); + } + + if (max_so_far < 0) + max_so_far = 0; + return max_so_far; +} + +ev_ssize_t +bufferevent_get_read_max_(struct bufferevent_private *bev) +{ + return bufferevent_get_rlim_max_(bev, 0); +} + +ev_ssize_t +bufferevent_get_write_max_(struct bufferevent_private *bev) +{ + return bufferevent_get_rlim_max_(bev, 1); +} + +int +bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes) +{ + /* XXXXX Make sure all users of this function check its return value */ + int r = 0; + /* need to hold lock on bev */ + if (!bev->rate_limiting) + return 0; + + if (bev->rate_limiting->cfg) { + bev->rate_limiting->limit.read_limit -= bytes; + if (bev->rate_limiting->limit.read_limit <= 0) { + bufferevent_suspend_read_(&bev->bev, BEV_SUSPEND_BW); + if (event_add(&bev->rate_limiting->refill_bucket_event, + &bev->rate_limiting->cfg->tick_timeout) < 0) + r = -1; + } else if (bev->read_suspended & BEV_SUSPEND_BW) { + if (!(bev->write_suspended & BEV_SUSPEND_BW)) + event_del(&bev->rate_limiting->refill_bucket_event); + bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW); + } + } + + if (bev->rate_limiting->group) { + LOCK_GROUP(bev->rate_limiting->group); + bev->rate_limiting->group->rate_limit.read_limit -= bytes; + bev->rate_limiting->group->total_read += bytes; + if (bev->rate_limiting->group->rate_limit.read_limit <= 0) { + bev_group_suspend_reading_(bev->rate_limiting->group); + } else if (bev->rate_limiting->group->read_suspended) { + bev_group_unsuspend_reading_(bev->rate_limiting->group); + } + UNLOCK_GROUP(bev->rate_limiting->group); + } + + return r; +} + +int +bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes) +{ + /* XXXXX Make sure all users of this function check its return value */ + int r = 0; + /* need to hold lock */ + if (!bev->rate_limiting) + return 0; + + if (bev->rate_limiting->cfg) { + bev->rate_limiting->limit.write_limit -= bytes; + if (bev->rate_limiting->limit.write_limit <= 0) { + bufferevent_suspend_write_(&bev->bev, BEV_SUSPEND_BW); + if (event_add(&bev->rate_limiting->refill_bucket_event, + &bev->rate_limiting->cfg->tick_timeout) < 0) + r = -1; + } else if (bev->write_suspended & BEV_SUSPEND_BW) { + if (!(bev->read_suspended & BEV_SUSPEND_BW)) + event_del(&bev->rate_limiting->refill_bucket_event); + bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW); + } + } + + if (bev->rate_limiting->group) { + LOCK_GROUP(bev->rate_limiting->group); + bev->rate_limiting->group->rate_limit.write_limit -= bytes; + bev->rate_limiting->group->total_written += bytes; + if (bev->rate_limiting->group->rate_limit.write_limit <= 0) { + bev_group_suspend_writing_(bev->rate_limiting->group); + } else if (bev->rate_limiting->group->write_suspended) { + bev_group_unsuspend_writing_(bev->rate_limiting->group); + } + UNLOCK_GROUP(bev->rate_limiting->group); + } + + return r; +} + +/** Stop reading on every bufferevent in g */ +static int +bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g) +{ + /* Needs group lock */ + struct bufferevent_private *bev; + g->read_suspended = 1; + g->pending_unsuspend_read = 0; + + /* Note that in this loop we call EVLOCK_TRY_LOCK_ instead of BEV_LOCK, + to prevent a deadlock. (Ordinarily, the group lock nests inside + the bufferevent locks. 
If we are unable to lock any individual + bufferevent, it will find out later when it looks at its limit + and sees that its group is suspended.) + */ + LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) { + if (EVLOCK_TRY_LOCK_(bev->lock)) { + bufferevent_suspend_read_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + EVLOCK_UNLOCK(bev->lock, 0); + } + } + return 0; +} + +/** Stop writing on every bufferevent in g */ +static int +bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g) +{ + /* Needs group lock */ + struct bufferevent_private *bev; + g->write_suspended = 1; + g->pending_unsuspend_write = 0; + LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) { + if (EVLOCK_TRY_LOCK_(bev->lock)) { + bufferevent_suspend_write_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + EVLOCK_UNLOCK(bev->lock, 0); + } + } + return 0; +} + +/** Timer callback invoked on a single bufferevent with one or more exhausted + buckets when they are ready to refill. */ +static void +bev_refill_callback_(evutil_socket_t fd, short what, void *arg) +{ + unsigned tick; + struct timeval now; + struct bufferevent_private *bev = arg; + int again = 0; + BEV_LOCK(&bev->bev); + if (!bev->rate_limiting || !bev->rate_limiting->cfg) { + BEV_UNLOCK(&bev->bev); + return; + } + + /* First, update the bucket */ + event_base_gettimeofday_cached(bev->bev.ev_base, &now); + tick = ev_token_bucket_get_tick_(&now, + bev->rate_limiting->cfg); + ev_token_bucket_update_(&bev->rate_limiting->limit, + bev->rate_limiting->cfg, + tick); + + /* Now unsuspend any read/write operations as appropriate. */ + if ((bev->read_suspended & BEV_SUSPEND_BW)) { + if (bev->rate_limiting->limit.read_limit > 0) + bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW); + else + again = 1; + } + if ((bev->write_suspended & BEV_SUSPEND_BW)) { + if (bev->rate_limiting->limit.write_limit > 0) + bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW); + else + again = 1; + } + if (again) { + /* One or more of the buckets may need another refill if they + started negative. + + XXXX if we need to be quiet for more ticks, we should + maybe figure out what timeout we really want. + */ + /* XXXX Handle event_add failure somehow */ + event_add(&bev->rate_limiting->refill_bucket_event, + &bev->rate_limiting->cfg->tick_timeout); + } + BEV_UNLOCK(&bev->bev); +} + +/** Helper: grab a random element from a bufferevent group. + * + * Requires that we hold the lock on the group. + */ +static struct bufferevent_private * +bev_group_random_element_(struct bufferevent_rate_limit_group *group) +{ + int which; + struct bufferevent_private *bev; + + /* requires group lock */ + + if (!group->n_members) + return NULL; + + EVUTIL_ASSERT(! LIST_EMPTY(&group->members)); + + which = evutil_weakrand_range_(&group->weakrand_seed, group->n_members); + + bev = LIST_FIRST(&group->members); + while (which--) + bev = LIST_NEXT(bev, rate_limiting->next_in_group); + + return bev; +} + +/** Iterate over the elements of a rate-limiting group 'g' with a random + starting point, assigning each to the variable 'bev', and executing the + block 'block'. + + We do this in a half-baked effort to get fairness among group members. + XXX Round-robin or some kind of priority queue would be even more fair. 
+ */ +#define FOREACH_RANDOM_ORDER(block) \ + do { \ + first = bev_group_random_element_(g); \ + for (bev = first; bev != LIST_END(&g->members); \ + bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \ + block ; \ + } \ + for (bev = LIST_FIRST(&g->members); bev && bev != first; \ + bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \ + block ; \ + } \ + } while (0) + +static void +bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g) +{ + int again = 0; + struct bufferevent_private *bev, *first; + + g->read_suspended = 0; + FOREACH_RANDOM_ORDER({ + if (EVLOCK_TRY_LOCK_(bev->lock)) { + bufferevent_unsuspend_read_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + EVLOCK_UNLOCK(bev->lock, 0); + } else { + again = 1; + } + }); + g->pending_unsuspend_read = again; +} + +static void +bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g) +{ + int again = 0; + struct bufferevent_private *bev, *first; + g->write_suspended = 0; + + FOREACH_RANDOM_ORDER({ + if (EVLOCK_TRY_LOCK_(bev->lock)) { + bufferevent_unsuspend_write_(&bev->bev, + BEV_SUSPEND_BW_GROUP); + EVLOCK_UNLOCK(bev->lock, 0); + } else { + again = 1; + } + }); + g->pending_unsuspend_write = again; +} + +/** Callback invoked every tick to add more elements to the group bucket + and unsuspend group members as needed. + */ +static void +bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg) +{ + struct bufferevent_rate_limit_group *g = arg; + unsigned tick; + struct timeval now; + + event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now); + + LOCK_GROUP(g); + + tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg); + ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick); + + if (g->pending_unsuspend_read || + (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) { + bev_group_unsuspend_reading_(g); + } + if (g->pending_unsuspend_write || + (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){ + bev_group_unsuspend_writing_(g); + } + + /* XXXX Rather than waiting to the next tick to unsuspend stuff + * with pending_unsuspend_write/read, we should do it on the + * next iteration of the mainloop. 
+ */ + + UNLOCK_GROUP(g); +} + +int +bufferevent_set_rate_limit(struct bufferevent *bev, + struct ev_token_bucket_cfg *cfg) +{ + struct bufferevent_private *bevp = BEV_UPCAST(bev); + int r = -1; + struct bufferevent_rate_limit *rlim; + struct timeval now; + ev_uint32_t tick; + int reinit = 0, suspended = 0; + /* XXX reference-count cfg */ + + BEV_LOCK(bev); + + if (cfg == NULL) { + if (bevp->rate_limiting) { + rlim = bevp->rate_limiting; + rlim->cfg = NULL; + bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW); + bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW); + if (event_initialized(&rlim->refill_bucket_event)) + event_del(&rlim->refill_bucket_event); + } + r = 0; + goto done; + } + + event_base_gettimeofday_cached(bev->ev_base, &now); + tick = ev_token_bucket_get_tick_(&now, cfg); + + if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) { + /* no-op */ + r = 0; + goto done; + } + if (bevp->rate_limiting == NULL) { + rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit)); + if (!rlim) + goto done; + bevp->rate_limiting = rlim; + } else { + rlim = bevp->rate_limiting; + } + reinit = rlim->cfg != NULL; + + rlim->cfg = cfg; + ev_token_bucket_init_(&rlim->limit, cfg, tick, reinit); + + if (reinit) { + EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event)); + event_del(&rlim->refill_bucket_event); + } + event_assign(&rlim->refill_bucket_event, bev->ev_base, + -1, EV_FINALIZE, bev_refill_callback_, bevp); + + if (rlim->limit.read_limit > 0) { + bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW); + } else { + bufferevent_suspend_read_(bev, BEV_SUSPEND_BW); + suspended=1; + } + if (rlim->limit.write_limit > 0) { + bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW); + } else { + bufferevent_suspend_write_(bev, BEV_SUSPEND_BW); + suspended = 1; + } + + if (suspended) + event_add(&rlim->refill_bucket_event, &cfg->tick_timeout); + + r = 0; + +done: + BEV_UNLOCK(bev); + return r; +} + +struct bufferevent_rate_limit_group * +bufferevent_rate_limit_group_new(struct event_base *base, + const struct ev_token_bucket_cfg *cfg) +{ + struct bufferevent_rate_limit_group *g; + struct timeval now; + ev_uint32_t tick; + + event_base_gettimeofday_cached(base, &now); + tick = ev_token_bucket_get_tick_(&now, cfg); + + g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group)); + if (!g) + return NULL; + memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg)); + LIST_INIT(&g->members); + + ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0); + + event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE, + bev_group_refill_callback_, g); + /*XXXX handle event_add failure */ + event_add(&g->master_refill_event, &cfg->tick_timeout); + + EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + + bufferevent_rate_limit_group_set_min_share(g, 64); + + evutil_weakrand_seed_(&g->weakrand_seed, + (ev_uint32_t) ((now.tv_sec + now.tv_usec) + (ev_intptr_t)g)); + + return g; +} + +int +bufferevent_rate_limit_group_set_cfg( + struct bufferevent_rate_limit_group *g, + const struct ev_token_bucket_cfg *cfg) +{ + int same_tick; + if (!g || !cfg) + return -1; + + LOCK_GROUP(g); + same_tick = evutil_timercmp( + &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==); + memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg)); + + if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum) + g->rate_limit.read_limit = cfg->read_maximum; + if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum) + g->rate_limit.write_limit = cfg->write_maximum; + + if (!same_tick) { + /* This can cause a 
hiccup in the schedule */ + event_add(&g->master_refill_event, &cfg->tick_timeout); + } + + /* The new limits might force us to adjust min_share differently. */ + bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share); + + UNLOCK_GROUP(g); + return 0; +} + +int +bufferevent_rate_limit_group_set_min_share( + struct bufferevent_rate_limit_group *g, + size_t share) +{ + if (share > EV_SSIZE_MAX) + return -1; + + g->configured_min_share = share; + + /* Can't set share to less than the one-tick maximum. IOW, at steady + * state, at least one connection can go per tick. */ + if (share > g->rate_limit_cfg.read_rate) + share = g->rate_limit_cfg.read_rate; + if (share > g->rate_limit_cfg.write_rate) + share = g->rate_limit_cfg.write_rate; + + g->min_share = share; + return 0; +} + +void +bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g) +{ + LOCK_GROUP(g); + EVUTIL_ASSERT(0 == g->n_members); + event_del(&g->master_refill_event); + UNLOCK_GROUP(g); + EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + mm_free(g); +} + +int +bufferevent_add_to_rate_limit_group(struct bufferevent *bev, + struct bufferevent_rate_limit_group *g) +{ + int wsuspend, rsuspend; + struct bufferevent_private *bevp = BEV_UPCAST(bev); + BEV_LOCK(bev); + + if (!bevp->rate_limiting) { + struct bufferevent_rate_limit *rlim; + rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit)); + if (!rlim) { + BEV_UNLOCK(bev); + return -1; + } + event_assign(&rlim->refill_bucket_event, bev->ev_base, + -1, EV_FINALIZE, bev_refill_callback_, bevp); + bevp->rate_limiting = rlim; + } + + if (bevp->rate_limiting->group == g) { + BEV_UNLOCK(bev); + return 0; + } + if (bevp->rate_limiting->group) + bufferevent_remove_from_rate_limit_group(bev); + + LOCK_GROUP(g); + bevp->rate_limiting->group = g; + ++g->n_members; + LIST_INSERT_HEAD(&g->members, bevp, rate_limiting->next_in_group); + + rsuspend = g->read_suspended; + wsuspend = g->write_suspended; + + UNLOCK_GROUP(g); + + if (rsuspend) + bufferevent_suspend_read_(bev, BEV_SUSPEND_BW_GROUP); + if (wsuspend) + bufferevent_suspend_write_(bev, BEV_SUSPEND_BW_GROUP); + + BEV_UNLOCK(bev); + return 0; +} + +int +bufferevent_remove_from_rate_limit_group(struct bufferevent *bev) +{ + return bufferevent_remove_from_rate_limit_group_internal_(bev, 1); +} + +int +bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev, + int unsuspend) +{ + struct bufferevent_private *bevp = BEV_UPCAST(bev); + BEV_LOCK(bev); + if (bevp->rate_limiting && bevp->rate_limiting->group) { + struct bufferevent_rate_limit_group *g = + bevp->rate_limiting->group; + LOCK_GROUP(g); + bevp->rate_limiting->group = NULL; + --g->n_members; + LIST_REMOVE(bevp, rate_limiting->next_in_group); + UNLOCK_GROUP(g); + } + if (unsuspend) { + bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW_GROUP); + bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW_GROUP); + } + BEV_UNLOCK(bev); + return 0; +} + +/* === + * API functions to expose rate limits. + * + * Don't use these from inside Libevent; they're meant to be for use by + * the program. 
+ * === */ + +/* Mostly you don't want to use this function from inside libevent; + * bufferevent_get_read_max_() is more likely what you want*/ +ev_ssize_t +bufferevent_get_read_limit(struct bufferevent *bev) +{ + ev_ssize_t r; + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + if (bevp->rate_limiting && bevp->rate_limiting->cfg) { + bufferevent_update_buckets(bevp); + r = bevp->rate_limiting->limit.read_limit; + } else { + r = EV_SSIZE_MAX; + } + BEV_UNLOCK(bev); + return r; +} + +/* Mostly you don't want to use this function from inside libevent; + * bufferevent_get_write_max_() is more likely what you want*/ +ev_ssize_t +bufferevent_get_write_limit(struct bufferevent *bev) +{ + ev_ssize_t r; + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + if (bevp->rate_limiting && bevp->rate_limiting->cfg) { + bufferevent_update_buckets(bevp); + r = bevp->rate_limiting->limit.write_limit; + } else { + r = EV_SSIZE_MAX; + } + BEV_UNLOCK(bev); + return r; +} + +int +bufferevent_set_max_single_read(struct bufferevent *bev, size_t size) +{ + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + if (size == 0 || size > EV_SSIZE_MAX) + bevp->max_single_read = MAX_SINGLE_READ_DEFAULT; + else + bevp->max_single_read = size; + BEV_UNLOCK(bev); + return 0; +} + +int +bufferevent_set_max_single_write(struct bufferevent *bev, size_t size) +{ + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + if (size == 0 || size > EV_SSIZE_MAX) + bevp->max_single_write = MAX_SINGLE_WRITE_DEFAULT; + else + bevp->max_single_write = size; + BEV_UNLOCK(bev); + return 0; +} + +ev_ssize_t +bufferevent_get_max_single_read(struct bufferevent *bev) +{ + ev_ssize_t r; + + BEV_LOCK(bev); + r = BEV_UPCAST(bev)->max_single_read; + BEV_UNLOCK(bev); + return r; +} + +ev_ssize_t +bufferevent_get_max_single_write(struct bufferevent *bev) +{ + ev_ssize_t r; + + BEV_LOCK(bev); + r = BEV_UPCAST(bev)->max_single_write; + BEV_UNLOCK(bev); + return r; +} + +ev_ssize_t +bufferevent_get_max_to_read(struct bufferevent *bev) +{ + ev_ssize_t r; + BEV_LOCK(bev); + r = bufferevent_get_read_max_(BEV_UPCAST(bev)); + BEV_UNLOCK(bev); + return r; +} + +ev_ssize_t +bufferevent_get_max_to_write(struct bufferevent *bev) +{ + ev_ssize_t r; + BEV_LOCK(bev); + r = bufferevent_get_write_max_(BEV_UPCAST(bev)); + BEV_UNLOCK(bev); + return r; +} + +const struct ev_token_bucket_cfg * +bufferevent_get_token_bucket_cfg(const struct bufferevent *bev) { + struct bufferevent_private *bufev_private = BEV_UPCAST(bev); + struct ev_token_bucket_cfg *cfg; + + BEV_LOCK(bev); + + if (bufev_private->rate_limiting) { + cfg = bufev_private->rate_limiting->cfg; + } else { + cfg = NULL; + } + + BEV_UNLOCK(bev); + + return cfg; +} + +/* Mostly you don't want to use this function from inside libevent; + * bufferevent_get_read_max_() is more likely what you want*/ +ev_ssize_t +bufferevent_rate_limit_group_get_read_limit( + struct bufferevent_rate_limit_group *grp) +{ + ev_ssize_t r; + LOCK_GROUP(grp); + r = grp->rate_limit.read_limit; + UNLOCK_GROUP(grp); + return r; +} + +/* Mostly you don't want to use this function from inside libevent; + * bufferevent_get_write_max_() is more likely what you want. 
*/ +ev_ssize_t +bufferevent_rate_limit_group_get_write_limit( + struct bufferevent_rate_limit_group *grp) +{ + ev_ssize_t r; + LOCK_GROUP(grp); + r = grp->rate_limit.write_limit; + UNLOCK_GROUP(grp); + return r; +} + +int +bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr) +{ + int r = 0; + ev_ssize_t old_limit, new_limit; + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg); + old_limit = bevp->rate_limiting->limit.read_limit; + + new_limit = (bevp->rate_limiting->limit.read_limit -= decr); + if (old_limit > 0 && new_limit <= 0) { + bufferevent_suspend_read_(bev, BEV_SUSPEND_BW); + if (event_add(&bevp->rate_limiting->refill_bucket_event, + &bevp->rate_limiting->cfg->tick_timeout) < 0) + r = -1; + } else if (old_limit <= 0 && new_limit > 0) { + if (!(bevp->write_suspended & BEV_SUSPEND_BW)) + event_del(&bevp->rate_limiting->refill_bucket_event); + bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW); + } + + BEV_UNLOCK(bev); + return r; +} + +int +bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr) +{ + /* XXXX this is mostly copy-and-paste from + * bufferevent_decrement_read_limit */ + int r = 0; + ev_ssize_t old_limit, new_limit; + struct bufferevent_private *bevp; + BEV_LOCK(bev); + bevp = BEV_UPCAST(bev); + EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg); + old_limit = bevp->rate_limiting->limit.write_limit; + + new_limit = (bevp->rate_limiting->limit.write_limit -= decr); + if (old_limit > 0 && new_limit <= 0) { + bufferevent_suspend_write_(bev, BEV_SUSPEND_BW); + if (event_add(&bevp->rate_limiting->refill_bucket_event, + &bevp->rate_limiting->cfg->tick_timeout) < 0) + r = -1; + } else if (old_limit <= 0 && new_limit > 0) { + if (!(bevp->read_suspended & BEV_SUSPEND_BW)) + event_del(&bevp->rate_limiting->refill_bucket_event); + bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW); + } + + BEV_UNLOCK(bev); + return r; +} + +int +bufferevent_rate_limit_group_decrement_read( + struct bufferevent_rate_limit_group *grp, ev_ssize_t decr) +{ + int r = 0; + ev_ssize_t old_limit, new_limit; + LOCK_GROUP(grp); + old_limit = grp->rate_limit.read_limit; + new_limit = (grp->rate_limit.read_limit -= decr); + + if (old_limit > 0 && new_limit <= 0) { + bev_group_suspend_reading_(grp); + } else if (old_limit <= 0 && new_limit > 0) { + bev_group_unsuspend_reading_(grp); + } + + UNLOCK_GROUP(grp); + return r; +} + +int +bufferevent_rate_limit_group_decrement_write( + struct bufferevent_rate_limit_group *grp, ev_ssize_t decr) +{ + int r = 0; + ev_ssize_t old_limit, new_limit; + LOCK_GROUP(grp); + old_limit = grp->rate_limit.write_limit; + new_limit = (grp->rate_limit.write_limit -= decr); + + if (old_limit > 0 && new_limit <= 0) { + bev_group_suspend_writing_(grp); + } else if (old_limit <= 0 && new_limit > 0) { + bev_group_unsuspend_writing_(grp); + } + + UNLOCK_GROUP(grp); + return r; +} + +void +bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp, + ev_uint64_t *total_read_out, ev_uint64_t *total_written_out) +{ + EVUTIL_ASSERT(grp != NULL); + if (total_read_out) + *total_read_out = grp->total_read; + if (total_written_out) + *total_written_out = grp->total_written; +} + +void +bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp) +{ + grp->total_read = grp->total_written = 0; +} + +int +bufferevent_ratelim_init_(struct bufferevent_private *bev) +{ + bev->rate_limiting = NULL; + bev->max_single_read 
= MAX_SINGLE_READ_DEFAULT; + bev->max_single_write = MAX_SINGLE_WRITE_DEFAULT; + + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/bufferevent_sock.c b/probe-busybox/libevent-2.1.11-stable/bufferevent_sock.c new file mode 100644 index 00000000..f275b023 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/bufferevent_sock.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * Copyright (c) 2002-2006 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#include +#include +#include +#include +#ifdef EVENT__HAVE_STDARG_H +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif + +#ifdef _WIN32 +#include +#include +#endif + +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#ifdef EVENT__HAVE_NETINET_IN_H +#include +#endif +#ifdef EVENT__HAVE_NETINET_IN6_H +#include +#endif + +#include "event2/util.h" +#include "event2/bufferevent.h" +#include "event2/buffer.h" +#include "event2/bufferevent_struct.h" +#include "event2/bufferevent_compat.h" +#include "event2/event.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "bufferevent-internal.h" +#include "util-internal.h" +#ifdef _WIN32 +#include "iocp-internal.h" +#endif + +/* prototypes */ +static int be_socket_enable(struct bufferevent *, short); +static int be_socket_disable(struct bufferevent *, short); +static void be_socket_destruct(struct bufferevent *); +static int be_socket_flush(struct bufferevent *, short, enum bufferevent_flush_mode); +static int be_socket_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *); + +static void be_socket_setfd(struct bufferevent *, evutil_socket_t); + +const struct bufferevent_ops bufferevent_ops_socket = { + "socket", + evutil_offsetof(struct bufferevent_private, bev), + be_socket_enable, + be_socket_disable, + NULL, /* unlink */ + be_socket_destruct, + bufferevent_generic_adj_existing_timeouts_, + be_socket_flush, + be_socket_ctrl, +}; + +const struct sockaddr* +bufferevent_socket_get_conn_address_(struct bufferevent *bev) +{ + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + return (struct sockaddr *)&bev_p->conn_address; +} + +void +bufferevent_socket_set_conn_address_fd_(struct bufferevent *bev, + evutil_socket_t fd) +{ + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + + socklen_t len = sizeof(bev_p->conn_address); + + struct sockaddr *addr = (struct sockaddr *)&bev_p->conn_address; + if (addr->sa_family != AF_UNSPEC) + getpeername(fd, addr, &len); +} + +void +bufferevent_socket_set_conn_address_(struct bufferevent *bev, + struct sockaddr *addr, size_t addrlen) +{ + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + EVUTIL_ASSERT(addrlen <= sizeof(bev_p->conn_address)); + memcpy(&bev_p->conn_address, addr, addrlen); +} + +static void +bufferevent_socket_outbuf_cb(struct evbuffer *buf, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + struct bufferevent *bufev = arg; + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + + if (cbinfo->n_added && + (bufev->enabled & EV_WRITE) && + !event_pending(&bufev->ev_write, EV_WRITE, NULL) && + !bufev_p->write_suspended) { + /* Somebody added data to the buffer, and we would like to + * write, and we were not writing. So, start writing. */ + if (bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1) { + /* Should we log this? */ + } + } +} + +static void +bufferevent_readcb(evutil_socket_t fd, short event, void *arg) +{ + struct bufferevent *bufev = arg; + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + struct evbuffer *input; + int res = 0; + short what = BEV_EVENT_READING; + ev_ssize_t howmuch = -1, readmax=-1; + + bufferevent_incref_and_lock_(bufev); + + if (event == EV_TIMEOUT) { + /* Note that we only check for event==EV_TIMEOUT. 
If + * event==EV_TIMEOUT|EV_READ, we can safely ignore the + * timeout, since a read has occurred */ + what |= BEV_EVENT_TIMEOUT; + goto error; + } + + input = bufev->input; + + /* + * If we have a high watermark configured then we don't want to + * read more data than would make us reach the watermark. + */ + if (bufev->wm_read.high != 0) { + howmuch = bufev->wm_read.high - evbuffer_get_length(input); + /* we somehow lowered the watermark, stop reading */ + if (howmuch <= 0) { + bufferevent_wm_suspend_read(bufev); + goto done; + } + } + readmax = bufferevent_get_read_max_(bufev_p); + if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited" + * uglifies this code. XXXX */ + howmuch = readmax; + if (bufev_p->read_suspended) + goto done; + + evbuffer_unfreeze(input, 0); + res = evbuffer_read(input, fd, (int)howmuch); /* XXXX evbuffer_read would do better to take and return ev_ssize_t */ + evbuffer_freeze(input, 0); + + if (res == -1) { + int err = evutil_socket_geterror(fd); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + goto reschedule; + if (EVUTIL_ERR_CONNECT_REFUSED(err)) { + bufev_p->connection_refused = 1; + goto done; + } + /* error case */ + what |= BEV_EVENT_ERROR; + } else if (res == 0) { + /* eof case */ + what |= BEV_EVENT_EOF; + } + + if (res <= 0) + goto error; + + bufferevent_decrement_read_buckets_(bufev_p, res); + + /* Invoke the user callback - must always be called last */ + bufferevent_trigger_nolock_(bufev, EV_READ, 0); + + goto done; + + reschedule: + goto done; + + error: + bufferevent_disable(bufev, EV_READ); + bufferevent_run_eventcb_(bufev, what, 0); + + done: + bufferevent_decref_and_unlock_(bufev); +} + +static void +bufferevent_writecb(evutil_socket_t fd, short event, void *arg) +{ + struct bufferevent *bufev = arg; + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + int res = 0; + short what = BEV_EVENT_WRITING; + int connected = 0; + ev_ssize_t atmost = -1; + + bufferevent_incref_and_lock_(bufev); + + if (event == EV_TIMEOUT) { + /* Note that we only check for event==EV_TIMEOUT. 
If + * event==EV_TIMEOUT|EV_WRITE, we can safely ignore the + * timeout, since a read has occurred */ + what |= BEV_EVENT_TIMEOUT; + goto error; + } + if (bufev_p->connecting) { + int c = evutil_socket_finished_connecting_(fd); + /* we need to fake the error if the connection was refused + * immediately - usually connection to localhost on BSD */ + if (bufev_p->connection_refused) { + bufev_p->connection_refused = 0; + c = -1; + } + + if (c == 0) + goto done; + + bufev_p->connecting = 0; + if (c < 0) { + event_del(&bufev->ev_write); + event_del(&bufev->ev_read); + bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR, 0); + goto done; + } else { + connected = 1; + bufferevent_socket_set_conn_address_fd_(bufev, fd); +#ifdef _WIN32 + if (BEV_IS_ASYNC(bufev)) { + event_del(&bufev->ev_write); + bufferevent_async_set_connected_(bufev); + bufferevent_run_eventcb_(bufev, + BEV_EVENT_CONNECTED, 0); + goto done; + } +#endif + bufferevent_run_eventcb_(bufev, + BEV_EVENT_CONNECTED, 0); + if (!(bufev->enabled & EV_WRITE) || + bufev_p->write_suspended) { + event_del(&bufev->ev_write); + goto done; + } + } + } + + atmost = bufferevent_get_write_max_(bufev_p); + + if (bufev_p->write_suspended) + goto done; + + if (evbuffer_get_length(bufev->output)) { + evbuffer_unfreeze(bufev->output, 1); + res = evbuffer_write_atmost(bufev->output, fd, atmost); + evbuffer_freeze(bufev->output, 1); + if (res == -1) { + int err = evutil_socket_geterror(fd); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + goto reschedule; + what |= BEV_EVENT_ERROR; + } else if (res == 0) { + /* eof case + XXXX Actually, a 0 on write doesn't indicate + an EOF. An ECONNRESET might be more typical. + */ + what |= BEV_EVENT_EOF; + } + if (res <= 0) + goto error; + + bufferevent_decrement_write_buckets_(bufev_p, res); + } + + if (evbuffer_get_length(bufev->output) == 0) { + event_del(&bufev->ev_write); + } + + /* + * Invoke the user callback if our buffer is drained or below the + * low watermark. 
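Seen from the caller's side, that drained / low-watermark rule is what makes a write callback fire. A minimal sketch, assuming only the public libevent 2.1 API; the on_write / watch_drain names and the 4096-byte threshold are illustrative, not taken from this file:

    #include <event2/bufferevent.h>

    // Invoked once the output evbuffer has drained to, or below, the low
    // EV_WRITE watermark set in watch_drain() (default 0 == fully drained).
    static void on_write(struct bufferevent *bev, void *ctx)
    {
        (void)bev; (void)ctx;   // queue more data, close the connection, ...
    }

    static void watch_drain(struct bufferevent *bev)
    {
        bufferevent_setcb(bev, NULL, on_write, NULL, NULL);
        // Report as soon as fewer than 4096 bytes remain queued for writing.
        bufferevent_setwatermark(bev, EV_WRITE, 4096, 0);
        bufferevent_enable(bev, EV_WRITE);
    }
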
+ */ + if (res || !connected) { + bufferevent_trigger_nolock_(bufev, EV_WRITE, 0); + } + + goto done; + + reschedule: + if (evbuffer_get_length(bufev->output) == 0) { + event_del(&bufev->ev_write); + } + goto done; + + error: + bufferevent_disable(bufev, EV_WRITE); + bufferevent_run_eventcb_(bufev, what, 0); + + done: + bufferevent_decref_and_unlock_(bufev); +} + +struct bufferevent * +bufferevent_socket_new(struct event_base *base, evutil_socket_t fd, + int options) +{ + struct bufferevent_private *bufev_p; + struct bufferevent *bufev; + +#ifdef _WIN32 + if (base && event_base_get_iocp_(base)) + return bufferevent_async_new_(base, fd, options); +#endif + + if ((bufev_p = mm_calloc(1, sizeof(struct bufferevent_private)))== NULL) + return NULL; + + if (bufferevent_init_common_(bufev_p, base, &bufferevent_ops_socket, + options) < 0) { + mm_free(bufev_p); + return NULL; + } + bufev = &bufev_p->bev; + evbuffer_set_flags(bufev->output, EVBUFFER_FLAG_DRAINS_TO_FD); + + event_assign(&bufev->ev_read, bufev->ev_base, fd, + EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev); + event_assign(&bufev->ev_write, bufev->ev_base, fd, + EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev); + + evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev); + + evbuffer_freeze(bufev->input, 0); + evbuffer_freeze(bufev->output, 1); + + return bufev; +} + +int +bufferevent_socket_connect(struct bufferevent *bev, + const struct sockaddr *sa, int socklen) +{ + struct bufferevent_private *bufev_p = BEV_UPCAST(bev); + + evutil_socket_t fd; + int r = 0; + int result=-1; + int ownfd = 0; + + bufferevent_incref_and_lock_(bev); + + fd = bufferevent_getfd(bev); + if (fd < 0) { + if (!sa) + goto done; + fd = evutil_socket_(sa->sa_family, + SOCK_STREAM|EVUTIL_SOCK_NONBLOCK, 0); + if (fd < 0) + goto freesock; + ownfd = 1; + } + if (sa) { +#ifdef _WIN32 + if (bufferevent_async_can_connect_(bev)) { + bufferevent_setfd(bev, fd); + r = bufferevent_async_connect_(bev, fd, sa, socklen); + if (r < 0) + goto freesock; + bufev_p->connecting = 1; + result = 0; + goto done; + } else +#endif + r = evutil_socket_connect_(&fd, sa, socklen); + if (r < 0) + goto freesock; + } +#ifdef _WIN32 + /* ConnectEx() isn't always around, even when IOCP is enabled. + * Here, we borrow the socket object's write handler to fall back + * on a non-blocking connect() when ConnectEx() is unavailable. */ + if (BEV_IS_ASYNC(bev)) { + event_assign(&bev->ev_write, bev->ev_base, fd, + EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bev); + } +#endif + bufferevent_setfd(bev, fd); + if (r == 0) { + if (! be_socket_enable(bev, EV_WRITE)) { + bufev_p->connecting = 1; + result = 0; + goto done; + } + } else if (r == 1) { + /* The connect succeeded already. How very BSD of it. */ + result = 0; + bufev_p->connecting = 1; + bufferevent_trigger_nolock_(bev, EV_WRITE, BEV_OPT_DEFER_CALLBACKS); + } else { + /* The connect failed already. How very BSD of it. 
*/ + result = 0; + bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, BEV_OPT_DEFER_CALLBACKS); + bufferevent_disable(bev, EV_WRITE|EV_READ); + } + + goto done; + +freesock: + if (ownfd) + evutil_closesocket(fd); +done: + bufferevent_decref_and_unlock_(bev); + return result; +} + +static void +bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai, + void *arg) +{ + struct bufferevent *bev = arg; + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + int r; + BEV_LOCK(bev); + + bufferevent_unsuspend_write_(bev, BEV_SUSPEND_LOOKUP); + bufferevent_unsuspend_read_(bev, BEV_SUSPEND_LOOKUP); + + bev_p->dns_request = NULL; + + if (result == EVUTIL_EAI_CANCEL) { + bev_p->dns_error = result; + bufferevent_decref_and_unlock_(bev); + return; + } + if (result != 0) { + bev_p->dns_error = result; + bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0); + bufferevent_decref_and_unlock_(bev); + if (ai) + evutil_freeaddrinfo(ai); + return; + } + + /* XXX use the other addrinfos? */ + bufferevent_socket_set_conn_address_(bev, ai->ai_addr, (int)ai->ai_addrlen); + r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen); + if (r < 0) + bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0); + bufferevent_decref_and_unlock_(bev); + evutil_freeaddrinfo(ai); +} + +int +bufferevent_socket_connect_hostname(struct bufferevent *bev, + struct evdns_base *evdns_base, int family, const char *hostname, int port) +{ + char portbuf[10]; + struct evutil_addrinfo hint; + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + + if (family != AF_INET && family != AF_INET6 && family != AF_UNSPEC) + return -1; + if (port < 1 || port > 65535) + return -1; + + memset(&hint, 0, sizeof(hint)); + hint.ai_family = family; + hint.ai_protocol = IPPROTO_TCP; + hint.ai_socktype = SOCK_STREAM; + + evutil_snprintf(portbuf, sizeof(portbuf), "%d", port); + + BEV_LOCK(bev); + bev_p->dns_error = 0; + + bufferevent_suspend_write_(bev, BEV_SUSPEND_LOOKUP); + bufferevent_suspend_read_(bev, BEV_SUSPEND_LOOKUP); + + bufferevent_incref_(bev); + bev_p->dns_request = evutil_getaddrinfo_async_(evdns_base, hostname, + portbuf, &hint, bufferevent_connect_getaddrinfo_cb, bev); + BEV_UNLOCK(bev); + + return 0; +} + +int +bufferevent_socket_get_dns_error(struct bufferevent *bev) +{ + int rv; + struct bufferevent_private *bev_p = BEV_UPCAST(bev); + + BEV_LOCK(bev); + rv = bev_p->dns_error; + BEV_UNLOCK(bev); + + return rv; +} + +/* + * Create a new buffered event object. + * + * The read callback is invoked whenever we read new data. + * The write callback is invoked whenever the output buffer is drained. + * The error callback is invoked on a write/read error or on EOF. + * + * Both read and write callbacks maybe NULL. The error callback is not + * allowed to be NULL and have to be provided always. 
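Putting the pieces of this file together, a minimal client-side sketch of that callback contract; it uses only the functions defined above plus the public libevent 2.1 headers, and read_cb, event_cb, connect_example and the example.org/80 endpoint are illustrative values, not part of libevent:

    #include <stdio.h>
    #include <sys/socket.h>

    #include <event2/event.h>
    #include <event2/dns.h>
    #include <event2/bufferevent.h>

    static void read_cb(struct bufferevent *bev, void *ctx)
    {
        char buf[256];
        size_t n;

        (void)ctx;
        while ((n = bufferevent_read(bev, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);
    }

    static void event_cb(struct bufferevent *bev, short what, void *ctx)
    {
        (void)ctx;
        if (what & BEV_EVENT_CONNECTED)
            return;                    // connected; read_cb takes over from here
        if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF))
            bufferevent_free(bev);     // BEV_OPT_CLOSE_ON_FREE also closes the fd
    }

    static int connect_example(struct event_base *base, struct evdns_base *dns)
    {
        struct bufferevent *bev =
            bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);

        if (bev == NULL)
            return -1;
        bufferevent_setcb(bev, read_cb, NULL, event_cb, NULL);
        bufferevent_enable(bev, EV_READ | EV_WRITE);
        // Resolves the name through evdns, then falls into
        // bufferevent_socket_connect() as implemented earlier in this file.
        return bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
            "example.org", 80);
    }
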
+ */ + +struct bufferevent * +bufferevent_new(evutil_socket_t fd, + bufferevent_data_cb readcb, bufferevent_data_cb writecb, + bufferevent_event_cb eventcb, void *cbarg) +{ + struct bufferevent *bufev; + + if (!(bufev = bufferevent_socket_new(NULL, fd, 0))) + return NULL; + + bufferevent_setcb(bufev, readcb, writecb, eventcb, cbarg); + + return bufev; +} + + +static int +be_socket_enable(struct bufferevent *bufev, short event) +{ + if (event & EV_READ && + bufferevent_add_event_(&bufev->ev_read, &bufev->timeout_read) == -1) + return -1; + if (event & EV_WRITE && + bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1) + return -1; + return 0; +} + +static int +be_socket_disable(struct bufferevent *bufev, short event) +{ + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + if (event & EV_READ) { + if (event_del(&bufev->ev_read) == -1) + return -1; + } + /* Don't actually disable the write if we are trying to connect. */ + if ((event & EV_WRITE) && ! bufev_p->connecting) { + if (event_del(&bufev->ev_write) == -1) + return -1; + } + return 0; +} + +static void +be_socket_destruct(struct bufferevent *bufev) +{ + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + evutil_socket_t fd; + EVUTIL_ASSERT(BEV_IS_SOCKET(bufev)); + + fd = event_get_fd(&bufev->ev_read); + + if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0) + EVUTIL_CLOSESOCKET(fd); + + evutil_getaddrinfo_cancel_async_(bufev_p->dns_request); +} + +static int +be_socket_flush(struct bufferevent *bev, short iotype, + enum bufferevent_flush_mode mode) +{ + return 0; +} + + +static void +be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd) +{ + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + + BEV_LOCK(bufev); + EVUTIL_ASSERT(BEV_IS_SOCKET(bufev)); + + event_del(&bufev->ev_read); + event_del(&bufev->ev_write); + + evbuffer_unfreeze(bufev->input, 0); + evbuffer_unfreeze(bufev->output, 1); + + event_assign(&bufev->ev_read, bufev->ev_base, fd, + EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev); + event_assign(&bufev->ev_write, bufev->ev_base, fd, + EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev); + + if (fd >= 0) + bufferevent_enable(bufev, bufev->enabled); + + evutil_getaddrinfo_cancel_async_(bufev_p->dns_request); + + BEV_UNLOCK(bufev); +} + +/* XXXX Should non-socket bufferevents support this? */ +int +bufferevent_priority_set(struct bufferevent *bufev, int priority) +{ + int r = -1; + struct bufferevent_private *bufev_p = BEV_UPCAST(bufev); + + BEV_LOCK(bufev); + if (!BEV_IS_SOCKET(bufev)) + goto done; + + if (event_priority_set(&bufev->ev_read, priority) == -1) + goto done; + if (event_priority_set(&bufev->ev_write, priority) == -1) + goto done; + + event_deferred_cb_set_priority_(&bufev_p->deferred, priority); + + r = 0; +done: + BEV_UNLOCK(bufev); + return r; +} + +/* XXXX Should non-socket bufferevents support this? 
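A caller-side sketch for bufferevent_priority_set() above, assuming the standard libevent 2.1 API; make_urgent and the two-level split are illustrative only. Priorities have to exist on the event_base before an event can be assigned to one:

    #include <event2/event.h>
    #include <event2/bufferevent.h>

    int make_urgent(struct event_base *base, struct bufferevent *bev)
    {
        // Create two priority levels on the base: 0 (runs first) and 1.
        if (event_base_priority_init(base, 2) == -1)
            return -1;
        // Attach this bufferevent's read/write events to the highest level.
        return bufferevent_priority_set(bev, 0);
    }
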
*/ +int +bufferevent_base_set(struct event_base *base, struct bufferevent *bufev) +{ + int res = -1; + + BEV_LOCK(bufev); + if (!BEV_IS_SOCKET(bufev)) + goto done; + + bufev->ev_base = base; + + res = event_base_set(base, &bufev->ev_read); + if (res == -1) + goto done; + + res = event_base_set(base, &bufev->ev_write); +done: + BEV_UNLOCK(bufev); + return res; +} + +static int +be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op, + union bufferevent_ctrl_data *data) +{ + switch (op) { + case BEV_CTRL_SET_FD: + be_socket_setfd(bev, data->fd); + return 0; + case BEV_CTRL_GET_FD: + data->fd = event_get_fd(&bev->ev_read); + return 0; + case BEV_CTRL_GET_UNDERLYING: + case BEV_CTRL_CANCEL_ALL: + default: + return -1; + } +} diff --git a/probe-busybox/libevent-2.1.11-stable/changelist-internal.h b/probe-busybox/libevent-2.1.11-stable/changelist-internal.h new file mode 100644 index 00000000..98fc52ae --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/changelist-internal.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef CHANGELIST_INTERNAL_H_INCLUDED_ +#define CHANGELIST_INTERNAL_H_INCLUDED_ + +/* + A "changelist" is a list of all the fd status changes that should be made + between calls to the backend's dispatch function. There are a few reasons + that a backend would want to queue changes like this rather than processing + them immediately. + + 1) Sometimes applications will add and delete the same event more than + once between calls to dispatch. Processing these changes immediately + is needless, and potentially expensive (especially if we're on a system + that makes one syscall per changed event). + + 2) Sometimes we can coalesce multiple changes on the same fd into a single + syscall if we know about them in advance. For example, epoll can do an + add and a delete at the same time, but only if we have found out about + both of them before we tell epoll. + + 3) Sometimes adding an event that we immediately delete can cause + unintended consequences: in kqueue, this makes pending events get + reported spuriously. 
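Case (1) is easy to provoke from the public API. A minimal sketch assuming libevent 2.1, with error handling omitted; EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST is the real flag that enables this machinery for the epoll backend, while no_op, changelist_example and fd are illustrative names:

    #include <event2/event.h>

    static void no_op(evutil_socket_t fd, short what, void *arg)
    {
        (void)fd; (void)what; (void)arg;
    }

    struct event_base *changelist_example(evutil_socket_t fd)
    {
        struct event_config *cfg = event_config_new();
        struct event_base *base;
        struct event *ev;

        event_config_set_flag(cfg, EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST);
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);

        ev = event_new(base, fd, EV_READ, no_op, NULL);
        event_add(ev, NULL);   // queued in the changelist ...
        event_del(ev);         // ... and cancelled before dispatch (case 1),
                               // so the backend typically never sees this fd
        event_free(ev);
        return base;
    }
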
+ */ + +#include "event2/util.h" + +/** Represents a */ +struct event_change { + /** The fd or signal whose events are to be changed */ + evutil_socket_t fd; + /* The events that were enabled on the fd before any of these changes + were made. May include EV_READ or EV_WRITE. */ + short old_events; + + /* The changes that we want to make in reading and writing on this fd. + * If this is a signal, then read_change has EV_CHANGE_SIGNAL set, + * and write_change is unused. */ + ev_uint8_t read_change; + ev_uint8_t write_change; + ev_uint8_t close_change; +}; + +/* Flags for read_change and write_change. */ + +/* If set, add the event. */ +#define EV_CHANGE_ADD 0x01 +/* If set, delete the event. Exclusive with EV_CHANGE_ADD */ +#define EV_CHANGE_DEL 0x02 +/* If set, this event refers a signal, not an fd. */ +#define EV_CHANGE_SIGNAL EV_SIGNAL +/* Set for persistent events. Currently not used. */ +#define EV_CHANGE_PERSIST EV_PERSIST +/* Set for adding edge-triggered events. */ +#define EV_CHANGE_ET EV_ET + +/* The value of fdinfo_size that a backend should use if it is letting + * changelist handle its add and delete functions. */ +#define EVENT_CHANGELIST_FDINFO_SIZE sizeof(int) + +/** Set up the data fields in a changelist. */ +void event_changelist_init_(struct event_changelist *changelist); +/** Remove every change in the changelist, and make corresponding changes + * in the event maps in the base. This function is generally used right + * after making all the changes in the changelist. */ +void event_changelist_remove_all_(struct event_changelist *changelist, + struct event_base *base); +/** Free all memory held in a changelist. */ +void event_changelist_freemem_(struct event_changelist *changelist); + +/** Implementation of eventop_add that queues the event in a changelist. */ +int event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events, + void *p); +/** Implementation of eventop_del that queues the event in a changelist. */ +int event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events, + void *p); + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/checkpatch.sh b/probe-busybox/libevent-2.1.11-stable/checkpatch.sh new file mode 100755 index 00000000..6eaa19c4 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/checkpatch.sh @@ -0,0 +1,299 @@ +#!/usr/bin/env bash + +# TODO: +# - inline replace +# - clang-format-diff replacement +# - uncrustify for patches (not git refs) +# - maybe integrate into travis-ci? + +function usage() +{ + cat <&2 + exit 1 +} +function default_arg() +{ + if [ "${options[ref]}" -eq 1 ]; then + echo "HEAD" + else + [ ! -t 0 ] || abort " is a tty" + echo "/dev/stdin" + fi +} +function parse_options() +{ + options[patch]=0 + options[file]=0 + options[file_diff]=0 + options[ref]=1 + options[clang]=1 + options[uncrustify]=0 + options[cfg]= + + local OPTARG OPTIND c + while getopts "pfrdCUc:h?" 
c; do + case "$c" in + p) + options[patch]=1 + options[ref]=0 + options[file]=0 + options[file_diff]=0 + ;; + f) + options[file]=1 + options[ref]=0 + options[patch]=0 + options[file_diff]=0 + ;; + r) + options[ref]=1 + options[file]=0 + options[patch]=0 + options[file_diff]=0 + ;; + d) + options[file_diff]=1 + options[file]=0 + options[patch]=0 + options[ref]=0 + ;; + C) + options[clang]=1 + options[uncrustify]=0 + ;; + U) + options[uncrustify]=1 + options[clang]=0 + ;; + c) options[cfg]="$OPTIND" ;; + ?|h) + usage + exit 0 + ;; + *) + usage + exit 1 + ;; + esac + done + + options[cfg]="$(cfg)" + + [ -f "${options[cfg]}" ] || \ + abort "Config '%s' does not exist" "${options[cfg]}" + + shift $((OPTIND - 1)) + args=( "$@" ) + + if [ ${#args[@]} -eq 0 ]; then + # exit on error globally, not only in subshell + default_arg > /dev/null + args=( "$(default_arg)" ) + fi + + if [ "${args[0]}" = "/dev/stdin" ]; then + TMP_FILE="/tmp/libevent.checkpatch.$RANDOM" + cat > "$TMP_FILE" + trap "rm '$TMP_FILE'" EXIT + + args[0]="$TMP_FILE" + fi +} + +function diff() { command diff --color=always "$@"; } + +function clang_style() +{ + local c="${options[cfg]}" + echo "{ $(sed -e 's/#.*//' -e '/---/d' -e '/\.\.\./d' "$c" | tr $'\n' ,) }" +} +function clang_format() { clang-format --style="$(clang_style)" "$@"; } +function clang_format_diff() { clang-format-diff --style="$(clang_style)" "$@"; } +# for non-bare repo will work +function clang_format_git() +{ git format-patch --stdout "$@" -1 | clang_format_diff; } + +function uncrustify() { command uncrustify -c "${options[cfg]}" "$@"; } +function uncrustify_frag() { uncrustify -l C --frag "$@"; } +function uncrustify_indent_off() { echo '/* *INDENT-OFF* */'; } +function uncrustify_indent_on() { echo '/* *INDENT-ON* */'; } +function git_hunk() +{ + local ref=$1 f=$2 + shift 2 + git cat-file -p $ref:$f +} +function uncrustify_git_indent_hunk() +{ + local start=$1 end=$2 + shift 2 + + # Will be beatier with tee(1), but doh bash async substitution + { uncrustify_indent_off; git_hunk "$@" | head -n$((start - 1)); } + { uncrustify_indent_on; git_hunk "$@" | head -n$((end - 1)) | tail -n+$start; } + { uncrustify_indent_off; git_hunk "$@" | tail -n+$((end + 1)); } +} +function strip() +{ + local start=$1 end=$2 + shift 2 + + # seek indent_{on,off}() + let start+=2 + head -n$end | tail -n+$start +} +function patch_ranges() +{ + egrep -o '^@@ -[0-9]+(,[0-9]+|) \+[0-9]+(,[0-9]+|) @@' | \ + cut -d' ' -f3 +} +function git_ranges() +{ + local ref=$1 f=$2 + shift 2 + + git diff -W $ref^..$ref -- $f | patch_ranges +} +function diff_substitute() +{ + local f="$1" + shift + + sed \ + -e "s#^--- /dev/fd.*\$#--- a/$f#" \ + -e "s#^+++ /dev/fd.*\$#+++ b/$f#" +} +function uncrustify_git() +{ + local ref=$1 r f start end length + shift + + local files=( $(git diff --name-only $ref^..$ref | egrep "\.(c|h)$") ) + for f in "${files[@]}"; do + local ranges=( $(git_ranges $ref "$f") ) + for r in "${ranges[@]}"; do + [[ ! 
"$r" =~ ^\+([0-9]+)(,([0-9]+)|)$ ]] && continue + start=${BASH_REMATCH[1]} + [ -n "${BASH_REMATCH[3]}" ] && \ + length=${BASH_REMATCH[3]} || \ + length=1 + end=$((start + length)) + echo "Range: $start:$end ($length)" >&2 + + diff -u \ + <(uncrustify_git_indent_hunk $start $end $ref "$f" | strip $start $end) \ + <(uncrustify_git_indent_hunk $start $end $ref "$f" | uncrustify_frag | strip $start $end) \ + | diff_substitute "$f" + done + done +} +function uncrustify_diff() { abort "Not implemented"; } +function uncrustify_file() { uncrustify -f "$@"; } + +function checker() +{ + local c=$1 u=$2 + shift 2 + + [ "${options[clang]}" -eq 0 ] || { + $c "$@" + return + } + [ "${options[uncrustify]}" -eq 0 ] || { + $u "$@" + return + } +} +function check_patch() { checker clang_format_diff uncrustify_diff "$@"; } +function check_file() { checker clang_format uncrustify_file "$@"; } +function check_ref() { checker clang_format_git uncrustify_git "$@"; } + +function check_arg() +{ + [ "${options[patch]}" -eq 0 ] || { + check_patch "$@" + return + } + [ "${options[file]}" -eq 0 ] || { + check_file "$@" + return + } + [ "${options[file_diff]}" -eq 0 ] || { + diff -u "$@" <(check_file "$@") | diff_substitute "$@" + return + } + [ "${options[ref]}" -eq 0 ] || { + check_ref "$@" + return + } +} + +function main() +{ + local a + for a in "${args}"; do + check_arg "$a" + done +} + +declare -A options +parse_options "$@" + +main "$@" | less -FRSX diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/AddCompilerFlags.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/AddCompilerFlags.cmake new file mode 100644 index 00000000..9dc21d03 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/AddCompilerFlags.cmake @@ -0,0 +1,13 @@ +include(CheckCCompilerFlag) + +macro(add_compiler_flags) + foreach(flag ${ARGN}) + string(REGEX REPLACE "[-.+/:= ]" "_" _flag_esc "${flag}") + + check_c_compiler_flag("${flag}" check_c_compiler_flag_${_flag_esc}) + + if (check_c_compiler_flag_${_flag_esc}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") + endif() + endforeach() +endmacro() diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/AddEventLibrary.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/AddEventLibrary.cmake new file mode 100644 index 00000000..352c86ba --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/AddEventLibrary.cmake @@ -0,0 +1,163 @@ +include(CMakeParseArguments) + +set(LIBEVENT_SHARED_LIBRARIES "") +set(LIBEVENT_STATIC_LIBRARIES "") + +macro(set_event_shared_lib_flags LIB_NAME) + set_target_properties("${LIB_NAME}_shared" PROPERTIES + COMPILE_FLAGS ${ARGN}) + set_target_properties("${LIB_NAME}_shared" PROPERTIES + LINK_FLAGS ${ARGN}) +endmacro() + +macro(generate_pkgconfig LIB_NAME) + set(prefix ${CMAKE_INSTALL_PREFIX}) + set(exec_prefix ${CMAKE_INSTALL_PREFIX}) + set(libdir ${CMAKE_INSTALL_PREFIX}/lib) + set(includedir ${CMAKE_INSTALL_PREFIX}/include) + + set(VERSION ${EVENT_ABI_LIBVERSION}) + + set(LIBS "") + foreach (LIB ${LIB_PLATFORM}) + set(LIBS "${LIBS} -L${LIB}") + endforeach() + + set(OPENSSL_LIBS "") + foreach(LIB ${OPENSSL_LIBRARIES}) + set(OPENSSL_LIBS "${OPENSSL_LIBS} -L${LIB}") + endforeach() + + configure_file("lib${LIB_NAME}.pc.in" "lib${LIB_NAME}.pc" @ONLY) + install( + FILES "${CMAKE_CURRENT_BINARY_DIR}/lib${LIB_NAME}.pc" + DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/pkgconfig" + ) +endmacro() + + +# Global variables that it uses: +# - EVENT_ABI_LIBVERSION +# - EVENT_ABI_LIBVERSION_CURRENT +# - EVENT_ABI_LIBVERSION_REVISION +# - EVENT_ABI_LIBVERSION_AGE +# - 
EVENT_PACKAGE_RELEASE +# - CMAKE_THREAD_LIBS_INIT LIB_PLATFORM +# - OPENSSL_LIBRARIES +# - HDR_PUBLIC +# - EVENT_INSTALL_INCLUDE_DIR +# - EVENT_SHARED_FLAGS +# - EVENT_LIBRARY_STATIC +# - EVENT_LIBRARY_SHARED +# +# Exported variables: +# - LIBEVENT_SHARED_LIBRARIES +# - LIBEVENT_STATIC_LIBRARIES +macro(add_event_library LIB_NAME) + cmake_parse_arguments(LIB + "" # Options + "VERSION" # One val + "SOURCES;LIBRARIES" # Multi val + ${ARGN} + ) + + set(ADD_EVENT_LIBRARY_TARGETS) + set(ADD_EVENT_LIBRARY_INTERFACE) + + if (${EVENT_LIBRARY_STATIC}) + add_library("${LIB_NAME}_static" STATIC ${LIB_SOURCES}) + set_target_properties("${LIB_NAME}_static" PROPERTIES + OUTPUT_NAME "${LIB_NAME}" + CLEAN_DIRECT_OUTPUT 1) + set_target_properties( + "${LIB_NAME}_static" PROPERTIES + PUBLIC_HEADER "${HDR_PUBLIC}") + + list(APPEND LIBEVENT_STATIC_LIBRARIES "${LIB_NAME}_static") + list(APPEND ADD_EVENT_LIBRARY_TARGETS "${LIB_NAME}_static") + + set(ADD_EVENT_LIBRARY_INTERFACE "${LIB_NAME}_static") + endif() + + if (${EVENT_LIBRARY_SHARED}) + add_library("${LIB_NAME}_shared" SHARED ${LIB_SOURCES}) + + target_link_libraries("${LIB_NAME}_shared" + ${CMAKE_THREAD_LIBS_INIT} + ${LIB_PLATFORM} + ${LIB_LIBRARIES}) + + if (EVENT_SHARED_FLAGS) + set_event_shared_lib_flags("${LIB_NAME}" "${EVENT_SHARED_FLAGS}") + endif() + + if (WIN32) + set_target_properties( + "${LIB_NAME}_shared" PROPERTIES + OUTPUT_NAME "${LIB_NAME}" + SOVERSION ${EVENT_ABI_LIBVERSION}) + elseif (APPLE) + math(EXPR COMPATIBILITY_VERSION "${EVENT_ABI_LIBVERSION_CURRENT}+1") + math(EXPR CURRENT_MINUS_AGE "${EVENT_ABI_LIBVERSION_CURRENT}-${EVENT_ABI_LIBVERSION_AGE}") + set_target_properties( + "${LIB_NAME}_shared" PROPERTIES + OUTPUT_NAME "${LIB_NAME}-${EVENT_PACKAGE_RELEASE}.${CURRENT_MINUS_AGE}" + INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib" + LINK_FLAGS "-compatibility_version ${COMPATIBILITY_VERSION} -current_version ${COMPATIBILITY_VERSION}.${EVENT_ABI_LIBVERSION_REVISION}") + else() + math(EXPR CURRENT_MINUS_AGE "${EVENT_ABI_LIBVERSION_CURRENT}-${EVENT_ABI_LIBVERSION_AGE}") + set_target_properties( + "${LIB_NAME}_shared" PROPERTIES + OUTPUT_NAME "${LIB_NAME}-${EVENT_PACKAGE_RELEASE}" + VERSION "${CURRENT_MINUS_AGE}.${EVENT_ABI_LIBVERSION_AGE}.${EVENT_ABI_LIBVERSION_REVISION}" + SOVERSION "${CURRENT_MINUS_AGE}") + endif() + + set_target_properties( + "${LIB_NAME}_shared" PROPERTIES + PUBLIC_HEADER "${HDR_PUBLIC}" + CLEAN_DIRECT_OUTPUT 1) + + if (NOT WIN32) + set(LIB_LINK_NAME + "${CMAKE_SHARED_LIBRARY_PREFIX}${LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}") + + add_custom_command(TARGET ${LIB_NAME}_shared + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E create_symlink + "$" + "${LIB_LINK_NAME}" + WORKING_DIRECTORY "lib") + endif() + + list(APPEND LIBEVENT_SHARED_LIBRARIES "${LIB_NAME}_shared") + list(APPEND ADD_EVENT_LIBRARY_TARGETS "${LIB_NAME}_shared") + + set(ADD_EVENT_LIBRARY_INTERFACE "${LIB_NAME}_shared") + endif() + + export(TARGETS ${ADD_EVENT_LIBRARY_TARGETS} + FILE "${PROJECT_BINARY_DIR}/LibeventTargets.cmake" + APPEND + ) + + install(TARGETS ${ADD_EVENT_LIBRARY_TARGETS} + EXPORT LibeventTargets + LIBRARY DESTINATION "lib" COMPONENT lib + ARCHIVE DESTINATION "lib" COMPONENT lib + RUNTIME DESTINATION "lib" COMPONENT lib + PUBLIC_HEADER DESTINATION "include/event2" + COMPONENT dev + ) + if (NOT WIN32 AND ${EVENT_LIBRARY_SHARED}) + install(FILES + "$/${LIB_LINK_NAME}" + DESTINATION "lib" + COMPONENT lib) + endif() + + add_library(${LIB_NAME} INTERFACE) + target_link_libraries(${LIB_NAME} INTERFACE ${ADD_EVENT_LIBRARY_INTERFACE}) + + 
generate_pkgconfig("${LIB_NAME}") +endmacro() diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/COPYING-CMAKE-SCRIPTS b/probe-busybox/libevent-2.1.11-stable/cmake/COPYING-CMAKE-SCRIPTS new file mode 100644 index 00000000..ab3c4d25 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/COPYING-CMAKE-SCRIPTS @@ -0,0 +1,22 @@ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckConstExists.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckConstExists.cmake new file mode 100644 index 00000000..de074581 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckConstExists.cmake @@ -0,0 +1,25 @@ +include(CheckCSourceCompiles) + +macro(check_const_exists CONST FILES VARIABLE) + if (NOT DEFINED ${VARIABLE}) + set(check_const_exists_source "") + foreach(file ${FILES}) + set(check_const_exists_source + "${check_const_exists_source} + #include <${file}>") + endforeach() + set(check_const_exists_source + "${check_const_exists_source} + int main() { (void)${CONST}; return 0; }") + + check_c_source_compiles("${check_const_exists_source}" ${VARIABLE}) + + if (${${VARIABLE}}) + set(${VARIABLE} 1 CACHE INTERNAL "Have const ${CONST}") + message(STATUS "Looking for ${CONST} - found") + else() + set(${VARIABLE} 0 CACHE INTERNAL "Have const ${CONST}") + message(STATUS "Looking for ${CONST} - not found") + endif() + endif() +endmacro(check_const_exists) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.c b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.c new file mode 100644 index 00000000..d948fecf --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.c @@ -0,0 +1,14 @@ +#include + +#define KB ((off_t)1024) +#define MB ((off_t)1024 * KB) +#define GB ((off_t)1024 * MB) +#define TB ((off_t)1024 * GB) +int t2[(((64 * GB -1) % 671088649) == 268434537) + && (((TB - (64 * GB -1) + 255) % 1792151290) == 305159546)? 
1: -1]; + +int main() +{ + ; + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.cmake new file mode 100644 index 00000000..12534401 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFileOffsetBits.cmake @@ -0,0 +1,43 @@ +# - Check if _FILE_OFFSET_BITS macro needed for large files +# CHECK_FILE_OFFSET_BITS () +# +# The following variables may be set before calling this macro to +# modify the way the check is run: +# +# CMAKE_REQUIRED_FLAGS = string of compile command line flags +# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar) +# CMAKE_REQUIRED_INCLUDES = list of include directories +# Copyright (c) 2009, Michihiro NAKAJIMA +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +#INCLUDE(CheckCSourceCompiles) + +GET_FILENAME_COMPONENT(_selfdir_CheckFileOffsetBits + "${CMAKE_CURRENT_LIST_FILE}" PATH) + +MACRO (CHECK_FILE_OFFSET_BITS) + IF(NOT DEFINED _FILE_OFFSET_BITS) + MESSAGE(STATUS "Cheking _FILE_OFFSET_BITS for large files") + TRY_COMPILE(__WITHOUT_FILE_OFFSET_BITS_64 + ${CMAKE_CURRENT_BINARY_DIR} + ${_selfdir_CheckFileOffsetBits}/CheckFileOffsetBits.c + COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS}) + IF(NOT __WITHOUT_FILE_OFFSET_BITS_64) + TRY_COMPILE(__WITH_FILE_OFFSET_BITS_64 + ${CMAKE_CURRENT_BINARY_DIR} + ${_selfdir_CheckFileOffsetBits}/CheckFileOffsetBits.c + COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_FILE_OFFSET_BITS=64) + ENDIF(NOT __WITHOUT_FILE_OFFSET_BITS_64) + + IF(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64) + SET(_FILE_OFFSET_BITS 64 CACHE INTERNAL "_FILE_OFFSET_BITS macro needed for large files") + MESSAGE(STATUS "Cheking _FILE_OFFSET_BITS for large files - needed") + ELSE(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64) + SET(_FILE_OFFSET_BITS "" CACHE INTERNAL "_FILE_OFFSET_BITS macro needed for large files") + MESSAGE(STATUS "Cheking _FILE_OFFSET_BITS for large files - not needed") + ENDIF(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64) + ENDIF(NOT DEFINED _FILE_OFFSET_BITS) + +ENDMACRO (CHECK_FILE_OFFSET_BITS) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.c b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.c new file mode 100644 index 00000000..224e3404 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.c @@ -0,0 +1,28 @@ +#ifdef CHECK_FUNCTION_EXISTS + +#ifdef __cplusplus +extern "C" +#endif + char + CHECK_FUNCTION_EXISTS(void); +#ifdef __CLASSIC_C__ +int main() +{ + int ac; + char* av[]; +#else +int main(int ac, char* av[]) +{ +#endif + CHECK_FUNCTION_EXISTS(); + if (ac > 1000) { + return *av[0]; + } + return 0; +} + +#else /* CHECK_FUNCTION_EXISTS */ + +#error "CHECK_FUNCTION_EXISTS has to specify the function" + +#endif /* CHECK_FUNCTION_EXISTS */ diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.cmake new file mode 100644 index 00000000..78bc2ecc --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionExistsEx.cmake @@ -0,0 +1,69 @@ +# - Check if a C function can be linked +# CHECK_FUNCTION_EXISTS( ) +# +# Check that the is provided by libraries on the system and +# store the result in a . 
This does not verify that any +# system header file declares the function, only that it can be found +# at link time (considure using CheckSymbolExists). +# +# The following variables may be set before calling this macro to +# modify the way the check is run: +# +# CMAKE_REQUIRED_FLAGS = string of compile command line flags +# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar) +# CMAKE_REQUIRED_INCLUDES = list of include directories +# CMAKE_REQUIRED_LIBRARIES = list of libraries to link + +#============================================================================= +# Copyright 2002-2011 Kitware, Inc. +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +MACRO(CHECK_FUNCTION_EXISTS_EX FUNCTION VARIABLE) + IF(${VARIABLE} MATCHES "^${VARIABLE}$") + SET(MACRO_CHECK_FUNCTION_DEFINITIONS + "-DCHECK_FUNCTION_EXISTS=${FUNCTION} ${CMAKE_REQUIRED_FLAGS}") + MESSAGE(STATUS "Looking for ${FUNCTION}") + IF(CMAKE_REQUIRED_LIBRARIES) + SET(CHECK_FUNCTION_EXISTS_ADD_LIBRARIES + "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}") + ELSE(CMAKE_REQUIRED_LIBRARIES) + SET(CHECK_FUNCTION_EXISTS_ADD_LIBRARIES) + ENDIF(CMAKE_REQUIRED_LIBRARIES) + IF(CMAKE_REQUIRED_INCLUDES) + SET(CHECK_FUNCTION_EXISTS_ADD_INCLUDES + "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}") + ELSE(CMAKE_REQUIRED_INCLUDES) + SET(CHECK_FUNCTION_EXISTS_ADD_INCLUDES) + ENDIF(CMAKE_REQUIRED_INCLUDES) + TRY_COMPILE(${VARIABLE} + ${CMAKE_BINARY_DIR} + ${PROJECT_SOURCE_DIR}/cmake/CheckFunctionExistsEx.c + COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} + CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS} + "${CHECK_FUNCTION_EXISTS_ADD_LIBRARIES}" + "${CHECK_FUNCTION_EXISTS_ADD_INCLUDES}" + OUTPUT_VARIABLE OUTPUT) + IF(${VARIABLE}) + SET(${VARIABLE} 1 CACHE INTERNAL "Have function ${FUNCTION}") + MESSAGE(STATUS "Looking for ${FUNCTION} - found") + FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "Determining if the function ${FUNCTION} exists passed with the following output:\n" + "${OUTPUT}\n\n") + ELSE(${VARIABLE}) + MESSAGE(STATUS "Looking for ${FUNCTION} - not found") + SET(${VARIABLE} "" CACHE INTERNAL "Have function ${FUNCTION}") + FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Determining if the function ${FUNCTION} exists failed with the following output:\n" + "${OUTPUT}\n\n") + ENDIF(${VARIABLE}) + ENDIF() +ENDMACRO(CHECK_FUNCTION_EXISTS_EX) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionKeywords.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionKeywords.cmake new file mode 100644 index 00000000..3d968b8a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckFunctionKeywords.cmake @@ -0,0 +1,14 @@ +include(CheckCSourceCompiles) + +macro(check_function_keywords _wordlist) + set(${_result} "") + foreach(flag ${_wordlist}) + string(REGEX REPLACE "[-+/ ()]" "_" flagname "${flag}") + string(TOUPPER "${flagname}" flagname) + set(have_flag "HAVE_${flagname}") + check_c_source_compiles("${flag} void func(); void func() { } int main() { func(); 
return 0; }" ${have_flag}) + if(${have_flag} AND NOT ${_result}) + set(${_result} "${flag}") + endif(${have_flag} AND NOT ${_result}) + endforeach(flag) +endmacro(check_function_keywords) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.c.in b/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.c.in new file mode 100644 index 00000000..a97344ac --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.c.in @@ -0,0 +1,29 @@ +@CHECK_PROTOTYPE_DEFINITION_HEADER@ + +static void cmakeRequireSymbol(int dummy, ...) { + (void) dummy; +} + +static void checkSymbol(void) { +#ifndef @CHECK_PROTOTYPE_DEFINITION_SYMBOL@ + cmakeRequireSymbol(0, &@CHECK_PROTOTYPE_DEFINITION_SYMBOL@); +#endif +} + +@CHECK_PROTOTYPE_DEFINITION_PROTO@ { + return @CHECK_PROTOTYPE_DEFINITION_RETURN@; +} + +#ifdef __CLASSIC_C__ +int main() { + int ac; + char*av[]; +#else +int main(int ac, char *av[]) { +#endif + checkSymbol(); + if (ac > 1000) { + return *av[0]; + } + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.cmake new file mode 100644 index 00000000..940d1ff0 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckPrototypeDefinition.cmake @@ -0,0 +1,82 @@ +# - Check if the protoype we expect is correct. +# check_prototype_definition(FUNCTION PROTOTYPE RETURN HEADER VARIABLE) +# +# FUNCTION - The name of the function (used to check if prototype exists) +# PROTOTYPE- The prototype to check. +# RETURN - The return value of the function. +# HEADER - The header files required. +# VARIABLE - The variable to store the result. +# +# Example: +# +# check_prototype_definition(getpwent_r +# "struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)" +# "NULL" +# "unistd.h;pwd.h" +# SOLARIS_GETPWENT_R) +# +# The following variables may be set before calling this macro to +# modify the way the check is run: +# +# CMAKE_REQUIRED_FLAGS = string of compile command line flags +# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar) +# CMAKE_REQUIRED_INCLUDES = list of include directories +# CMAKE_REQUIRED_LIBRARIES = list of libraries to link + + +function(CHECK_PROTOTYPE_DEFINITION _FUNCTION _PROTOTYPE _RETURN _HEADER _VARIABLE) + if (${_VARIABLE} MATCHES "^${_VARIABLE}$") + set(CHECK_PROTOTYPE_DEFINITION_CONTENT "/* */\n") + + set(CHECK_PROTOTYPE_DEFINITION_FLAGS ${CMAKE_REQUIRED_FLAGS}) + if (CMAKE_REQUIRED_LIBRARIES) + set(CHECK_PROTOTYPE_DEFINITION_LIBS + "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}") + else(CMAKE_REQUIRED_LIBRARIES) + set(CHECK_PROTOTYPE_DEFINITION_LIBS) + endif(CMAKE_REQUIRED_LIBRARIES) + if (CMAKE_REQUIRED_INCLUDES) + set(CMAKE_SYMBOL_EXISTS_INCLUDES + "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}") + else(CMAKE_REQUIRED_INCLUDES) + set(CMAKE_SYMBOL_EXISTS_INCLUDES) + endif(CMAKE_REQUIRED_INCLUDES) + + foreach(_FILE ${_HEADER}) + set(CHECK_PROTOTYPE_DEFINITION_HEADER + "${CHECK_PROTOTYPE_DEFINITION_HEADER}#include <${_FILE}>\n") + endforeach(_FILE) + + set(CHECK_PROTOTYPE_DEFINITION_SYMBOL ${_FUNCTION}) + set(CHECK_PROTOTYPE_DEFINITION_PROTO ${_PROTOTYPE}) + set(CHECK_PROTOTYPE_DEFINITION_RETURN ${_RETURN}) + + configure_file("${PROJECT_SOURCE_DIR}/cmake/CheckPrototypeDefinition.c.in" + "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c" @ONLY) + + file(READ 
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c _SOURCE) + + try_compile(${_VARIABLE} + ${CMAKE_BINARY_DIR} + ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c + COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} + CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${CHECK_PROTOTYPE_DEFINITION_FLAGS} + "${CHECK_PROTOTYPE_DEFINITION_LIBS}" + "${CMAKE_SYMBOL_EXISTS_INCLUDES}" + OUTPUT_VARIABLE OUTPUT) + + if (${_VARIABLE}) + set(${_VARIABLE} 1 CACHE INTERNAL "Have correct prototype for ${_FUNCTION}") + message(STATUS "Checking prototype ${_FUNCTION} for ${_VARIABLE} - True") + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "Determining if the prototype ${_FUNCTION} exists for ${_VARIABLE} passed with the following output:\n" + "${OUTPUT}\n\n") + else (${_VARIABLE}) + message(STATUS "Checking prototype ${_FUNCTION} for ${_VARIABLE} - False") + set(${_VARIABLE} 0 CACHE INTERNAL "Have correct prototype for ${_FUNCTION}") + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Determining if the prototype ${_FUNCTION} exists for ${_VARIABLE} failed with the following output:\n" + "${OUTPUT}\n\n${_SOURCE}\n\n") + endif (${_VARIABLE}) + endif() +endfunction(CHECK_PROTOTYPE_DEFINITION) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CheckWorkingKqueue.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CheckWorkingKqueue.cmake new file mode 100644 index 00000000..f22f011b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CheckWorkingKqueue.cmake @@ -0,0 +1,52 @@ +include(CheckCSourceRuns) + +check_c_source_runs( +" +#include +#include +#include +#include +#include +#include + +int +main(int argc, char **argv) +{ + int kq; + int n; + int fd[2]; + struct kevent ev; + struct timespec ts; + char buf[80000]; + + if (pipe(fd) == -1) + exit(1); + if (fcntl(fd[1], F_SETFL, O_NONBLOCK) == -1) + exit(1); + + while ((n = write(fd[1], buf, sizeof(buf))) == sizeof(buf)) + ; + + if ((kq = kqueue()) == -1) + exit(1); + + memset(&ev, 0, sizeof(ev)); + ev.ident = fd[1]; + ev.filter = EVFILT_WRITE; + ev.flags = EV_ADD | EV_ENABLE; + n = kevent(kq, &ev, 1, NULL, 0, NULL); + if (n == -1) + exit(1); + + read(fd[0], buf, sizeof(buf)); + + ts.tv_sec = 0; + ts.tv_nsec = 0; + n = kevent(kq, NULL, 0, &ev, 1, &ts); + if (n == -1 || n == 0) + exit(1); + + exit(0); +} + +" EVENT__HAVE_WORKING_KQUEUE) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/CodeCoverage.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/CodeCoverage.cmake new file mode 100644 index 00000000..eba85b3f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/CodeCoverage.cmake @@ -0,0 +1,165 @@ +# +# Boost Software License - Version 1.0 - August 17th, 2003 +# +# Permission is hereby granted, free of charge, to any person or organization +# obtaining a copy of the software and accompanying documentation covered by +# this license (the "Software") to use, reproduce, display, distribute, +# execute, and transmit the Software, and to prepare derivative works of the +# Software, and to permit third-parties to whom the Software is furnished to +# do so, all subject to the following: +# +# The copyright notices in the Software and this entire statement, including +# the above license grant, this restriction and the following disclaimer, +# must be included in all copies of the Software, in whole or in part, and +# all derivative works of the Software, unless such copies or derivative +# works are solely in the form of 
machine-executable object code generated by +# a source language processor. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +# 2012-01-31, Lars Bilke +# - Enable Code Coverage +# +# 2013-09-17, Joakim Söderberg +# - Added support for Clang. +# - Some additional usage instructions. +# +# 2016-11-02, Azat Khuzhin +# - Adopt for C compiler only (libevent) +# +# USAGE: +# 1. Copy this file into your cmake modules path. +# +# 2. Add the following line to your CMakeLists.txt: +# INCLUDE(CodeCoverage) +# +# 3. Set compiler flags to turn off optimization and enable coverage: +# SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") +# SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") +# +# 3. Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target +# which runs your test executable and produces a lcov code coverage report: +# Example: +# SETUP_TARGET_FOR_COVERAGE( +# my_coverage_target # Name for custom target. +# test_driver # Name of the test driver executable that runs the tests. +# # NOTE! This should always have a ZERO as exit code +# # otherwise the coverage generation will not complete. +# coverage # Name of output directory. +# ) +# +# 4. Build a Debug build: +# cmake -DCMAKE_BUILD_TYPE=Debug .. +# make +# make my_coverage_target +# +# + +# Check prereqs +FIND_PROGRAM( GCOV_PATH gcov ) +FIND_PROGRAM( LCOV_PATH lcov ) +FIND_PROGRAM( GENHTML_PATH genhtml ) +FIND_PROGRAM( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/tests) + +IF(NOT GCOV_PATH) + MESSAGE(FATAL_ERROR "gcov not found! Aborting...") +ENDIF() # NOT GCOV_PATH + +IF(NOT CMAKE_COMPILER_IS_GNUCC) + # Clang version 3.0.0 and greater now supports gcov as well. + MESSAGE(WARNING "Compiler is not GNU gcc! Clang Version 3.0.0 and greater supports gcov as well, but older versions don't.") + + IF(NOT "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang") + MESSAGE(FATAL_ERROR "Compiler is not GNU gcc! Aborting...") + ENDIF() +ENDIF() # NOT CMAKE_COMPILER_IS_GNUCC + +IF ( NOT CMAKE_BUILD_TYPE STREQUAL "Debug" ) + MESSAGE( WARNING "Code coverage results with an optimized (non-Debug) build may be misleading" ) +ENDIF() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" + + +# Param _targetname The name of new the custom make target +# Param _testrunner The name of the target which runs the tests. +# MUST return ZERO always, even on errors. +# If not, no coverage report will be created! +# Param _outputname lcov output is generated as _outputname.info +# HTML report is generated in _outputname/index.html +# Optional fourth parameter is passed as arguments to _testrunner +# Pass them in list form, e.g.: "-j;2" for -j 2 +FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _testrunner _outputname) + + IF(NOT LCOV_PATH) + MESSAGE(FATAL_ERROR "lcov not found! Aborting...") + ENDIF() # NOT LCOV_PATH + + IF(NOT GENHTML_PATH) + MESSAGE(FATAL_ERROR "genhtml not found! Aborting...") + ENDIF() # NOT GENHTML_PATH + + # Setup target + ADD_CUSTOM_TARGET(${_targetname} + + # Cleanup lcov + ${LCOV_PATH} --directory . 
--zerocounters + + # Run tests + COMMAND ${_testrunner} ${ARGV3} + + # Capturing lcov counters and generating report + COMMAND ${LCOV_PATH} --directory . --capture --output-file ${_outputname}.info + COMMAND ${LCOV_PATH} --remove ${_outputname}.info 'tests/*' '/usr/*' --output-file ${_outputname}.info.cleaned + COMMAND ${GENHTML_PATH} -o ${_outputname} ${_outputname}.info.cleaned + COMMAND ${CMAKE_COMMAND} -E remove ${_outputname}.info ${_outputname}.info.cleaned + + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." + ) + + # Show info where to find the report + ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD + COMMAND ; + COMMENT "Open ./${_outputname}/index.html in your browser to view the coverage report." + ) + +ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE + +# Param _targetname The name of new the custom make target +# Param _testrunner The name of the target which runs the tests +# Param _outputname cobertura output is generated as _outputname.xml +# Optional fourth parameter is passed as arguments to _testrunner +# Pass them in list form, e.g.: "-j;2" for -j 2 +FUNCTION(SETUP_TARGET_FOR_COVERAGE_COBERTURA _targetname _testrunner _outputname) + + IF(NOT PYTHON_EXECUTABLE) + MESSAGE(FATAL_ERROR "Python not found! Aborting...") + ENDIF() # NOT PYTHON_EXECUTABLE + + IF(NOT GCOVR_PATH) + MESSAGE(FATAL_ERROR "gcovr not found! Aborting...") + ENDIF() # NOT GCOVR_PATH + + ADD_CUSTOM_TARGET(${_targetname} + + # Run tests + ${_testrunner} ${ARGV3} + + # Running gcovr + COMMAND ${GCOVR_PATH} -x -r ${CMAKE_SOURCE_DIR} -e '${CMAKE_SOURCE_DIR}/tests/' -o ${_outputname}.xml + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Running gcovr to produce Cobertura code coverage report." + ) + + # Show info where to find the report + ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD + COMMAND ; + COMMENT "Cobertura code coverage report saved in ${_outputname}.xml." + ) + +ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE_COBERTURA diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/Copyright.txt b/probe-busybox/libevent-2.1.11-stable/cmake/Copyright.txt new file mode 100644 index 00000000..813124f0 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/Copyright.txt @@ -0,0 +1,57 @@ +CMake - Cross Platform Makefile Generator +Copyright 2000-2013 Kitware, Inc. +Copyright 2000-2011 Insight Software Consortium +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the names of Kitware, Inc., the Insight Software Consortium, + nor the names of their contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ + +The above copyright and license notice applies to distributions of +CMake in source and binary form. Some source files contain additional +notices of original copyright by their contributors; see each source +for details. Third-party software packages supplied with CMake under +compatible licenses provide their own copyright notices documented in +corresponding subdirectories. + +------------------------------------------------------------------------------ + +CMake was initially developed by Kitware with the following sponsorship: + + * National Library of Medicine at the National Institutes of Health + as part of the Insight Segmentation and Registration Toolkit (ITK). + + * US National Labs (Los Alamos, Livermore, Sandia) ASC Parallel + Visualization Initiative. + + * National Alliance for Medical Image Computing (NAMIC) is funded by the + National Institutes of Health through the NIH Roadmap for Medical Research, + Grant U54 EB005149. + + * Kitware, Inc. \ No newline at end of file diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/FindGit.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/FindGit.cmake new file mode 100644 index 00000000..2abbfe4e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/FindGit.cmake @@ -0,0 +1,45 @@ +# The module defines the following variables: +# GIT_EXECUTABLE - path to git command line client +# GIT_FOUND - true if the command line client was found +# Example usage: +# find_package(Git) +# if(GIT_FOUND) +# message("git found: ${GIT_EXECUTABLE}") +# endif() + +#============================================================================= +# Copyright 2010 Kitware, Inc. +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distributed this file outside of CMake, substitute the full +# License text for the above reference.) + +# Look for 'git' or 'eg' (easy git) +set(git_names git eg) + +# Prefer .cmd variants on Windows unless running in a Makefile +# in the MSYS shell. 
+if(WIN32) + if(NOT CMAKE_GENERATOR MATCHES "MSYS") + set(git_names git.cmd git eg.cmd eg) + endif() +endif() + +find_program(GIT_EXECUTABLE + NAMES ${git_names} + DOC "git command line client") + +mark_as_advanced(GIT_EXECUTABLE) + +# Handle the QUIETLY and REQUIRED arguments and set GIT_FOUND to TRUE if +# all listed variables are TRUE + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Git DEFAULT_MSG GIT_EXECUTABLE) + diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfig.cmake.in b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfig.cmake.in new file mode 100644 index 00000000..54223360 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfig.cmake.in @@ -0,0 +1,18 @@ +# - Config file for the Libevent package +# It defines the following variables +# LIBEVENT_INCLUDE_DIRS - include directories +# LIBEVENT_STATIC_LIBRARIES - libraries to link against (archive/static) +# LIBEVENT_SHARED_LIBRARIES - libraries to link against (shared) + +# Get the path of the current file. +get_filename_component(LIBEVENT_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + +# Set the include directories. +set(LIBEVENT_INCLUDE_DIRS "@EVENT_INSTALL_INCLUDE_DIR@") + +# Include the project Targets file, this contains definitions for IMPORTED targets. +include(${LIBEVENT_CMAKE_DIR}/LibeventTargets.cmake) + +# IMPORTED targets from LibeventTargets.cmake +set(LIBEVENT_STATIC_LIBRARIES "@LIBEVENT_STATIC_LIBRARIES@") +set(LIBEVENT_SHARED_LIBRARIES "@LIBEVENT_SHARED_LIBRARIES@") diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigBuildTree.cmake.in b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigBuildTree.cmake.in new file mode 100644 index 00000000..02edef32 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigBuildTree.cmake.in @@ -0,0 +1,17 @@ +# - Config file for the Libevent package +# It defines the following variables +# LIBEVENT_INCLUDE_DIRS - include directories for FooBar +# LIBEVENT_LIBRARIES - libraries to link against + +# Get the path of the current file. +get_filename_component(LIBEVENT_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + +# Set the include directories. +set(LIBEVENT_INCLUDE_DIRS "@EVENT__INCLUDE_DIRS@") + +# Include the project Targets file, this contains definitions for IMPORTED targets. 
+include(${LIBEVENT_CMAKE_DIR}/LibeventTargets.cmake) + +# IMPORTED targets from LibeventTargets.cmake +set(LIBEVENT_LIBRARIES event event_core event_extra) + diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigVersion.cmake.in b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigVersion.cmake.in new file mode 100644 index 00000000..56371a8f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/LibeventConfigVersion.cmake.in @@ -0,0 +1,11 @@ +set(PACKAGE_VERSION "@EVENT_PACKAGE_VERSION@") + +# Check whether the requested PACKAGE_FIND_VERSION is compatible +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif() diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/Uninstall.cmake.in b/probe-busybox/libevent-2.1.11-stable/cmake/Uninstall.cmake.in new file mode 100644 index 00000000..c6dc09ef --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/Uninstall.cmake.in @@ -0,0 +1,23 @@ +# https://gitlab.kitware.com/cmake/community/wikis/FAQ#can-i-do-make-uninstall-with-cmake + +if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt") + message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt") +endif(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt") + +file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files) +string(REGEX REPLACE "\n" ";" files "${files}") +foreach(file ${files}) + message(STATUS "Uninstalling $ENV{DESTDIR}${file}") + if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + exec_program( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + if(NOT "${rm_retval}" STREQUAL 0) + message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") + endif(NOT "${rm_retval}" STREQUAL 0) + else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + message(STATUS "File $ENV{DESTDIR}${file} does not exist.") + endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") +endforeach(file) diff --git a/probe-busybox/libevent-2.1.11-stable/cmake/VersionViaGit.cmake b/probe-busybox/libevent-2.1.11-stable/cmake/VersionViaGit.cmake new file mode 100644 index 00000000..504980ad --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/cmake/VersionViaGit.cmake @@ -0,0 +1,64 @@ +# This module defines the following variables utilizing +# git to determine the parent tag. And if found the macro +# will attempt to parse them in the github tag fomat +# +# Useful for auto-versioning in our CMakeLists +# +# EVENT_GIT___VERSION_MAJOR - Major version. +# EVENT_GIT___VERSION_MINOR - Minor version +# EVENT_GIT___VERSION_STAGE - Stage version +# +# Example usage: +# +# event_fuzzy_version_from_git() +# message("Libvent major=${EVENT_GIT___VERSION_MAJOR}") +# message(" minor=${EVENT_GIT___VERSION_MINOR}") +# message(" patch=${EVENT_GIT___VERSION_PATCH}") +# message(" stage=${EVENT_GIT___VERSION_STAGE}") +# endif() + +include(FindGit) + +macro(event_fuzzy_version_from_git) + # set our defaults. 
+ set(EVENT_GIT___VERSION_MAJOR 2) + set(EVENT_GIT___VERSION_MINOR 1) + set(EVENT_GIT___VERSION_PATCH 11) + set(EVENT_GIT___VERSION_STAGE "stable") + + find_package(Git) + + if (GIT_FOUND) + execute_process( + COMMAND + ${GIT_EXECUTABLE} describe --abbrev=0 + WORKING_DIRECTORY + ${PROJECT_SOURCE_DIR} + RESULT_VARIABLE + GITRET + OUTPUT_VARIABLE + GITVERSION + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + string(REGEX REPLACE "[\\._-]" ";" VERSION_LIST "${GITVERSION}") + list(LENGTH VERSION_LIST VERSION_LIST_LENGTH) + + if ((GITRET EQUAL 0) AND (VERSION_LIST_LENGTH EQUAL 5)) + list(GET VERSION_LIST 1 _MAJOR) + list(GET VERSION_LIST 2 _MINOR) + list(GET VERSION_LIST 3 _PATCH) + list(GET VERSION_LIST 4 _STAGE) + + set(_DEFAULT_VERSION "${EVENT_GIT___VERSION_MAJOR}.${EVENT_GIT___VERSION_MINOR}.${EVENT_GIT___VERSION_PATCH}-${EVENT_GIT___VERSION_STAGE}") + set(_GIT_VERSION "${_MAJOR}.${_MINOR}.${_PATCH}-${_STAGE}") + + if (${_DEFAULT_VERSION} VERSION_LESS ${_GIT_VERSION}) + set(EVENT_GIT___VERSION_MAJOR ${_MAJOR}) + set(EVENT_GIT___VERSION_MINOR ${_MINOR}) + set(EVENT_GIT___VERSION_PATCH ${_PATCH}) + set(EVENT_GIT___VERSION_STAGE ${_STAGE}) + endif() + endif() + endif() +endmacro() diff --git a/probe-busybox/libevent-2.1.11-stable/compat/sys/queue.h b/probe-busybox/libevent-2.1.11-stable/compat/sys/queue.h new file mode 100644 index 00000000..c387bdcf --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/compat/sys/queue.h @@ -0,0 +1,488 @@ +/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */ +/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef SYS_QUEUE_H__ +#define SYS_QUEUE_H__ + +/* + * This file defines five types of data structures: singly-linked lists, + * lists, simple queues, tail queues, and circular queues. + * + * + * A singly-linked list is headed by a single forward pointer. 
The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. + * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +/* + * Singly-linked List definitions. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#ifndef _WIN32 +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} +#endif + +/* + * Singly-linked List access methods. + */ +#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_END(head) NULL +#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head)) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = SLIST_FIRST(head); \ + (var) != SLIST_END(head); \ + (var) = SLIST_NEXT(var, field)) + +/* + * Singly-linked List functions. 
+ */ +#define SLIST_INIT(head) { \ + SLIST_FIRST(head) = SLIST_END(head); \ +} + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (0) + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List access methods + */ +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_END(head) NULL +#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head)) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_FOREACH(var, head, field) \ + for((var) = LIST_FIRST(head); \ + (var)!= LIST_END(head); \ + (var) = LIST_NEXT(var, field)) + +/* + * List functions. + */ +#define LIST_INIT(head) do { \ + LIST_FIRST(head) = LIST_END(head); \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (0) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (0) + +#define LIST_REPLACE(elm, elm2, field) do { \ + if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ + (elm2)->field.le_next->field.le_prev = \ + &(elm2)->field.le_next; \ + (elm2)->field.le_prev = (elm)->field.le_prev; \ + *(elm2)->field.le_prev = (elm2); \ +} while (0) + +/* + * Simple queue definitions. + */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue access methods. + */ +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_END(head) NULL +#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for((var) = SIMPLEQ_FIRST(head); \ + (var) != SIMPLEQ_END(head); \ + (var) = SIMPLEQ_NEXT(var, field)) + +/* + * Simple queue functions. 
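A minimal usage sketch may help make the SLIST semantics described above concrete. The struct item / item_list types and the field names below are invented purely for illustration; they are not part of libevent or BusyBox:

    #include <stdio.h>
    #include <stdlib.h>
    #include "compat/sys/queue.h"    /* the header added in this patch */

    struct item {
        int value;
        SLIST_ENTRY(item) link;      /* embedded forward pointer */
    };

    SLIST_HEAD(item_list, item);     /* declares struct item_list */

    int main(void)
    {
        struct item_list head;
        struct item *it;
        int i;

        SLIST_INIT(&head);
        for (i = 0; i < 3; i++) {               /* push: O(1) at the head */
            it = malloc(sizeof(*it));           /* allocation checks omitted */
            it->value = i;
            SLIST_INSERT_HEAD(&head, it, link);
        }

        SLIST_FOREACH(it, &head, link)          /* forward-only traversal */
            printf("%d\n", it->value);          /* prints 2, 1, 0 */

        while (!SLIST_EMPTY(&head)) {           /* pop: O(1) at the head */
            it = SLIST_FIRST(&head);
            SLIST_REMOVE_HEAD(&head, link);
            free(it);
        }
        return 0;
    }

The LIST_* macros above follow the same pattern, except that each entry also stores the address of the previous element's next pointer, which is what lets LIST_REMOVE unlink an arbitrary element without walking the list.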
+ */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (0) + +#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \ + if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (0) + +/* + * Tail queue definitions. + */ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ +} + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} + +/* + * tail queue access methods + */ +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_END(head) NULL +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +/* XXX */ +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define TAILQ_EMPTY(head) \ + (TAILQ_FIRST(head) == TAILQ_END(head)) + +#define TAILQ_FOREACH(var, head, field) \ + for((var) = TAILQ_FIRST(head); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_NEXT(var, field)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for((var) = TAILQ_LAST(head, headname); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_PREV(var, headname, field)) + +/* + * Tail queue functions. 
+ */ +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \ + (elm2)->field.tqe_next->field.tqe_prev = \ + &(elm2)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm2)->field.tqe_next; \ + (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ + *(elm2)->field.tqe_prev = (elm2); \ +} while (0) + +/* + * Circular queue definitions. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { CIRCLEQ_END(&head), CIRCLEQ_END(&head) } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue access methods + */ +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) +#define CIRCLEQ_LAST(head) ((head)->cqh_last) +#define CIRCLEQ_END(head) ((void *)(head)) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) +#define CIRCLEQ_EMPTY(head) \ + (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head)) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for((var) = CIRCLEQ_FIRST(head); \ + (var) != CIRCLEQ_END(head); \ + (var) = CIRCLEQ_NEXT(var, field)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for((var) = CIRCLEQ_LAST(head); \ + (var) != CIRCLEQ_END(head); \ + (var) = CIRCLEQ_PREV(var, field)) + +/* + * Circular queue functions. 
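Tail queues deserve a sketch of their own, since the configure.ac added later in this patch explicitly probes for TAILQ_FOREACH in the system sys/queue.h. As before, struct job, job_queue and the field names are invented for illustration only:

    #include <stdio.h>
    #include <stdlib.h>
    #include "compat/sys/queue.h"

    struct job {
        int id;
        TAILQ_ENTRY(job) entries;    /* next pointer plus address of previous next */
    };

    TAILQ_HEAD(job_queue, job);

    int main(void)
    {
        struct job_queue q = TAILQ_HEAD_INITIALIZER(q);
        struct job *j, *tmp;
        int i;

        for (i = 0; i < 3; i++) {                /* enqueue at the tail: O(1) */
            j = malloc(sizeof(*j));              /* allocation checks omitted */
            j->id = i;
            TAILQ_INSERT_TAIL(&q, j, entries);
        }

        TAILQ_FOREACH(j, &q, entries)            /* forward: 0, 1, 2 */
            printf("forward %d\n", j->id);

        TAILQ_FOREACH_REVERSE(j, &q, job_queue, entries)
            printf("reverse %d\n", j->id);       /* reverse: 2, 1, 0 */

        j = TAILQ_FIRST(&q);
        while (j != TAILQ_END(&q)) {             /* destructive walk */
            tmp = TAILQ_NEXT(j, entries);
            TAILQ_REMOVE(&q, j, entries);        /* O(1) removal of any element */
            free(j);
            j = tmp;
        }
        return 0;
    }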
+ */ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = CIRCLEQ_END(head); \ + (head)->cqh_last = CIRCLEQ_END(head); \ +} while (0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = CIRCLEQ_END(head); \ + if ((head)->cqh_last == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = CIRCLEQ_END(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (0) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ +} while (0) + +#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \ + if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \ + CIRCLEQ_END(head)) \ + (head).cqh_last = (elm2); \ + else \ + (elm2)->field.cqe_next->field.cqe_prev = (elm2); \ + if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \ + CIRCLEQ_END(head)) \ + (head).cqh_first = (elm2); \ + else \ + (elm2)->field.cqe_prev->field.cqe_next = (elm2); \ +} while (0) + +#endif /* !SYS_QUEUE_H__ */ diff --git a/probe-busybox/libevent-2.1.11-stable/configure.ac b/probe-busybox/libevent-2.1.11-stable/configure.ac new file mode 100644 index 00000000..298d3ab9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/configure.ac @@ -0,0 +1,955 @@ +dnl Copyright 2000-2007 Niels Provos +dnl Copyright 2007-2012 Niels Provos and Nick Mathewson +dnl +dnl See LICENSE for copying information. +dnl +dnl Original version Dug Song + +AC_INIT(libevent,2.1.11-stable) +AC_PREREQ(2.62) +AC_CONFIG_SRCDIR(event.c) + +AC_CONFIG_MACRO_DIR([m4]) +AM_INIT_AUTOMAKE +dnl AM_SILENT_RULES req. automake 1.11. [no] defaults V=1 +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) +AC_CONFIG_HEADERS(config.h evconfig-private.h:evconfig-private.h.in) +AC_DEFINE(NUMERIC_VERSION, 0x02010b00, [Numeric representation of the version]) + +dnl Initialize prefix. 
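A side note on the NUMERIC_VERSION constant defined a few lines up: 0x02010b00 packs the 2.1.11 release one byte per component (0x02 major, 0x01 minor, 0x0b = 11 patch), with the low byte apparently reserved for pre-release markers. A tiny decoding sketch, in which everything other than the constant itself is invented for the example:

    #include <stdio.h>

    #define NUMERIC_VERSION 0x02010b00   /* as AC_DEFINE'd above */

    int main(void)
    {
        unsigned v = NUMERIC_VERSION;
        unsigned major = (v >> 24) & 0xff;   /* 0x02 -> 2  */
        unsigned minor = (v >> 16) & 0xff;   /* 0x01 -> 1  */
        unsigned patch = (v >>  8) & 0xff;   /* 0x0b -> 11 */

        printf("%u.%u.%u\n", major, minor, patch);   /* 2.1.11 */
        return 0;
    }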
+if test "$prefix" = "NONE"; then + prefix="/usr/local" +fi + +dnl Try and get a full POSIX environment on obscure systems +ifdef([AC_USE_SYSTEM_EXTENSIONS], [ +AC_USE_SYSTEM_EXTENSIONS +], [ +AC_AIX +AC_GNU_SOURCE +AC_MINIX +]) + +AC_CANONICAL_BUILD +AC_CANONICAL_HOST +dnl the 'build' machine is where we run configure and compile +dnl the 'host' machine is where the resulting stuff runs. + +#case "$host_os" in +# +# osf5*) +# CFLAGS="$CFLAGS -D_OSF_SOURCE" +# ;; +#esac + +dnl Checks for programs. +AM_PROG_CC_C_O +AC_PROG_INSTALL +AC_PROG_LN_S +# AC_PROG_MKDIR_P - $(MKDIR_P) should be defined by AM_INIT_AUTOMAKE + +# AC_PROG_SED is only available in Autoconf >= 2.59b; workaround for older +# versions +ifdef([AC_PROG_SED], [AC_PROG_SED], [ +AC_CHECK_PROGS(SED, [gsed sed]) +]) + +AC_PROG_GCC_TRADITIONAL + +# We need to test for at least gcc 2.95 here, because older versions don't +# have -fno-strict-aliasing +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 95) +#error +#endif])], have_gcc295=yes, have_gcc295=no) + +if test "$GCC" = "yes" ; then + # Enable many gcc warnings by default... + CFLAGS="$CFLAGS -Wall" + # And disable the strict-aliasing optimization, since it breaks + # our sockaddr-handling code in strange ways. + if test x$have_gcc295 = xyes; then + CFLAGS="$CFLAGS -fno-strict-aliasing" + fi +fi + +# OS X Lion started deprecating the system openssl. Let's just disable +# all deprecation warnings on OS X; but do so only for gcc... +if test "$GCC" = "yes" ; then + case "$host_os" in + darwin*) + CFLAGS="$CFLAGS -Wno-deprecated-declarations" + ;; + esac +fi + +AC_ARG_ENABLE(gcc-warnings, + AS_HELP_STRING(--disable-gcc-warnings, disable verbose warnings with GCC)) + +AC_ARG_ENABLE(gcc-hardening, + AS_HELP_STRING(--enable-gcc-hardening, enable compiler security checks), +[if test x$enableval = xyes; then + CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=2 -fstack-protector-all" + CFLAGS="$CFLAGS -fwrapv -fPIE -Wstack-protector" + CFLAGS="$CFLAGS --param ssp-buffer-size=1" +fi]) + +AC_ARG_ENABLE(thread-support, + AS_HELP_STRING(--disable-thread-support, disable support for threading), + [], [enable_thread_support=yes]) +AC_ARG_ENABLE(malloc-replacement, + AS_HELP_STRING(--disable-malloc-replacement, disable support for replacing the memory mgt functions), + [], [enable_malloc_replacement=yes]) +AC_ARG_ENABLE(openssl, + AS_HELP_STRING(--disable-openssl, disable support for openssl encryption), + [], [enable_openssl=yes]) +AC_ARG_ENABLE(debug-mode, + AS_HELP_STRING(--disable-debug-mode, disable support for running in debug mode), + [], [enable_debug_mode=yes]) +AC_ARG_ENABLE([libevent-install], + AS_HELP_STRING([--disable-libevent-install, disable installation of libevent]), + [], [enable_libevent_install=yes]) +AC_ARG_ENABLE([libevent-regress], + AS_HELP_STRING([--disable-libevent-regress, skip regress in make check]), + [], [enable_libevent_regress=yes]) +AC_ARG_ENABLE([samples], + AS_HELP_STRING([--disable-samples, skip building of sample programs]), + [], [enable_samples=yes]) +AC_ARG_ENABLE([function-sections], + AS_HELP_STRING([--enable-function-sections, make static library allow smaller binaries with --gc-sections]), + [], [enable_function_sections=no]) +AC_ARG_ENABLE([verbose-debug], + AS_HELP_STRING([--enable-verbose-debug, verbose debug logging]), + [], [enable_verbose_debug=no]) +AC_ARG_ENABLE([clock-gettime], + AS_HELP_STRING(--disable-clock-gettime, do not use clock_gettime even if it is available), + [], 
[enable_clock_gettime=yes]) + + +AC_PROG_LIBTOOL + +dnl Uncomment "AC_DISABLE_SHARED" to make shared libraries not get +dnl built by default. You can also turn shared libs on and off from +dnl the command line with --enable-shared and --disable-shared. +dnl AC_DISABLE_SHARED +AC_SUBST(LIBTOOL_DEPS) + +AM_CONDITIONAL([BUILD_SAMPLES], [test "$enable_samples" = "yes"]) +AM_CONDITIONAL([BUILD_REGRESS], [test "$enable_libevent_regress" = "yes"]) + +dnl Checks for libraries. +AC_SEARCH_LIBS([inet_ntoa], [nsl]) +AC_SEARCH_LIBS([socket], [socket]) +AC_SEARCH_LIBS([inet_aton], [resolv]) +if test "x$enable_clock_gettime" = "xyes"; then + AC_SEARCH_LIBS([clock_gettime], [rt]) + AC_CHECK_FUNCS([clock_gettime]) +fi +AC_SEARCH_LIBS([sendfile], [sendfile]) + +dnl - check if the macro _WIN32 is defined on this compiler. +dnl - (this is how we check for a windows compiler) +AC_MSG_CHECKING(for WIN32) +AC_TRY_COMPILE(, + [ +#ifndef _WIN32 +die horribly +#endif + ], + bwin32=true; AC_MSG_RESULT(yes), + bwin32=false; AC_MSG_RESULT(no), +) + +dnl - check if the macro __midipix__ is defined on this compiler. +dnl - (this is how we check for a midipix version of GCC) +AC_MSG_CHECKING(for MIDIPIX) +AC_TRY_COMPILE(, + [ +#ifndef __midipix__ +die horribly +#endif + ], + midipix=true; AC_MSG_RESULT(yes), + midipix=false; AC_MSG_RESULT(no), +) + +dnl - check if the macro __CYGWIN__ is defined on this compiler. +dnl - (this is how we check for a cygwin version of GCC) +AC_MSG_CHECKING(for CYGWIN) +AC_TRY_COMPILE(, + [ +#ifndef __CYGWIN__ +die horribly +#endif + ], + cygwin=true; AC_MSG_RESULT(yes), + cygwin=false; AC_MSG_RESULT(no), +) + +AC_CHECK_HEADERS([zlib.h]) + +if test "x$ac_cv_header_zlib_h" = "xyes"; then +dnl Determine if we have zlib for regression tests +dnl Don't put this one in LIBS +save_LIBS="$LIBS" +LIBS="" +ZLIB_LIBS="" +have_zlib=no +AC_SEARCH_LIBS([inflateEnd], [z], + [have_zlib=yes + ZLIB_LIBS="$LIBS" + AC_DEFINE(HAVE_LIBZ, 1, [Define if the system has zlib])]) +LIBS="$save_LIBS" +AC_SUBST(ZLIB_LIBS) +fi +AM_CONDITIONAL(ZLIB_REGRESS, [test "$have_zlib" = "yes"]) + +dnl See if we have openssl. This doesn't go in LIBS either. +if test "$bwin32" = true; then + EV_LIB_WS32=-lws2_32 + EV_LIB_GDI=-lgdi32 +else + EV_LIB_WS32= + EV_LIB_GDI= +fi +AC_SUBST(EV_LIB_WS32) +AC_SUBST(EV_LIB_GDI) +AC_SUBST(OPENSSL_LIBADD) + +AC_SYS_LARGEFILE + +LIBEVENT_OPENSSL + +dnl Checks for header files. 
+AC_CHECK_HEADERS([ \ + arpa/inet.h \ + fcntl.h \ + ifaddrs.h \ + mach/mach_time.h \ + netdb.h \ + netinet/in.h \ + netinet/in6.h \ + netinet/tcp.h \ + sys/un.h \ + poll.h \ + port.h \ + stdarg.h \ + stddef.h \ + sys/devpoll.h \ + sys/epoll.h \ + sys/event.h \ + sys/eventfd.h \ + sys/ioctl.h \ + sys/mman.h \ + sys/param.h \ + sys/queue.h \ + sys/resource.h \ + sys/select.h \ + sys/sendfile.h \ + sys/socket.h \ + sys/stat.h \ + sys/time.h \ + sys/timerfd.h \ + sys/uio.h \ + sys/wait.h \ + errno.h \ +]) + +AC_CHECK_HEADERS(sys/sysctl.h, [], [], [ +#ifdef HAVE_SYS_PARAM_H +#include +#endif +]) +if test "x$ac_cv_header_sys_queue_h" = "xyes"; then + AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h) + AC_EGREP_CPP(yes, +[ +#include +#ifdef TAILQ_FOREACH + yes +#endif +], [AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_TAILQFOREACH, 1, + [Define if TAILQ_FOREACH is defined in ])], + AC_MSG_RESULT(no) + ) +fi + +if test "x$ac_cv_header_sys_time_h" = "xyes"; then + AC_MSG_CHECKING(for timeradd in sys/time.h) + AC_EGREP_CPP(yes, +[ +#include +#ifdef timeradd + yes +#endif +], [ AC_DEFINE(HAVE_TIMERADD, 1, + [Define if timeradd is defined in ]) + AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no) +) +fi + +if test "x$ac_cv_header_sys_time_h" = "xyes"; then + AC_MSG_CHECKING(for timercmp in sys/time.h) + AC_EGREP_CPP(yes, +[ +#include +#ifdef timercmp + yes +#endif +], [ AC_DEFINE(HAVE_TIMERCMP, 1, + [Define if timercmp is defined in ]) + AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no) +) +fi + +if test "x$ac_cv_header_sys_time_h" = "xyes"; then + AC_MSG_CHECKING(for timerclear in sys/time.h) + AC_EGREP_CPP(yes, +[ +#include +#ifdef timerclear + yes +#endif +], [ AC_DEFINE(HAVE_TIMERCLEAR, 1, + [Define if timerclear is defined in ]) + AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no) +) +fi + +if test "x$ac_cv_header_sys_time_h" = "xyes"; then + AC_MSG_CHECKING(for timerisset in sys/time.h) + AC_EGREP_CPP(yes, +[ +#include +#ifdef timerisset + yes +#endif +], [ AC_DEFINE(HAVE_TIMERISSET, 1, + [Define if timerisset is defined in ]) + AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no) +) +fi + +if test "x$ac_cv_header_sys_sysctl_h" = "xyes"; then + AC_CHECK_DECLS([CTL_KERN, KERN_RANDOM, RANDOM_UUID, KERN_ARND], [], [], + [[#include + #include ]] + ) +fi + +AM_CONDITIONAL(BUILD_WIN32, test x$bwin32 = xtrue) +AM_CONDITIONAL(BUILD_CYGWIN, test x$cygwin = xtrue) +AM_CONDITIONAL(BUILD_MIDIPIX, test x$midipix = xtrue) +AM_CONDITIONAL(BUILD_WITH_NO_UNDEFINED, test x$bwin32 = xtrue || test x$cygwin = xtrue || test x$midipix = xtrue) + +if test x$bwin32 = xtrue; then + AC_SEARCH_LIBS([getservbyname],[ws2_32]) +fi + +dnl Checks for typedefs, structures, and compiler characteristics. +AC_C_CONST +AC_C_INLINE +AC_HEADER_TIME + +dnl Checks for library functions. 
+AC_CHECK_FUNCS([ \ + accept4 \ + arc4random \ + arc4random_buf \ + arc4random_addrandom \ + eventfd \ + epoll_create1 \ + fcntl \ + getegid \ + geteuid \ + getifaddrs \ + getnameinfo \ + getprotobynumber \ + gettimeofday \ + inet_ntop \ + inet_pton \ + issetugid \ + mach_absolute_time \ + mmap \ + nanosleep \ + pipe \ + pipe2 \ + putenv \ + sendfile \ + setenv \ + setrlimit \ + sigaction \ + signal \ + splice \ + strlcpy \ + strsep \ + strtok_r \ + strtoll \ + sysctl \ + timerfd_create \ + umask \ + unsetenv \ + usleep \ + vasprintf \ + getservbyname \ +]) +AM_CONDITIONAL(STRLCPY_IMPL, [test x"$ac_cv_func_strlcpy" = xno]) + +AC_CACHE_CHECK( + [for getaddrinfo], + [libevent_cv_getaddrinfo], + [AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ + #ifdef HAVE_NETDB_H + #include + #endif + ]], + [[ + getaddrinfo; + ]] + )], + [libevent_cv_getaddrinfo=yes], + [libevent_cv_getaddrinfo=no] + )] +) +if test "$libevent_cv_getaddrinfo" = "yes" ; then + AC_DEFINE([HAVE_GETADDRINFO], [1], [Do we have getaddrinfo()?]) +else + +# Check for gethostbyname_r in all its glorious incompatible versions. +# (This is cut-and-pasted from Tor, which based its logic on +# Python's configure.in.) +AH_TEMPLATE(HAVE_GETHOSTBYNAME_R, + [Define this if you have any gethostbyname_r()]) + +AC_CHECK_FUNC(gethostbyname_r, [ + AC_MSG_CHECKING([how many arguments gethostbyname_r() wants]) + OLD_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $MY_CPPFLAGS $MY_THREAD_CPPFLAGS $MY_CFLAGS" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([ +#include + ], [[ + char *cp1, *cp2; + struct hostent *h1, *h2; + int i1, i2; + (void)gethostbyname_r(cp1,h1,cp2,i1,&h2,&i2); + ]])],[ + AC_DEFINE(HAVE_GETHOSTBYNAME_R) + AC_DEFINE(HAVE_GETHOSTBYNAME_R_6_ARG, 1, + [Define this if gethostbyname_r takes 6 arguments]) + AC_MSG_RESULT(6) + ], [ + AC_TRY_COMPILE([ +#include + ], [ + char *cp1, *cp2; + struct hostent *h1; + int i1, i2; + (void)gethostbyname_r(cp1,h1,cp2,i1,&i2); + ], [ + AC_DEFINE(HAVE_GETHOSTBYNAME_R) + AC_DEFINE(HAVE_GETHOSTBYNAME_R_5_ARG, 1, + [Define this if gethostbyname_r takes 5 arguments]) + AC_MSG_RESULT(5) + ], [ + AC_TRY_COMPILE([ +#include + ], [ + char *cp1; + struct hostent *h1; + struct hostent_data hd; + (void) gethostbyname_r(cp1,h1,&hd); + ], [ + AC_DEFINE(HAVE_GETHOSTBYNAME_R) + AC_DEFINE(HAVE_GETHOSTBYNAME_R_3_ARG, 1, + [Define this if gethostbyname_r takes 3 arguments]) + AC_MSG_RESULT(3) + ], [ + AC_MSG_RESULT(0) + ]) + ]) + ]) + CFLAGS=$OLD_CFLAGS +]) + +fi + +AC_MSG_CHECKING(for F_SETFD in fcntl.h) +AC_EGREP_CPP(yes, +[ +#define _GNU_SOURCE 1 +#include +#ifdef F_SETFD +yes +#endif +], [ AC_DEFINE(HAVE_SETFD, 1, + [Define if F_SETFD is defined in ]) + AC_MSG_RESULT(yes) ], AC_MSG_RESULT(no)) + +needsignal=no +haveselect=no +if test x$bwin32 != xtrue; then + AC_CHECK_FUNCS(select, [haveselect=yes], ) + if test "x$haveselect" = "xyes" ; then + needsignal=yes + fi +fi +AM_CONDITIONAL(SELECT_BACKEND, [test "x$haveselect" = "xyes"]) + +havepoll=no +AC_CHECK_FUNCS(poll, [havepoll=yes], ) +if test "x$havepoll" = "xyes" ; then + needsignal=yes +fi +AM_CONDITIONAL(POLL_BACKEND, [test "x$havepoll" = "xyes"]) + +havedevpoll=no +if test "x$ac_cv_header_sys_devpoll_h" = "xyes"; then + AC_DEFINE(HAVE_DEVPOLL, 1, + [Define if /dev/poll is available]) +fi +AM_CONDITIONAL(DEVPOLL_BACKEND, [test "x$ac_cv_header_sys_devpoll_h" = "xyes"]) + +havekqueue=no +if test "x$ac_cv_header_sys_event_h" = "xyes"; then + AC_CHECK_FUNCS(kqueue, [havekqueue=yes], ) + if test "x$havekqueue" = "xyes" ; then + AC_MSG_CHECKING(for working kqueue) + AC_TRY_RUN( +#ifdef HAVE_STDLIB_H 
+#include +#endif +#ifdef HAVE_STRING_H +#include +#endif +#include +#include +#include +#include +#include +#include + +int +main(int argc, char **argv) +{ + int kq; + int n; + int fd[[2]]; + struct kevent ev; + struct timespec ts; + char buf[[80000]]; + + if (pipe(fd) == -1) + exit(1); + if (fcntl(fd[[1]], F_SETFL, O_NONBLOCK) == -1) + exit(1); + + while ((n = write(fd[[1]], buf, sizeof(buf))) == sizeof(buf)) + ; + + if ((kq = kqueue()) == -1) + exit(1); + + memset(&ev, 0, sizeof(ev)); + ev.ident = fd[[1]]; + ev.filter = EVFILT_WRITE; + ev.flags = EV_ADD | EV_ENABLE; + n = kevent(kq, &ev, 1, NULL, 0, NULL); + if (n == -1) + exit(1); + + read(fd[[0]], buf, sizeof(buf)); + + ts.tv_sec = 0; + ts.tv_nsec = 0; + n = kevent(kq, NULL, 0, &ev, 1, &ts); + if (n == -1 || n == 0) + exit(1); + + exit(0); +}, [AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_WORKING_KQUEUE, 1, + [Define if kqueue works correctly with pipes]) + havekqueue=yes + ], AC_MSG_RESULT(no), AC_MSG_RESULT(no)) + fi +fi +AM_CONDITIONAL(KQUEUE_BACKEND, [test "x$havekqueue" = "xyes"]) + +haveepollsyscall=no +haveepoll=no +AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes], ) +if test "x$haveepoll" = "xyes" ; then + AC_DEFINE(HAVE_EPOLL, 1, + [Define if your system supports the epoll system calls]) + needsignal=yes +fi +if test "x$ac_cv_header_sys_epoll_h" = "xyes"; then + if test "x$haveepoll" = "xno" ; then + AC_MSG_CHECKING(for epoll system call) + AC_TRY_RUN( +#include +#include +#include +#include +#include +#include + +int +epoll_create(int size) +{ + return (syscall(__NR_epoll_create, size)); +} + +int +main(int argc, char **argv) +{ + int epfd; + + epfd = epoll_create(256); + exit (epfd == -1 ? 1 : 0); +}, [AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_EPOLL, 1, + [Define if your system supports the epoll system calls]) + needsignal=yes + have_epoll=yes + AC_LIBOBJ(epoll_sub) + ], AC_MSG_RESULT(no), AC_MSG_RESULT(no)) + fi +fi +AM_CONDITIONAL(EPOLL_BACKEND, [test "x$haveepoll" = "xyes"]) + +haveeventports=no +AC_CHECK_FUNCS(port_create, [haveeventports=yes], ) +if test "x$haveeventports" = "xyes" ; then + AC_DEFINE(HAVE_EVENT_PORTS, 1, + [Define if your system supports event ports]) + needsignal=yes +fi +AM_CONDITIONAL(EVPORT_BACKEND, [test "x$haveeventports" = "xyes"]) + +if test "x$bwin32" = "xtrue"; then + needsignal=yes +fi + +AM_CONDITIONAL(SIGNAL_SUPPORT, [test "x$needsignal" = "xyes"]) + +AC_TYPE_PID_T +AC_TYPE_SIZE_T +AC_TYPE_SSIZE_T + +AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t, uintptr_t], , , +[#ifdef HAVE_STDINT_H +#include +#elif defined(HAVE_INTTYPES_H) +#include +#endif +#ifdef HAVE_SYS_TYPES_H +#include +#endif]) + +AC_CHECK_TYPES([fd_mask], , , +[#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SELECT_H +#include +#endif]) + +AC_CHECK_SIZEOF(long long) +AC_CHECK_SIZEOF(long) +AC_CHECK_SIZEOF(int) +AC_CHECK_SIZEOF(short) +AC_CHECK_SIZEOF(size_t) +AC_CHECK_SIZEOF(void *) +AC_CHECK_SIZEOF(off_t) +AC_CHECK_SIZEOF(time_t) + +AC_CHECK_TYPES([struct in6_addr, struct sockaddr_in6, struct sockaddr_un, sa_family_t, struct addrinfo, struct sockaddr_storage], , , +[#define _GNU_SOURCE 1 +#include +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_NETINET_IN6_H +#include +#endif +#ifdef HAVE_SYS_UN_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#ifdef _WIN32 +#define WIN32_WINNT 0x400 +#define _WIN32_WINNT 0x400 +#define WIN32_LEAN_AND_MEAN +#if defined(_MSC_VER) && (_MSC_VER < 1300) +#include +#else +#include +#include +#endif +#endif +]) +AC_CHECK_MEMBERS([struct 
in6_addr.s6_addr32, struct in6_addr.s6_addr16, struct sockaddr_in.sin_len, struct sockaddr_in6.sin6_len, struct sockaddr_storage.ss_family, struct sockaddr_storage.__ss_family], , , +[#include +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_NETINET_IN6_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef _WIN32 +#define WIN32_WINNT 0x400 +#define _WIN32_WINNT 0x400 +#define WIN32_LEAN_AND_MEAN +#if defined(_MSC_VER) && (_MSC_VER < 1300) +#include +#else +#include +#include +#endif +#endif +]) + +AC_CHECK_TYPES([struct linger],,, +[ +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +]) + +AC_MSG_CHECKING([for socklen_t]) +AC_TRY_COMPILE([ + #include + #ifdef _WIN32 + #include + #else + #include + #endif], + [socklen_t x;], + AC_MSG_RESULT([yes]), + [AC_MSG_RESULT([no]) + AC_DEFINE(socklen_t, unsigned int, + [Define to unsigned int if you dont have it])] +) + +# __func__/__FUNCTION__ is not a macros in general +AC_MSG_CHECKING([whether our compiler supports __func__]) +AC_TRY_COMPILE([], + [ const char *cp = __func__; ], + [ AC_DEFINE(HAVE___func__, 1, [Define to 1 if compiler have __func__]) + AC_MSG_RESULT([yes]) + ], + AC_MSG_RESULT([no]) +) +AC_MSG_CHECKING([whether our compiler supports __FUNCTION__]) +AC_TRY_COMPILE([], + [ const char *cp = __FUNCTION__; ], + [ AC_DEFINE(HAVE___FUNCTION__, 1, [Define to 1 if compiler have __FUNCTION__]) + AC_MSG_RESULT([yes]) + ], + AC_MSG_RESULT([no]) +) + +# check if we can compile with pthreads +have_pthreads=no +if test x$bwin32 != xtrue && test "$enable_thread_support" != "no"; then + ACX_PTHREAD([ + AC_DEFINE(HAVE_PTHREADS, 1, + [Define if we have pthreads on this system]) + have_pthreads=yes]) + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + AC_CHECK_SIZEOF(pthread_t, , + [AC_INCLUDES_DEFAULT() + #include ] + ) +fi +AM_CONDITIONAL(THREADS, [test "$enable_thread_support" != "no"]) +AM_CONDITIONAL(PTHREADS, [test "$have_pthreads" != "no" && test "$enable_thread_support" != "no"]) + +# check if we should compile locking into the library +if test x$enable_thread_support = xno; then + AC_DEFINE(DISABLE_THREAD_SUPPORT, 1, + [Define if libevent should not be compiled with thread support]) +fi + +# check if we should hard-code the mm functions. +if test x$enable_malloc_replacement = xno; then + AC_DEFINE(DISABLE_MM_REPLACEMENT, 1, + [Define if libevent should not allow replacing the mm functions]) +fi + +# check if we should hard-code debugging out +if test x$enable_debug_mode = xno; then + AC_DEFINE(DISABLE_DEBUG_MODE, 1, + [Define if libevent should build without support for a debug mode]) +fi + +# check if we should enable verbose debugging +if test x$enable_verbose_debug = xyes; then + CFLAGS="$CFLAGS -DUSE_DEBUG" +fi + +# check if we have and should use openssl +AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"]) + +# Add some more warnings which we use in development but not in the +# released versions. (Some relevant gcc versions can't handle these.) 
+if test x$enable_gcc_warnings != xno && test "$GCC" = "yes"; then + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +#if !defined(__GNUC__) || (__GNUC__ < 4) +#error +#endif])], have_gcc4=yes, have_gcc4=no) + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 2) +#error +#endif])], have_gcc42=yes, have_gcc42=no) + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 5) +#error +#endif])], have_gcc45=yes, have_gcc45=no) + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +#if !defined(__clang__) +#error +#endif])], have_clang=yes, have_clang=no) + + # -W is the same as -Wextra + CFLAGS="$CFLAGS -W -Wfloat-equal -Wundef -Wpointer-arith -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wredundant-decls -Wmissing-declarations -Wredundant-decls -Wnested-externs -Wbad-function-cast" + if test x$enable_gcc_warnings = xyes; then + CFLAGS="$CFLAGS -Werror" + fi + + CFLAGS="$CFLAGS -Wno-unused-parameter -Wstrict-aliasing" + + if test x$have_gcc4 = xyes ; then + # These warnings break gcc 3.3.5 and work on gcc 4.0.2 + CFLAGS="$CFLAGS -Winit-self -Wmissing-field-initializers -Wdeclaration-after-statement" + #CFLAGS="$CFLAGS -Wold-style-definition" + fi + + if test x$have_gcc42 = xyes ; then + # These warnings break gcc 4.0.2 and work on gcc 4.2 + CFLAGS="$CFLAGS -Waddress" + fi + + if test x$have_gcc42 = xyes && test x$have_clang = xno; then + # These warnings break gcc 4.0.2 and clang, but work on gcc 4.2 + CFLAGS="$CFLAGS -Wnormalized=id -Woverride-init" + fi + + if test x$have_gcc45 = xyes ; then + # These warnings work on gcc 4.5 + CFLAGS="$CFLAGS -Wlogical-op" + fi + + if test x$have_clang = xyes; then + # Disable the unused-function warnings, because these trigger + # for minheap-internal.h related code. + CFLAGS="$CFLAGS -Wno-unused-function" + + # clang on macosx emits warnings for each directory specified which + # isn't "used" generating a lot of build noise (typically 3 warnings + # per file + case "$host_os" in + darwin*) + CFLAGS="$CFLAGS -Qunused-arguments" + ;; + esac + fi + +##This will break the world on some 64-bit architectures +# CFLAGS="$CFLAGS -Winline" + +fi + +LIBEVENT_GC_SECTIONS= +if test "$GCC" = yes && test "$enable_function_sections" = yes ; then + AC_CACHE_CHECK( + [if linker supports omitting unused code and data], + [libevent_cv_gc_sections_runs], + [ + dnl NetBSD will link but likely not run with --gc-sections + dnl http://bugs.ntp.org/1844 + dnl http://gnats.netbsd.org/40401 + dnl --gc-sections causes attempt to load as linux elf, with + dnl wrong syscalls in place. Test a little gauntlet of + dnl simple stdio read code checking for errors, expecting + dnl enough syscall differences that the NetBSD code will + dnl fail even with Linux emulation working as designed. + dnl A shorter test could be refined by someone with access + dnl to a NetBSD host with Linux emulation working. 
+ origCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -Wl,--gc-sections" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ + #include + #include + ]], + [[ + FILE * fpC; + char buf[32]; + size_t cch; + int read_success_once; + + fpC = fopen("conftest.c", "r"); + if (NULL == fpC) + exit(1); + do { + cch = fread(buf, sizeof(buf), 1, fpC); + read_success_once |= (0 != cch); + } while (0 != cch); + if (!read_success_once) + exit(2); + if (!feof(fpC)) + exit(3); + if (0 != fclose(fpC)) + exit(4); + + exit(EXIT_SUCCESS); + ]] + )], + [ + dnl We have to do this invocation manually so that we can + dnl get the output of conftest.err to make sure it doesn't + dnl mention gc-sections. + if test "X$cross_compiling" = "Xyes" || grep gc-sections conftest.err ; then + libevent_cv_gc_sections_runs=no + else + libevent_cv_gc_sections_runs=no + ./conftest >/dev/null 2>&1 && libevent_cv_gc_sections_runs=yes + fi + ], + [libevent_cv_gc_sections_runs=no] + ) + CFLAGS="$origCFLAGS" + AS_UNSET([origCFLAGS]) + ] + ) + case "$libevent_cv_gc_sections_runs" in + yes) + CFLAGS="-ffunction-sections -fdata-sections $CFLAGS" + LIBEVENT_GC_SECTIONS="-Wl,--gc-sections" + ;; + esac +fi +AC_SUBST([LIBEVENT_GC_SECTIONS]) + +AM_CONDITIONAL([INSTALL_LIBEVENT], [test "$enable_libevent_install" = "yes"]) + +AC_CONFIG_FILES( [libevent.pc libevent_openssl.pc libevent_pthreads.pc libevent_core.pc libevent_extra.pc] ) +AC_OUTPUT(Makefile) diff --git a/probe-busybox/libevent-2.1.11-stable/configure.gnu b/probe-busybox/libevent-2.1.11-stable/configure.gnu new file mode 100755 index 00000000..07817610 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/configure.gnu @@ -0,0 +1,2 @@ +#!/bin/sh +exec "${0%.gnu}" --disable-shared --enable-static --disable-libevent-install --disable-samples "$@" diff --git a/probe-busybox/libevent-2.1.11-stable/defer-internal.h b/probe-busybox/libevent-2.1.11-stable/defer-internal.h new file mode 100644 index 00000000..d6f80a11 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/defer-internal.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef DEFER_INTERNAL_H_INCLUDED_ +#define DEFER_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +struct event_callback; +typedef void (*deferred_cb_fn)(struct event_callback *, void *); + +/** + Initialize an empty, non-pending event_callback. + + @param deferred The struct event_callback structure to initialize. + @param priority The priority that the callback should run at. + @param cb The function to run when the struct event_callback executes. + @param arg The function's second argument. + */ +EVENT2_EXPORT_SYMBOL +void event_deferred_cb_init_(struct event_callback *, ev_uint8_t, deferred_cb_fn, void *); +/** + Change the priority of a non-pending event_callback. + */ +void event_deferred_cb_set_priority_(struct event_callback *, ev_uint8_t); +/** + Cancel a struct event_callback if it is currently scheduled in an event_base. + */ +EVENT2_EXPORT_SYMBOL +void event_deferred_cb_cancel_(struct event_base *, struct event_callback *); +/** + Activate a struct event_callback if it is not currently scheduled in an event_base. + + Return true if it was not previously scheduled. + */ +EVENT2_EXPORT_SYMBOL +int event_deferred_cb_schedule_(struct event_base *, struct event_callback *); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT_INTERNAL_H_INCLUDED_ */ + diff --git a/probe-busybox/libevent-2.1.11-stable/devpoll.c b/probe-busybox/libevent-2.1.11-stable/devpoll.c new file mode 100644 index 00000000..3a2f86d6 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/devpoll.c @@ -0,0 +1,311 @@ +/* + * Copyright 2000-2009 Niels Provos + * Copyright 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_DEVPOLL + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/thread.h" +#include "event-internal.h" +#include "evsignal-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "evthread-internal.h" + +struct devpollop { + struct pollfd *events; + int nevents; + int dpfd; + struct pollfd *changes; + int nchanges; +}; + +static void *devpoll_init(struct event_base *); +static int devpoll_add(struct event_base *, int fd, short old, short events, void *); +static int devpoll_del(struct event_base *, int fd, short old, short events, void *); +static int devpoll_dispatch(struct event_base *, struct timeval *); +static void devpoll_dealloc(struct event_base *); + +const struct eventop devpollops = { + "devpoll", + devpoll_init, + devpoll_add, + devpoll_del, + devpoll_dispatch, + devpoll_dealloc, + 1, /* need reinit */ + EV_FEATURE_FDS|EV_FEATURE_O1, + 0 +}; + +#define NEVENT 32000 + +static int +devpoll_commit(struct devpollop *devpollop) +{ + /* + * Due to a bug in Solaris, we have to use pwrite with an offset of 0. + * Write is limited to 2GB of data, until it will fail. + */ + if (pwrite(devpollop->dpfd, devpollop->changes, + sizeof(struct pollfd) * devpollop->nchanges, 0) == -1) + return (-1); + + devpollop->nchanges = 0; + return (0); +} + +static int +devpoll_queue(struct devpollop *devpollop, int fd, int events) { + struct pollfd *pfd; + + if (devpollop->nchanges >= devpollop->nevents) { + /* + * Change buffer is full, must commit it to /dev/poll before + * adding more + */ + if (devpoll_commit(devpollop) != 0) + return (-1); + } + + pfd = &devpollop->changes[devpollop->nchanges++]; + pfd->fd = fd; + pfd->events = events; + pfd->revents = 0; + + return (0); +} + +static void * +devpoll_init(struct event_base *base) +{ + int dpfd, nfiles = NEVENT; + struct rlimit rl; + struct devpollop *devpollop; + + if (!(devpollop = mm_calloc(1, sizeof(struct devpollop)))) + return (NULL); + + if (getrlimit(RLIMIT_NOFILE, &rl) == 0 && + rl.rlim_cur != RLIM_INFINITY) + nfiles = rl.rlim_cur; + + /* Initialize the kernel queue */ + if ((dpfd = evutil_open_closeonexec_("/dev/poll", O_RDWR, 0)) == -1) { + event_warn("open: /dev/poll"); + mm_free(devpollop); + return (NULL); + } + + devpollop->dpfd = dpfd; + + /* Initialize fields */ + /* FIXME: allocating 'nfiles' worth of space here can be + * expensive and unnecessary. See how epoll.c does it instead. 
*/ + devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd)); + if (devpollop->events == NULL) { + mm_free(devpollop); + close(dpfd); + return (NULL); + } + devpollop->nevents = nfiles; + + devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd)); + if (devpollop->changes == NULL) { + mm_free(devpollop->events); + mm_free(devpollop); + close(dpfd); + return (NULL); + } + + evsig_init_(base); + + return (devpollop); +} + +static int +devpoll_dispatch(struct event_base *base, struct timeval *tv) +{ + struct devpollop *devpollop = base->evbase; + struct pollfd *events = devpollop->events; + struct dvpoll dvp; + int i, res, timeout = -1; + + if (devpollop->nchanges) + devpoll_commit(devpollop); + + if (tv != NULL) + timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000; + + dvp.dp_fds = devpollop->events; + dvp.dp_nfds = devpollop->nevents; + dvp.dp_timeout = timeout; + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = ioctl(devpollop->dpfd, DP_POLL, &dvp); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (res == -1) { + if (errno != EINTR) { + event_warn("ioctl: DP_POLL"); + return (-1); + } + + return (0); + } + + event_debug(("%s: devpoll_wait reports %d", __func__, res)); + + for (i = 0; i < res; i++) { + int which = 0; + int what = events[i].revents; + + if (what & POLLHUP) + what |= POLLIN | POLLOUT; + else if (what & POLLERR) + what |= POLLIN | POLLOUT; + + if (what & POLLIN) + which |= EV_READ; + if (what & POLLOUT) + which |= EV_WRITE; + + if (!which) + continue; + + /* XXX(niels): not sure if this works for devpoll */ + evmap_io_active_(base, events[i].fd, which); + } + + return (0); +} + + +static int +devpoll_add(struct event_base *base, int fd, short old, short events, void *p) +{ + struct devpollop *devpollop = base->evbase; + int res; + (void)p; + + /* + * It's not necessary to OR the existing read/write events that we + * are currently interested in with the new event we are adding. + * The /dev/poll driver ORs any new events with the existing events + * that it has cached for the fd. + */ + + res = 0; + if (events & EV_READ) + res |= POLLIN; + if (events & EV_WRITE) + res |= POLLOUT; + + if (devpoll_queue(devpollop, fd, res) != 0) + return (-1); + + return (0); +} + +static int +devpoll_del(struct event_base *base, int fd, short old, short events, void *p) +{ + struct devpollop *devpollop = base->evbase; + int res; + (void)p; + + res = 0; + if (events & EV_READ) + res |= POLLIN; + if (events & EV_WRITE) + res |= POLLOUT; + + /* + * The only way to remove an fd from the /dev/poll monitored set is + * to use POLLREMOVE by itself. This removes ALL events for the fd + * provided so if we care about two events and are only removing one + * we must re-add the other event after POLLREMOVE. + */ + + if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0) + return (-1); + + if ((res & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) { + /* + * We're not deleting all events, so we must resubmit the + * event that we are still interested in if one exists. 
+ */ + + if ((res & POLLIN) && (old & EV_WRITE)) { + /* Deleting read, still care about write */ + devpoll_queue(devpollop, fd, POLLOUT); + } else if ((res & POLLOUT) && (old & EV_READ)) { + /* Deleting write, still care about read */ + devpoll_queue(devpollop, fd, POLLIN); + } + } + + return (0); +} + +static void +devpoll_dealloc(struct event_base *base) +{ + struct devpollop *devpollop = base->evbase; + + evsig_dealloc_(base); + if (devpollop->events) + mm_free(devpollop->events); + if (devpollop->changes) + mm_free(devpollop->changes); + if (devpollop->dpfd >= 0) + close(devpollop->dpfd); + + memset(devpollop, 0, sizeof(struct devpollop)); + mm_free(devpollop); +} + +#endif /* EVENT__HAVE_DEVPOLL */ diff --git a/probe-busybox/libevent-2.1.11-stable/epoll.c b/probe-busybox/libevent-2.1.11-stable/epoll.c new file mode 100644 index 00000000..a0df0d21 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/epoll.c @@ -0,0 +1,543 @@ +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_EPOLL + +#include +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIMERFD_H +#include +#endif + +#include "event-internal.h" +#include "evsignal-internal.h" +#include "event2/thread.h" +#include "evthread-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "changelist-internal.h" +#include "time-internal.h" + +/* Since Linux 2.6.17, epoll is able to report about peer half-closed connection + using special EPOLLRDHUP flag on a read event. 
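At the libevent API level, the EPOLLRDHUP capability described above is what backs the EV_CLOSED event flag on backends that advertise EV_FEATURE_EARLY_CLOSE. A hedged usage sketch follows; base and fd are assumed to exist, and on_closed / watch_for_close are invented names:

    #include <stdio.h>
    #include <event2/event.h>

    /* Fires when the peer half-closes its end of the connection. */
    static void on_closed(evutil_socket_t fd, short what, void *arg)
    {
        (void)arg;
        if (what & EV_CLOSED)
            printf("fd %d: peer closed its side\n", (int)fd);
    }

    /* Sketch only: EV_CLOSED is honoured on backends reporting
     * EV_FEATURE_EARLY_CLOSE, such as this epoll backend when EPOLLRDHUP
     * is available. */
    static void watch_for_close(struct event_base *base, evutil_socket_t fd)
    {
        struct event *ev = event_new(base, fd, EV_CLOSED | EV_PERSIST,
            on_closed, NULL);
        if (ev != NULL)
            event_add(ev, NULL);
    }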
+*/ +#if !defined(EPOLLRDHUP) +#define EPOLLRDHUP 0 +#define EARLY_CLOSE_IF_HAVE_RDHUP 0 +#else +#define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE +#endif + +#include "epolltable-internal.h" + +#if defined(EVENT__HAVE_SYS_TIMERFD_H) && \ + defined(EVENT__HAVE_TIMERFD_CREATE) && \ + defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \ + defined(TFD_CLOEXEC) +/* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available + and working. This means that we can't support it on 2.6.25 (where timerfd + was introduced) or 2.6.26, since 2.6.27 introduced those flags. + */ +#define USING_TIMERFD +#endif + +struct epollop { + struct epoll_event *events; + int nevents; + int epfd; +#ifdef USING_TIMERFD + int timerfd; +#endif +}; + +static void *epoll_init(struct event_base *); +static int epoll_dispatch(struct event_base *, struct timeval *); +static void epoll_dealloc(struct event_base *); + +static const struct eventop epollops_changelist = { + "epoll (with changelist)", + epoll_init, + event_changelist_add_, + event_changelist_del_, + epoll_dispatch, + epoll_dealloc, + 1, /* need reinit */ + EV_FEATURE_ET|EV_FEATURE_O1| EARLY_CLOSE_IF_HAVE_RDHUP, + EVENT_CHANGELIST_FDINFO_SIZE +}; + + +static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd, + short old, short events, void *p); +static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd, + short old, short events, void *p); + +const struct eventop epollops = { + "epoll", + epoll_init, + epoll_nochangelist_add, + epoll_nochangelist_del, + epoll_dispatch, + epoll_dealloc, + 1, /* need reinit */ + EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_EARLY_CLOSE, + 0 +}; + +#define INITIAL_NEVENT 32 +#define MAX_NEVENT 4096 + +/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout + * values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be + * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the + * largest number of msec we can support here is 2147482. Let's + * round that down by 47 seconds. + */ +#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000) + +static void * +epoll_init(struct event_base *base) +{ + int epfd = -1; + struct epollop *epollop; + +#ifdef EVENT__HAVE_EPOLL_CREATE1 + /* First, try the shiny new epoll_create1 interface, if we have it. */ + epfd = epoll_create1(EPOLL_CLOEXEC); +#endif + if (epfd == -1) { + /* Initialize the kernel queue using the old interface. (The + size field is ignored since 2.6.8.) */ + if ((epfd = epoll_create(32000)) == -1) { + if (errno != ENOSYS) + event_warn("epoll_create"); + return (NULL); + } + evutil_make_socket_closeonexec(epfd); + } + + if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) { + close(epfd); + return (NULL); + } + + epollop->epfd = epfd; + + /* Initialize fields */ + epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event)); + if (epollop->events == NULL) { + mm_free(epollop); + close(epfd); + return (NULL); + } + epollop->nevents = INITIAL_NEVENT; + + if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 || + ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 && + evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) { + + base->evsel = &epollops_changelist; + } + +#ifdef USING_TIMERFD + /* + The epoll interface ordinarily gives us one-millisecond precision, + so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE + timer. But when the user has set the new PRECISE_TIMER flag for an + event_base, we can try to use timerfd to give them finer granularity. 
+ */ + if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) && + base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) { + int fd; + fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC); + if (epollop->timerfd >= 0) { + struct epoll_event epev; + memset(&epev, 0, sizeof(epev)); + epev.data.fd = epollop->timerfd; + epev.events = EPOLLIN; + if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) { + event_warn("epoll_ctl(timerfd)"); + close(fd); + epollop->timerfd = -1; + } + } else { + if (errno != EINVAL && errno != ENOSYS) { + /* These errors probably mean that we were + * compiled with timerfd/TFD_* support, but + * we're running on a kernel that lacks those. + */ + event_warn("timerfd_create"); + } + epollop->timerfd = -1; + } + } else { + epollop->timerfd = -1; + } +#endif + + evsig_init_(base); + + return (epollop); +} + +static const char * +change_to_string(int change) +{ + change &= (EV_CHANGE_ADD|EV_CHANGE_DEL); + if (change == EV_CHANGE_ADD) { + return "add"; + } else if (change == EV_CHANGE_DEL) { + return "del"; + } else if (change == 0) { + return "none"; + } else { + return "???"; + } +} + +static const char * +epoll_op_to_string(int op) +{ + return op == EPOLL_CTL_ADD?"ADD": + op == EPOLL_CTL_DEL?"DEL": + op == EPOLL_CTL_MOD?"MOD": + "???"; +} + +#define PRINT_CHANGES(op, events, ch, status) \ + "Epoll %s(%d) on fd %d " status ". " \ + "Old events were %d; " \ + "read change was %d (%s); " \ + "write change was %d (%s); " \ + "close change was %d (%s)", \ + epoll_op_to_string(op), \ + events, \ + ch->fd, \ + ch->old_events, \ + ch->read_change, \ + change_to_string(ch->read_change), \ + ch->write_change, \ + change_to_string(ch->write_change), \ + ch->close_change, \ + change_to_string(ch->close_change) + +static int +epoll_apply_one_change(struct event_base *base, + struct epollop *epollop, + const struct event_change *ch) +{ + struct epoll_event epev; + int op, events = 0; + int idx; + + idx = EPOLL_OP_TABLE_INDEX(ch); + op = epoll_op_table[idx].op; + events = epoll_op_table[idx].events; + + if (!events) { + EVUTIL_ASSERT(op == 0); + return 0; + } + + if ((ch->read_change|ch->write_change) & EV_CHANGE_ET) + events |= EPOLLET; + + memset(&epev, 0, sizeof(epev)); + epev.data.fd = ch->fd; + epev.events = events; + if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) { + event_debug((PRINT_CHANGES(op, epev.events, ch, "okay"))); + return 0; + } + + switch (op) { + case EPOLL_CTL_MOD: + if (errno == ENOENT) { + /* If a MOD operation fails with ENOENT, the + * fd was probably closed and re-opened. We + * should retry the operation as an ADD. + */ + if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) { + event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too", + (int)epev.events, ch->fd); + return -1; + } else { + event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.", + (int)epev.events, + ch->fd)); + return 0; + } + } + break; + case EPOLL_CTL_ADD: + if (errno == EEXIST) { + /* If an ADD operation fails with EEXIST, + * either the operation was redundant (as with a + * precautionary add), or we ran into a fun + * kernel bug where using dup*() to duplicate the + * same file into the same fd gives you the same epitem + * rather than a fresh one. For the second case, + * we must retry with MOD. 
*/ + if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) { + event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too", + (int)epev.events, ch->fd); + return -1; + } else { + event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.", + (int)epev.events, + ch->fd)); + return 0; + } + } + break; + case EPOLL_CTL_DEL: + if (errno == ENOENT || errno == EBADF || errno == EPERM) { + /* If a delete fails with one of these errors, + * that's fine too: we closed the fd before we + * got around to calling epoll_dispatch. */ + event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.", + (int)epev.events, + ch->fd, + strerror(errno))); + return 0; + } + break; + default: + break; + } + + event_warn(PRINT_CHANGES(op, epev.events, ch, "failed")); + return -1; +} + +static int +epoll_apply_changes(struct event_base *base) +{ + struct event_changelist *changelist = &base->changelist; + struct epollop *epollop = base->evbase; + struct event_change *ch; + + int r = 0; + int i; + + for (i = 0; i < changelist->n_changes; ++i) { + ch = &changelist->changes[i]; + if (epoll_apply_one_change(base, epollop, ch) < 0) + r = -1; + } + + return (r); +} + +static int +epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd, + short old, short events, void *p) +{ + struct event_change ch; + ch.fd = fd; + ch.old_events = old; + ch.read_change = ch.write_change = ch.close_change = 0; + if (events & EV_WRITE) + ch.write_change = EV_CHANGE_ADD | + (events & EV_ET); + if (events & EV_READ) + ch.read_change = EV_CHANGE_ADD | + (events & EV_ET); + if (events & EV_CLOSED) + ch.close_change = EV_CHANGE_ADD | + (events & EV_ET); + + return epoll_apply_one_change(base, base->evbase, &ch); +} + +static int +epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd, + short old, short events, void *p) +{ + struct event_change ch; + ch.fd = fd; + ch.old_events = old; + ch.read_change = ch.write_change = ch.close_change = 0; + if (events & EV_WRITE) + ch.write_change = EV_CHANGE_DEL | + (events & EV_ET); + if (events & EV_READ) + ch.read_change = EV_CHANGE_DEL | + (events & EV_ET); + if (events & EV_CLOSED) + ch.close_change = EV_CHANGE_DEL | + (events & EV_ET); + + return epoll_apply_one_change(base, base->evbase, &ch); +} + +static int +epoll_dispatch(struct event_base *base, struct timeval *tv) +{ + struct epollop *epollop = base->evbase; + struct epoll_event *events = epollop->events; + int i, res; + long timeout = -1; + +#ifdef USING_TIMERFD + if (epollop->timerfd >= 0) { + struct itimerspec is; + is.it_interval.tv_sec = 0; + is.it_interval.tv_nsec = 0; + if (tv == NULL) { + /* No timeout; disarm the timer. */ + is.it_value.tv_sec = 0; + is.it_value.tv_nsec = 0; + } else { + if (tv->tv_sec == 0 && tv->tv_usec == 0) { + /* we need to exit immediately; timerfd can't + * do that. */ + timeout = 0; + } + is.it_value.tv_sec = tv->tv_sec; + is.it_value.tv_nsec = tv->tv_usec * 1000; + } + /* TODO: we could avoid unnecessary syscalls here by only + calling timerfd_settime when the top timeout changes, or + when we're called with a different timeval. + */ + if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) { + event_warn("timerfd_settime"); + } + } else +#endif + if (tv != NULL) { + timeout = evutil_tv_to_msec_(tv); + if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) { + /* Linux kernels can wait forever if the timeout is + * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. 
*/ + timeout = MAX_EPOLL_TIMEOUT_MSEC; + } + } + + epoll_apply_changes(base); + event_changelist_remove_all_(&base->changelist, base); + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (res == -1) { + if (errno != EINTR) { + event_warn("epoll_wait"); + return (-1); + } + + return (0); + } + + event_debug(("%s: epoll_wait reports %d", __func__, res)); + EVUTIL_ASSERT(res <= epollop->nevents); + + for (i = 0; i < res; i++) { + int what = events[i].events; + short ev = 0; +#ifdef USING_TIMERFD + if (events[i].data.fd == epollop->timerfd) + continue; +#endif + + if (what & (EPOLLHUP|EPOLLERR)) { + ev = EV_READ | EV_WRITE; + } else { + if (what & EPOLLIN) + ev |= EV_READ; + if (what & EPOLLOUT) + ev |= EV_WRITE; + if (what & EPOLLRDHUP) + ev |= EV_CLOSED; + } + + if (!ev) + continue; + + evmap_io_active_(base, events[i].data.fd, ev | EV_ET); + } + + if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) { + /* We used all of the event space this time. We should + be ready for more events next time. */ + int new_nevents = epollop->nevents * 2; + struct epoll_event *new_events; + + new_events = mm_realloc(epollop->events, + new_nevents * sizeof(struct epoll_event)); + if (new_events) { + epollop->events = new_events; + epollop->nevents = new_nevents; + } + } + + return (0); +} + + +static void +epoll_dealloc(struct event_base *base) +{ + struct epollop *epollop = base->evbase; + + evsig_dealloc_(base); + if (epollop->events) + mm_free(epollop->events); + if (epollop->epfd >= 0) + close(epollop->epfd); +#ifdef USING_TIMERFD + if (epollop->timerfd >= 0) + close(epollop->timerfd); +#endif + + memset(epollop, 0, sizeof(struct epollop)); + mm_free(epollop); +} + +#endif /* EVENT__HAVE_EPOLL */ diff --git a/probe-busybox/libevent-2.1.11-stable/epoll_sub.c b/probe-busybox/libevent-2.1.11-stable/epoll_sub.c new file mode 100644 index 00000000..3f01f6a6 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/epoll_sub.c @@ -0,0 +1,66 @@ +/* + * Copyright 2003-2009 Niels Provos + * Copyright 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "evconfig-private.h" +#include <stdint.h> + +#include <sys/param.h> +#include <sys/types.h> +#include <sys/syscall.h> +#include <sys/epoll.h> +#include <unistd.h> +#include <errno.h> + +int +epoll_create(int size) +{ +#if !defined(__NR_epoll_create) && defined(__NR_epoll_create1) + if (size <= 0) { + errno = EINVAL; + return -1; + } + return (syscall(__NR_epoll_create1, 0)); +#else + return (syscall(__NR_epoll_create, size)); +#endif +} + +int +epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) +{ + + return (syscall(__NR_epoll_ctl, epfd, op, fd, event)); +} + +int +epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) +{ +#if !defined(__NR_epoll_wait) && defined(__NR_epoll_pwait) + return (syscall(__NR_epoll_pwait, epfd, events, maxevents, timeout, NULL, 0)); +#else + return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout)); +#endif +} diff --git a/probe-busybox/libevent-2.1.11-stable/epolltable-internal.h b/probe-busybox/libevent-2.1.11-stable/epolltable-internal.h new file mode 100644 index 00000000..73c2e364 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/epolltable-internal.h @@ -0,0 +1,1166 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EPOLLTABLE_INTERNAL_H_INCLUDED_ +#define EPOLLTABLE_INTERNAL_H_INCLUDED_ + +/* + Here are the values we're masking off to decide what operations to do. + Note that since EV_READ|EV_WRITE. + + Note also that this table is a little sparse, since ADD+DEL is + nonsensical ("xxx" in the list below.) + + Note also that we are shifting old_events by only 5 bits, since + EV_READ is 2 and EV_WRITE is 4.
+ + The table was auto-generated with a python script, according to this + pseudocode:[*0] + + If either the read or the write change is add+del: + This is impossible; Set op==-1, events=0. + Else, if either the read or the write change is add: + Set events to 0. + If the read change is add, or + (the read change is not del, and ev_read is in old_events): + Add EPOLLIN to events. + If the write change is add, or + (the write change is not del, and ev_write is in old_events): + Add EPOLLOUT to events. + + If old_events is set: + Set op to EPOLL_CTL_MOD [*1,*2] + Else: + Set op to EPOLL_CTL_ADD [*3] + + Else, if the read or the write change is del: + Set op to EPOLL_CTL_DEL. + If the read change is del: + If the write change is del: + Set events to EPOLLIN|EPOLLOUT + Else if ev_write is in old_events: + Set events to EPOLLOUT + Set op to EPOLL_CTL_MOD + Else + Set events to EPOLLIN + Else: + {The write change is del.} + If ev_read is in old_events: + Set events to EPOLLIN + Set op to EPOLL_CTL_MOD + Else: + Set the events to EPOLLOUT + + Else: + There is no read or write change; set op to 0 and events to 0. + + The logic is a little tricky, since we had no events set on the fd before, + we need to set op="ADD" and set events=the events we want to add. If we + had any events set on the fd before, and we want any events to remain on + the fd, we need to say op="MOD" and set events=the events we want to + remain. But if we want to delete the last event, we say op="DEL" and + set events=(any non-null pointer). + + [*0] Actually, the Python script has gotten a bit more complicated, to + support EPOLLRDHUP. + + [*1] This MOD is only a guess. MOD might fail with ENOENT if the file was + closed and a new file was opened with the same fd. If so, we'll retry + with ADD. + + [*2] We can't replace this with a no-op even if old_events is the same as + the new events: if the file was closed and reopened, we need to retry + with an ADD. (We do a MOD in this case since "no change" is more + common than "close and reopen", so we'll usually wind up doing 1 + syscalls instead of 2.) + + [*3] This ADD is only a guess. There is a fun Linux kernel issue where if + you have two fds for the same file (via dup) and you ADD one to an + epfd, then close it, then re-create it with the same fd (via dup2 or an + unlucky dup), then try to ADD it again, you'll get an EEXIST, since the + struct epitem is not actually removed from the struct eventpoll until + the file itself is closed. + + EV_CHANGE_ADD==1 + EV_CHANGE_DEL==2 + EV_READ ==2 + EV_WRITE ==4 + EV_CLOSED ==0x80 + + Bit 0: close change is add + Bit 1: close change is del + Bit 2: read change is add + Bit 3: read change is del + Bit 4: write change is add + Bit 5: write change is del + Bit 6: old events had EV_READ + Bit 7: old events had EV_WRITE + Bit 8: old events had EV_CLOSED +*/ + +#define EPOLL_OP_TABLE_INDEX(c) \ + ( (((c)->close_change&(EV_CHANGE_ADD|EV_CHANGE_DEL))) | \ + (((c)->read_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 2) | \ + (((c)->write_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 4) | \ + (((c)->old_events&(EV_READ|EV_WRITE)) << 5) | \ + (((c)->old_events&(EV_CLOSED)) << 1) \ + ) + +#if EV_READ != 2 || EV_WRITE != 4 || EV_CLOSED != 0x80 || EV_CHANGE_ADD != 1 || EV_CHANGE_DEL != 2 +#error "Libevent's internals changed! 
Regenerate the op_table in epolltable-internal.h" +#endif + +static const struct operation { + int events; + int op; +} epoll_op_table[] = { + /* old= 0, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= 0, write: 0, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write: 0, read: 0, close:del */ + { EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= 0, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= 0, write: 0, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_ADD }, + /* old= 0, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write: 0, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_ADD }, + /* old= 0, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= 0, write: 0, read:del, close: 0 */ + { EPOLLIN, EPOLL_CTL_DEL }, + /* old= 0, write: 0, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write: 0, read:del, close:del */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= 0, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= 0, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= 0, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= 0, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= 0, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= 0, write:add, read: 0, close: 0 */ + { EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read: 0, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:add, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= 0, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= 0, write:add, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_ADD }, + /* old= 0, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= 0, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= 0, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= 0, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= 0, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= 0, write:del, read: 0, close: 0 */ + { EPOLLOUT, EPOLL_CTL_DEL }, + /* old= 0, write:del, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:del, read: 0, close:del */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= 0, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= 0, write:del, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_ADD }, + /* old= 0, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_ADD }, + /* old= 0, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= 0, write:del, read:del, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL }, + /* old= 0, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_ADD }, + /* old= 0, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= 0, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= 0, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= 0, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= 0, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= 0, 
write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= 0, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= 0, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= 0, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= 0, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= 0, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= 0, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= 0, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= 0, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= 0, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= 0, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= 0, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= 0, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= 0, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= 0, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= 0, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= 0, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= r, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= r, write: 0, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write: 0, read: 0, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= r, write: 0, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write: 0, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= r, write: 0, read:del, close: 0 */ + { EPOLLIN, EPOLL_CTL_DEL }, + /* old= r, write: 0, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write: 0, read:del, close:del */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= r, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= r, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= r, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= r, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= r, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= r, write:add, read: 0, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:add, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= r, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= r, write:add, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= r, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= r, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= r, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= r, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= r, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= r, write:del, read: 0, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write:del, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:del, read: 0, close:del */ + { EPOLLIN, 
EPOLL_CTL_MOD }, + /* old= r, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= r, write:del, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= r, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= r, write:del, read:del, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL }, + /* old= r, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= r, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= r, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= r, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= r, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= r, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= r, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= r, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= r, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= r, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= r, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= r, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= r, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= r, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= r, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= r, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= r, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= r, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= r, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= r, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= r, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= r, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= r, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= w, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= w, write: 0, read: 0, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write: 0, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= w, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= w, write: 0, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= w, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= w, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= w, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= w, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= w, write:add, read: 0, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read: 0, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write:add, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= w, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, 
write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= w, write:add, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= w, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= w, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= w, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= w, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= w, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= w, write:del, read: 0, close: 0 */ + { EPOLLOUT, EPOLL_CTL_DEL }, + /* old= w, write:del, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write:del, read: 0, close:del */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= w, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= w, write:del, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= w, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= w, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= w, write:del, read:del, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL }, + /* old= w, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= w, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= w, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= w, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= w, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= w, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= w, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= w, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= w, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= w, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= w, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= w, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= w, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= w, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= w, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= w, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= w, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= w, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= w, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= w, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= w, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= w, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= w, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= rw, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= rw, write: 0, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= rw, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= rw, write: 0, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write: 0, 
read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= rw, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= rw, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= rw, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= rw, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= rw, write:add, read: 0, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:add, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= rw, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= rw, write:add, read:del, close: 0 */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= rw, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= rw, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= rw, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= rw, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= rw, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= rw, write:del, read: 0, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= rw, write:del, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:del, read: 0, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= rw, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= rw, write:del, read:add, close: 0 */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= rw, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= rw, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= rw, write:del, read:del, close: 0 */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL }, + /* old= rw, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= rw, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= rw, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= rw, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= rw, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= rw, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= rw, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= rw, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= rw, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= rw, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= rw, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= rw, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= rw, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= rw, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= rw, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= rw, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= rw, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= rw, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= rw, write:xxx, 
read:del, close:xxx */ + { 0, 255 }, + /* old= rw, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= rw, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= rw, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= rw, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= c, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= c, write: 0, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write: 0, read: 0, close:del */ + { EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= c, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= c, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write: 0, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= c, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= c, write: 0, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write: 0, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write: 0, read:del, close:del */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= c, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= c, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= c, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= c, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= c, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= c, write:add, read: 0, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read: 0, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= c, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= c, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= c, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= c, write:add, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= c, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= c, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= c, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= c, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= c, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= c, write:del, read: 0, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read: 0, close:del */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= c, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= c, write:del, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= c, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= c, write:del, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= c, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= c, write:del, read:del, close:xxx */ + { 0, 255 }, + 
/* old= c, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= c, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= c, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= c, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= c, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= c, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= c, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= c, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= c, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= c, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= c, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= c, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= c, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= c, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= c, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= c, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= c, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= c, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= c, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= c, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cr, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= cr, write: 0, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read: 0, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= cr, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= cr, write: 0, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write: 0, read:del, close:del */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= cr, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= cr, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cr, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= cr, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= cr, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cr, write:add, read: 0, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cr, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= cr, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= cr, write:add, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cr, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= cr, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cr, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= cr, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= 
cr, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cr, write:del, read: 0, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read: 0, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= cr, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= cr, write:del, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= cr, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= cr, write:del, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cr, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= cr, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= cr, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cr, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= cr, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= cr, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cr, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= cr, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= cr, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= cr, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= cr, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= cr, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= cr, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= cr, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= cr, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= cr, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= cr, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= cr, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= cr, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cr, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= cr, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= cr, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cw, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old= cw, write: 0, read: 0, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read: 0, close:xxx */ + { 0, 255 }, + /* old= cw, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old= cw, write: 0, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old= cw, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cw, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old= cw, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old= cw, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cw, write:add, read: 0, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read: 0, close:add */ + { 
EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read: 0, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old= cw, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old= cw, write:add, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old= cw, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old= cw, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cw, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old= cw, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old= cw, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cw, write:del, read: 0, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read: 0, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read: 0, close:del */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= cw, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old= cw, write:del, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old= cw, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old= cw, write:del, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old= cw, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old= cw, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old= cw, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cw, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old= cw, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old= cw, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old= cw, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old= cw, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old= cw, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old= cw, write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old= cw, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old= cw, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old= cw, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old= cw, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old= cw, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old= cw, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old= cw, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old= cw, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old= cw, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old= cw, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old= cw, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old= cw, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, + /* old=crw, write: 0, read: 0, close: 0 */ + { 0, 0 }, + /* old=crw, write: 0, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read: 0, close:xxx */ + { 
0, 255 }, + /* old=crw, write: 0, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:add, close:xxx */ + { 0, 255 }, + /* old=crw, write: 0, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write: 0, read:del, close:xxx */ + { 0, 255 }, + /* old=crw, write: 0, read:xxx, close: 0 */ + { 0, 255 }, + /* old=crw, write: 0, read:xxx, close:add */ + { 0, 255 }, + /* old=crw, write: 0, read:xxx, close:del */ + { 0, 255 }, + /* old=crw, write: 0, read:xxx, close:xxx */ + { 0, 255 }, + /* old=crw, write:add, read: 0, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read: 0, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read: 0, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write:add, read: 0, close:xxx */ + { 0, 255 }, + /* old=crw, write:add, read:add, close: 0 */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:add, close:add */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:add, close:del */ + { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:add, close:xxx */ + { 0, 255 }, + /* old=crw, write:add, read:del, close: 0 */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:del, close:add */ + { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:del, close:del */ + { EPOLLOUT, EPOLL_CTL_MOD }, + /* old=crw, write:add, read:del, close:xxx */ + { 0, 255 }, + /* old=crw, write:add, read:xxx, close: 0 */ + { 0, 255 }, + /* old=crw, write:add, read:xxx, close:add */ + { 0, 255 }, + /* old=crw, write:add, read:xxx, close:del */ + { 0, 255 }, + /* old=crw, write:add, read:xxx, close:xxx */ + { 0, 255 }, + /* old=crw, write:del, read: 0, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read: 0, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read: 0, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old=crw, write:del, read: 0, close:xxx */ + { 0, 255 }, + /* old=crw, write:del, read:add, close: 0 */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read:add, close:add */ + { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read:add, close:del */ + { EPOLLIN, EPOLL_CTL_MOD }, + /* old=crw, write:del, read:add, close:xxx */ + { 0, 255 }, + /* old=crw, write:del, read:del, close: 0 */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read:del, close:add */ + { EPOLLRDHUP, EPOLL_CTL_MOD }, + /* old=crw, write:del, read:del, close:del */ + { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL }, + /* old=crw, write:del, read:del, close:xxx */ + { 0, 255 }, + /* old=crw, write:del, read:xxx, close: 0 */ + { 0, 255 }, + /* old=crw, write:del, read:xxx, close:add */ + { 0, 255 }, + /* old=crw, write:del, read:xxx, close:del */ + { 0, 255 }, + /* old=crw, write:del, read:xxx, close:xxx */ + { 0, 255 }, + /* old=crw, write:xxx, read: 0, close: 0 */ + { 0, 255 }, + /* old=crw, write:xxx, read: 0, close:add */ + { 0, 255 }, + /* old=crw, write:xxx, read: 0, close:del */ + { 0, 255 }, + /* old=crw, 
write:xxx, read: 0, close:xxx */ + { 0, 255 }, + /* old=crw, write:xxx, read:add, close: 0 */ + { 0, 255 }, + /* old=crw, write:xxx, read:add, close:add */ + { 0, 255 }, + /* old=crw, write:xxx, read:add, close:del */ + { 0, 255 }, + /* old=crw, write:xxx, read:add, close:xxx */ + { 0, 255 }, + /* old=crw, write:xxx, read:del, close: 0 */ + { 0, 255 }, + /* old=crw, write:xxx, read:del, close:add */ + { 0, 255 }, + /* old=crw, write:xxx, read:del, close:del */ + { 0, 255 }, + /* old=crw, write:xxx, read:del, close:xxx */ + { 0, 255 }, + /* old=crw, write:xxx, read:xxx, close: 0 */ + { 0, 255 }, + /* old=crw, write:xxx, read:xxx, close:add */ + { 0, 255 }, + /* old=crw, write:xxx, read:xxx, close:del */ + { 0, 255 }, + /* old=crw, write:xxx, read:xxx, close:xxx */ + { 0, 255 }, +}; + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/evbuffer-internal.h b/probe-busybox/libevent-2.1.11-stable/evbuffer-internal.h new file mode 100644 index 00000000..d09b4f1d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evbuffer-internal.h @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVBUFFER_INTERNAL_H_INCLUDED_ +#define EVBUFFER_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "evconfig-private.h" +#include "event2/util.h" +#include "event2/event_struct.h" +#include "util-internal.h" +#include "defer-internal.h" + +/* Experimental cb flag: "never deferred." Implementation note: + * these callbacks may get an inaccurate view of n_del/n_added in their + * arguments. */ +#define EVBUFFER_CB_NODEFER 2 + +#ifdef _WIN32 +#include <winsock2.h> +#endif +#include <sys/queue.h> + +/* Minimum allocation for a chain. We define this so that we're burning no + * more than 5% of each allocation on overhead. It would be nice to lose even + * less space, though. */ +#if EVENT__SIZEOF_VOID_P < 8 +#define MIN_BUFFER_SIZE 512 +#else +#define MIN_BUFFER_SIZE 1024 +#endif + +/** A single evbuffer callback for an evbuffer.
This function will be invoked + * when bytes are added to or removed from the evbuffer. */ +struct evbuffer_cb_entry { + /** Structures to implement a doubly-linked queue of callbacks */ + LIST_ENTRY(evbuffer_cb_entry) next; + /** The callback function to invoke when this callback is called. + If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is + valid; otherwise, cb_func is valid. */ + union { + evbuffer_cb_func cb_func; + evbuffer_cb cb_obsolete; + } cb; + /** Argument to pass to cb. */ + void *cbarg; + /** Currently set flags on this callback. */ + ev_uint32_t flags; +}; + +struct bufferevent; +struct evbuffer_chain; +struct evbuffer { + /** The first chain in this buffer's linked list of chains. */ + struct evbuffer_chain *first; + /** The last chain in this buffer's linked list of chains. */ + struct evbuffer_chain *last; + + /** Pointer to the next pointer pointing at the 'last_with_data' chain. + * + * To unpack: + * + * The last_with_data chain is the last chain that has any data in it. + * If all chains in the buffer are empty, it is the first chain. + * If the buffer has no chains, it is NULL. + * + * The last_with_datap pointer points at _whatever 'next' pointer_ + * pointing at the last_with_data chain. If the last_with_data chain + * is the first chain, or it is NULL, then the last_with_datap pointer + * is &buf->first. + */ + struct evbuffer_chain **last_with_datap; + + /** Total amount of bytes stored in all chains.*/ + size_t total_len; + + /** Number of bytes we have added to the buffer since we last tried to + * invoke callbacks. */ + size_t n_add_for_cb; + /** Number of bytes we have removed from the buffer since we last + * tried to invoke callbacks. */ + size_t n_del_for_cb; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + /** A lock used to mediate access to this buffer. */ + void *lock; +#endif + /** True iff we should free the lock field when we free this + * evbuffer. */ + unsigned own_lock : 1; + /** True iff we should not allow changes to the front of the buffer + * (drains or prepends). */ + unsigned freeze_start : 1; + /** True iff we should not allow changes to the end of the buffer + * (appends) */ + unsigned freeze_end : 1; + /** True iff this evbuffer's callbacks are not invoked immediately + * upon a change in the buffer, but instead are deferred to be invoked + * from the event_base's loop. Useful for preventing enormous stack + * overflows when we have mutually recursive callbacks, and for + * serializing callbacks in a single thread. */ + unsigned deferred_cbs : 1; +#ifdef _WIN32 + /** True iff this buffer is set up for overlapped IO. */ + unsigned is_overlapped : 1; +#endif + /** Zero or more EVBUFFER_FLAG_* bits */ + ev_uint32_t flags; + + /** Used to implement deferred callbacks. */ + struct event_base *cb_queue; + + /** A reference count on this evbuffer. When the reference count + * reaches 0, the buffer is destroyed. Manipulated with + * evbuffer_incref and evbuffer_decref_and_unlock and + * evbuffer_free. */ + int refcnt; + + /** A struct event_callback handle to make all of this buffer's callbacks + * invoked from the event loop. */ + struct event_callback deferred; + + /** A doubly-linked-list of callback functions */ + LIST_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks; + + /** The parent bufferevent object this evbuffer belongs to. + * NULL if the evbuffer stands alone. 
*/ + struct bufferevent *parent; +}; + +#if EVENT__SIZEOF_OFF_T < EVENT__SIZEOF_SIZE_T +typedef ev_ssize_t ev_misalign_t; +#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX) +#else +typedef ev_off_t ev_misalign_t; +#if EVENT__SIZEOF_OFF_T > EVENT__SIZEOF_SIZE_T +#define EVBUFFER_CHAIN_MAX EV_SIZE_MAX +#else +#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX) +#endif +#endif + +/** A single item in an evbuffer. */ +struct evbuffer_chain { + /** points to next buffer in the chain */ + struct evbuffer_chain *next; + + /** total allocation available in the buffer field. */ + size_t buffer_len; + + /** unused space at the beginning of buffer or an offset into a + * file for sendfile buffers. */ + ev_misalign_t misalign; + + /** Offset into buffer + misalign at which to start writing. + * In other words, the total number of bytes actually stored + * in buffer. */ + size_t off; + + /** Set if special handling is required for this chain */ + unsigned flags; +#define EVBUFFER_FILESEGMENT 0x0001 /**< A chain used for a file segment */ +#define EVBUFFER_SENDFILE 0x0002 /**< a chain used with sendfile */ +#define EVBUFFER_REFERENCE 0x0004 /**< a chain with a mem reference */ +#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */ + /** a chain that mustn't be reallocated or freed, or have its contents + * memmoved, until the chain is un-pinned. */ +#define EVBUFFER_MEM_PINNED_R 0x0010 +#define EVBUFFER_MEM_PINNED_W 0x0020 +#define EVBUFFER_MEM_PINNED_ANY (EVBUFFER_MEM_PINNED_R|EVBUFFER_MEM_PINNED_W) + /** a chain that should be freed, but can't be freed until it is + * un-pinned. */ +#define EVBUFFER_DANGLING 0x0040 + /** a chain that is a referenced copy of another chain */ +#define EVBUFFER_MULTICAST 0x0080 + + /** number of references to this chain */ + int refcnt; + + /** Usually points to the read-write memory belonging to this + * buffer allocated as part of the evbuffer_chain allocation. + * For mmap, this can be a read-only buffer and + * EVBUFFER_IMMUTABLE will be set in flags. For sendfile, it + * may point to NULL. + */ + unsigned char *buffer; +}; + +/** callback for a reference chain; lets us know what to do with it when + * we're done with it. Lives at the end of an evbuffer_chain with the + * EVBUFFER_REFERENCE flag set */ +struct evbuffer_chain_reference { + evbuffer_ref_cleanup_cb cleanupfn; + void *extra; +}; + +/** File segment for a file-segment chain. Lives at the end of an + * evbuffer_chain with the EVBUFFER_FILESEGMENT flag set. */ +struct evbuffer_chain_file_segment { + struct evbuffer_file_segment *segment; +#ifdef _WIN32 + /** If we're using CreateFileMapping, this is the handle to the view. */ + HANDLE view_handle; +#endif +}; + +/* Declared in event2/buffer.h; defined here. */ +struct evbuffer_file_segment { + void *lock; /**< lock prevent concurrent access to refcnt */ + int refcnt; /**< Reference count for this file segment */ + unsigned flags; /**< combination of EVBUF_FS_* flags */ + + /** What kind of file segment is this? */ + unsigned can_sendfile : 1; + unsigned is_mapping : 1; + + /** The fd that we read the data from. */ + int fd; + /** If we're using mmap, this is the raw mapped memory. */ + void *mapping; +#ifdef _WIN32 + /** If we're using CreateFileMapping, this is the mapping */ + HANDLE mapping_handle; +#endif + /** If we're using mmap or IO, this is the content of the file + * segment. */ + char *contents; + /** Position of this segment within the file. 
*/ + ev_off_t file_offset; + /** If we're using mmap, this is the offset within 'mapping' where + * this data segment begins. */ + ev_off_t mmap_offset; + /** The length of this segment. */ + ev_off_t length; + /** Cleanup callback function */ + evbuffer_file_segment_cleanup_cb cleanup_cb; + /** Argument to be pass to cleanup callback function */ + void *cleanup_cb_arg; +}; + +/** Information about the multicast parent of a chain. Lives at the + * end of an evbuffer_chain with the EVBUFFER_MULTICAST flag set. */ +struct evbuffer_multicast_parent { + /** source buffer the multicast parent belongs to */ + struct evbuffer *source; + /** multicast parent for this chain */ + struct evbuffer_chain *parent; +}; + +#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain) +/** Return a pointer to extra data allocated along with an evbuffer. */ +#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1) + +/** Assert that we are holding the lock on an evbuffer */ +#define ASSERT_EVBUFFER_LOCKED(buffer) \ + EVLOCK_ASSERT_LOCKED((buffer)->lock) + +#define EVBUFFER_LOCK(buffer) \ + do { \ + EVLOCK_LOCK((buffer)->lock, 0); \ + } while (0) +#define EVBUFFER_UNLOCK(buffer) \ + do { \ + EVLOCK_UNLOCK((buffer)->lock, 0); \ + } while (0) +#define EVBUFFER_LOCK2(buffer1, buffer2) \ + do { \ + EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \ + } while (0) +#define EVBUFFER_UNLOCK2(buffer1, buffer2) \ + do { \ + EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \ + } while (0) + +/** Increase the reference count of buf by one. */ +void evbuffer_incref_(struct evbuffer *buf); +/** Increase the reference count of buf by one and acquire the lock. */ +void evbuffer_incref_and_lock_(struct evbuffer *buf); +/** Pin a single buffer chain using a given flag. A pinned chunk may not be + * moved or freed until it is unpinned. */ +void evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag); +/** Unpin a single buffer chain using a given flag. */ +void evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag); +/** As evbuffer_free, but requires that we hold a lock on the buffer, and + * releases the lock before freeing it and the buffer. */ +void evbuffer_decref_and_unlock_(struct evbuffer *buffer); + +/** As evbuffer_expand, but does not guarantee that the newly allocated memory + * is contiguous. Instead, it may be split across two or more chunks. */ +int evbuffer_expand_fast_(struct evbuffer *, size_t, int); + +/** Helper: prepares for a readv/WSARecv call by expanding the buffer to + * hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory. + * Sets up the one or two iovecs in 'vecs' to point to the free memory and its + * extent, and *chainp to point to the first chain that we'll try to read into. + * Returns the number of vecs used. + */ +int evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, + struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp, + int exact); + +/* Helper macro: copies an evbuffer_iovec in ei to a win32 WSABUF in i. */ +#define WSABUF_FROM_EVBUFFER_IOV(i,ei) do { \ + (i)->buf = (ei)->iov_base; \ + (i)->len = (unsigned long)(ei)->iov_len; \ + } while (0) +/* XXXX the cast above is safe for now, but not if we allow mmaps on win64. 
+ * See note in buffer_iocp's launch_write function */ + +/** Set the parent bufferevent object for buf to bev */ +void evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev); + +void evbuffer_invoke_callbacks_(struct evbuffer *buf); + + +int evbuffer_get_callbacks_(struct evbuffer *buffer, + struct event_callback **cbs, + int max_cbs); + +#ifdef __cplusplus +} +#endif + +#endif /* EVBUFFER_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.cmake b/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.cmake new file mode 100644 index 00000000..1adf9c03 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.cmake @@ -0,0 +1,40 @@ + +#ifndef EVCONFIG_PRIVATE_H_INCLUDED_ +#define EVCONFIG_PRIVATE_H_INCLUDED_ + +/* Enable extensions on AIX 3, Interix. */ +#cmakedefine _ALL_SOURCE + +/* Enable GNU extensions on systems that have them. */ +#cmakedefine _GNU_SOURCE 1 + +/* Enable threading extensions on Solaris. */ +#cmakedefine _POSIX_PTHREAD_SEMANTICS 1 + +/* Enable extensions on HP NonStop. */ +#cmakedefine _TANDEM_SOURCE 1 + +/* Enable general extensions on Solaris. */ +#cmakedefine __EXTENSIONS__ + +/* Number of bits in a file offset, on hosts where this is settable. */ +#cmakedefine _FILE_OFFSET_BITS 1 +/* Define for large files, on AIX-style hosts. */ +#cmakedefine _LARGE_FILES 1 + +/* Define to 1 if on MINIX. */ +#cmakedefine _MINIX 1 + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +#cmakedefine _POSIX_1_SOURCE 1 + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +#cmakedefine _POSIX_SOURCE 1 + +/* Enable POSIX.2 extensions on QNX for getopt */ +#ifdef __QNX__ +#cmakedefine __EXT_POSIX2 1 +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.in b/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.in new file mode 100644 index 00000000..8cd64787 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evconfig-private.h.in @@ -0,0 +1,55 @@ +/* evconfig-private.h template - see "Configuration Header Templates" */ +/* in AC manual. Kevin Bowling +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" 3. The name of the author may not be used to endorse or promote products +.\" derived from this software without specific prior written permission. +.\" +.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +.\" +.Dd October 7, 2006 +.Dt EVDNS 3 +.Os +.Sh NAME +.Nm evdns_init +.Nm evdns_shutdown +.Nm evdns_err_to_string +.Nm evdns_nameserver_add +.Nm evdns_count_nameservers +.Nm evdns_clear_nameservers_and_suspend +.Nm evdns_resume +.Nm evdns_nameserver_ip_add +.Nm evdns_resolve_ipv4 +.Nm evdns_resolve_reverse +.Nm evdns_resolv_conf_parse +.Nm evdns_config_windows_nameservers +.Nm evdns_search_clear +.Nm evdns_search_add +.Nm evdns_search_ndots_set +.Nm evdns_set_log_fn +.Nd asynchronous functions for DNS resolution. +.Sh SYNOPSIS +.Fd #include +.Fd #include +.Fd #include +.Ft int +.Fn evdns_init +.Ft void +.Fn evdns_shutdown "int fail_requests" +.Ft "const char *" +.Fn evdns_err_to_string "int err" +.Ft int +.Fn evdns_nameserver_add "unsigned long int address" +.Ft int +.Fn evdns_count_nameservers +.Ft int +.Fn evdns_clear_nameservers_and_suspend +.Ft int +.Fn evdns_resume +.Ft int +.Fn evdns_nameserver_ip_add(const char *ip_as_string); +.Ft int +.Fn evdns_resolve_ipv4 "const char *name" "int flags" "evdns_callback_type callback" "void *ptr" +.Ft int +.Fn evdns_resolve_reverse "struct in_addr *in" "int flags" "evdns_callback_type callback" "void *ptr" +.Ft int +.Fn evdns_resolv_conf_parse "int flags" "const char *" +.Ft void +.Fn evdns_search_clear +.Ft void +.Fn evdns_search_add "const char *domain" +.Ft void +.Fn evdns_search_ndots_set "const int ndots" +.Ft void +.Fn evdns_set_log_fn "evdns_debug_log_fn_type fn" +.Ft int +.Fn evdns_config_windows_nameservers +.Sh DESCRIPTION +Welcome, gentle reader +.Pp +Async DNS lookups are really a whole lot harder than they should be, +mostly stemming from the fact that the libc resolver has never been +very good at them. Before you use this library you should see if libc +can do the job for you with the modern async call getaddrinfo_a +(see http://www.imperialviolet.org/page25.html#e498). Otherwise, +please continue. +.Pp +This code is based on libevent and you must call event_init before +any of the APIs in this file. You must also seed the OpenSSL random +source if you are using OpenSSL for ids (see below). +.Pp +This library is designed to be included and shipped with your source +code. You statically link with it. You should also test for the +existence of strtok_r and define HAVE_STRTOK_R if you have it. +.Pp +The DNS protocol requires a good source of id numbers and these +numbers should be unpredictable for spoofing reasons. There are +three methods for generating them here and you must define exactly +one of them. In increasing order of preference: +.Pp +.Bl -tag -width "DNS_USE_GETTIMEOFDAY_FOR_ID" -compact -offset indent +.It DNS_USE_GETTIMEOFDAY_FOR_ID +Using the bottom 16 bits of the usec result from gettimeofday. This +is a pretty poor solution but should work anywhere. +.It DNS_USE_CPU_CLOCK_FOR_ID +Using the bottom 16 bits of the nsec result from the CPU's time +counter. This is better, but may not work everywhere. Requires +POSIX realtime support and you'll need to link against -lrt on +glibc systems at least. 
+.It DNS_USE_OPENSSL_FOR_ID +Uses the OpenSSL RAND_bytes call to generate the data. You must +have seeded the pool before making any calls to this library. +.El +.Pp +The library keeps track of the state of nameservers and will avoid +them when they go down. Otherwise it will round robin between them. +.Pp +Quick start guide: + #include "evdns.h" + void callback(int result, char type, int count, int ttl, + void *addresses, void *arg); + evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf"); + evdns_resolve("www.hostname.com", 0, callback, NULL); +.Pp +When the lookup is complete the callback function is called. The +first argument will be one of the DNS_ERR_* defines in evdns.h. +Hopefully it will be DNS_ERR_NONE, in which case type will be +DNS_IPv4_A, count will be the number of IP addresses, ttl is the time +which the data can be cached for (in seconds), addresses will point +to an array of uint32_t's and arg will be whatever you passed to +evdns_resolve. +.Pp +Searching: +.Pp +In order for this library to be a good replacement for glibc's resolver it +supports searching. This involves setting a list of default domains, in +which names will be queried for. The number of dots in the query name +determines the order in which this list is used. +.Pp +Searching appears to be a single lookup from the point of view of the API, +although many DNS queries may be generated from a single call to +evdns_resolve. Searching can also drastically slow down the resolution +of names. +.Pp +To disable searching: +.Bl -enum -compact -offset indent +.It +Never set it up. If you never call +.Fn evdns_resolv_conf_parse, +.Fn evdns_init, +or +.Fn evdns_search_add +then no searching will occur. +.It +If you do call +.Fn evdns_resolv_conf_parse +then don't pass +.Va DNS_OPTION_SEARCH +(or +.Va DNS_OPTIONS_ALL, +which implies it). +.It +When calling +.Fn evdns_resolve, +pass the +.Va DNS_QUERY_NO_SEARCH +flag. +.El +.Pp +The order of searches depends on the number of dots in the name. If the +number is greater than the ndots setting then the names is first tried +globally. Otherwise each search domain is appended in turn. +.Pp +The ndots setting can either be set from a resolv.conf, or by calling +evdns_search_ndots_set. +.Pp +For example, with ndots set to 1 (the default) and a search domain list of +["myhome.net"]: + Query: www + Order: www.myhome.net, www. +.Pp + Query: www.abc + Order: www.abc., www.abc.myhome.net +.Pp +.Sh API reference +.Pp +.Bl -tag -width 0123456 +.It Ft int Fn evdns_init +Initializes support for non-blocking name resolution by calling +.Fn evdns_resolv_conf_parse +on UNIX and +.Fn evdns_config_windows_nameservers +on Windows. +.It Ft int Fn evdns_nameserver_add "unsigned long int address" +Add a nameserver. The address should be an IP address in +network byte order. The type of address is chosen so that +it matches in_addr.s_addr. +Returns non-zero on error. +.It Ft int Fn evdns_nameserver_ip_add "const char *ip_as_string" +This wraps the above function by parsing a string as an IP +address and adds it as a nameserver. +Returns non-zero on error +.It Ft int Fn evdns_resolve "const char *name" "int flags" "evdns_callback_type callback" "void *ptr" +Resolve a name. The name parameter should be a DNS name. +The flags parameter should be 0, or DNS_QUERY_NO_SEARCH +which disables searching for this query. (see defn of +searching above). 
+.Pp +The callback argument is a function which is called when +this query completes and ptr is an argument which is passed +to that callback function. +.Pp +Returns non-zero on error +.It Ft void Fn evdns_search_clear +Clears the list of search domains +.It Ft void Fn evdns_search_add "const char *domain" +Add a domain to the list of search domains +.It Ft void Fn evdns_search_ndots_set "int ndots" +Set the number of dots which, when found in a name, causes +the first query to be without any search domain. +.It Ft int Fn evdns_count_nameservers "void" +Return the number of configured nameservers (not necessarily the +number of running nameservers). This is useful for double-checking +whether our calls to the various nameserver configuration functions +have been successful. +.It Ft int Fn evdns_clear_nameservers_and_suspend "void" +Remove all currently configured nameservers, and suspend all pending +resolves. Resolves will not necessarily be re-attempted until +evdns_resume() is called. +.It Ft int Fn evdns_resume "void" +Re-attempt resolves left in limbo after an earlier call to +evdns_clear_nameservers_and_suspend(). +.It Ft int Fn evdns_config_windows_nameservers "void" +Attempt to configure a set of nameservers based on platform settings on +a win32 host. Preferentially tries to use GetNetworkParams; if that fails, +looks in the registry. Returns 0 on success, nonzero on failure. +.It Ft int Fn evdns_resolv_conf_parse "int flags" "const char *filename" +Parse a resolv.conf like file from the given filename. +.Pp +See the man page for resolv.conf for the format of this file. +The flags argument determines what information is parsed from +this file: +.Bl -tag -width "DNS_OPTION_NAMESERVERS" -offset indent -compact -nested +.It DNS_OPTION_SEARCH +domain, search and ndots options +.It DNS_OPTION_NAMESERVERS +nameserver lines +.It DNS_OPTION_MISC +timeout and attempts options +.It DNS_OPTIONS_ALL +all of the above +.El +.Pp +The following directives are not parsed from the file: + sortlist, rotate, no-check-names, inet6, debug +.Pp +Returns non-zero on error: +.Bl -tag -width "0" -offset indent -compact -nested +.It 0 +no errors +.It 1 +failed to open file +.It 2 +failed to stat file +.It 3 +file too large +.It 4 +out of memory +.It 5 +short read from file +.El +.El +.Sh Internals: +Requests are kept in two queues. The first is the inflight queue. In +this queue requests have an allocated transaction id and nameserver. +They will soon be transmitted if they haven't already been. +.Pp +The second is the waiting queue. The size of the inflight ring is +limited and all other requests wait in waiting queue for space. This +bounds the number of concurrent requests so that we don't flood the +nameserver. Several algorithms require a full walk of the inflight +queue and so bounding its size keeps thing going nicely under huge +(many thousands of requests) loads. +.Pp +If a nameserver loses too many requests it is considered down and we +try not to use it. After a while we send a probe to that nameserver +(a lookup for google.com) and, if it replies, we consider it working +again. If the nameserver fails a probe we wait longer to try again +with the next probe. +.Sh SEE ALSO +.Xr event 3 , +.Xr gethostbyname 3 , +.Xr resolv.conf 5 +.Sh HISTORY +The +.Nm evdns +API was developed by Adam Langley on top of the +.Nm libevent +API. +The code was integrate into +.Nm Tor +by Nick Mathewson and finally put into +.Nm libevent +itself by Niels Provos. 
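To make the quick-start flow above concrete, here is a minimal, self-contained sketch that uses only the calls documented in this page (evdns_init, evdns_resolve_ipv4, evdns_err_to_string, and the documented callback signature). The <event.h>/<evdns.h> header names and the legacy event_init/event_dispatch/event_loopexit loop are assumptions for illustration only; they are not something this diff adds.

#include <event.h>   /* legacy event_init()/event_dispatch() (assumed header) */
#include <evdns.h>   /* legacy evdns_init()/evdns_resolve_ipv4() (assumed header) */
#include <stdio.h>

/* Callback with the signature documented above: on DNS_ERR_NONE with
 * type DNS_IPv4_A, 'addresses' points at 'count' IPv4 addresses in
 * network byte order. */
static void lookup_done(int result, char type, int count, int ttl,
    void *addresses, void *arg)
{
	(void)type; (void)ttl; (void)arg;
	if (result == DNS_ERR_NONE && count > 0) {
		const unsigned char *a = addresses;	/* first A record */
		printf("resolved to %u.%u.%u.%u\n", a[0], a[1], a[2], a[3]);
	} else {
		printf("lookup failed: %s\n", evdns_err_to_string(result));
	}
	event_loopexit(NULL);	/* one-shot example: stop the loop */
}

int main(void)
{
	event_init();		/* the page requires event_init before any evdns call */
	evdns_init();		/* parses /etc/resolv.conf on UNIX */
	evdns_resolve_ipv4("www.example.com", 0, lookup_done, NULL);
	return event_dispatch();	/* runs until event_loopexit() fires */
}

Link against libevent (e.g. -levent); unless DNS_QUERY_NO_SEARCH is passed, the lookup follows the search-domain and ndots rules described earlier in this page.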
+.Sh AUTHORS +The +.Nm evdns +API and code was written by Adam Langley with significant +contributions by Nick Mathewson. +.Sh BUGS +This documentation is neither complete nor authoritative. +If you are in doubt about the usage of this API then +check the source code to find out how it works, write +up the missing piece of documentation and send it to +me for inclusion in this man page. diff --git a/probe-busybox/libevent-2.1.11-stable/evdns.c b/probe-busybox/libevent-2.1.11-stable/evdns.c new file mode 100644 index 00000000..11a50702 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evdns.c @@ -0,0 +1,4887 @@ +/* Copyright 2006-2007 Niels Provos + * Copyright 2007-2012 Nick Mathewson and Niels Provos + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Based on software by Adam Langly. Adam's original message: + * + * Async DNS Library + * Adam Langley + * http://www.imperialviolet.org/eventdns.html + * Public Domain code + * + * This software is Public Domain. To view a copy of the public domain dedication, + * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to + * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA. + * + * I ask and expect, but do not require, that all derivative works contain an + * attribution similar to: + * Parts developed by Adam Langley + * + * You may wish to replace the word "Parts" with something else depending on + * the amount of original code. 
+ * + * (Derivative works does not include programs which link against, run or include + * the source verbatim in their source distributions) + * + * Version: 0.1b + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifndef _FORTIFY_SOURCE +#define _FORTIFY_SOURCE 3 +#endif + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef EVENT__HAVE_STDINT_H +#include +#endif +#include +#include +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include +#include +#include +#include +#ifdef _WIN32 +#include +#include +#include +#ifndef _WIN32_IE +#define _WIN32_IE 0x400 +#endif +#include +#endif + +#include "event2/dns.h" +#include "event2/dns_struct.h" +#include "event2/dns_compat.h" +#include "event2/util.h" +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/thread.h" + +#include "defer-internal.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "strlcpy-internal.h" +#include "ipv6-internal.h" +#include "util-internal.h" +#include "evthread-internal.h" +#ifdef _WIN32 +#include +#include +#include +#include +#include +#else +#include +#include +#include +#endif + +#ifdef EVENT__HAVE_NETINET_IN6_H +#include +#endif + +#define EVDNS_LOG_DEBUG EVENT_LOG_DEBUG +#define EVDNS_LOG_WARN EVENT_LOG_WARN +#define EVDNS_LOG_MSG EVENT_LOG_MSG + +#ifndef HOST_NAME_MAX +#define HOST_NAME_MAX 255 +#endif + +#include + +#undef MIN +#define MIN(a,b) ((a)<(b)?(a):(b)) + +#define ASSERT_VALID_REQUEST(req) \ + EVUTIL_ASSERT((req)->handle && (req)->handle->current_req == (req)) + +#define u64 ev_uint64_t +#define u32 ev_uint32_t +#define u16 ev_uint16_t +#define u8 ev_uint8_t + +/* maximum number of addresses from a single packet */ +/* that we bother recording */ +#define MAX_V4_ADDRS 32 +#define MAX_V6_ADDRS 32 + + +#define TYPE_A EVDNS_TYPE_A +#define TYPE_CNAME 5 +#define TYPE_PTR EVDNS_TYPE_PTR +#define TYPE_SOA EVDNS_TYPE_SOA +#define TYPE_AAAA EVDNS_TYPE_AAAA + +#define CLASS_INET EVDNS_CLASS_INET + +/* Persistent handle. We keep this separate from 'struct request' since we + * need some object to last for as long as an evdns_request is outstanding so + * that it can be canceled, whereas a search request can lead to multiple + * 'struct request' instances being created over its lifetime. */ +struct evdns_request { + struct request *current_req; + struct evdns_base *base; + + int pending_cb; /* Waiting for its callback to be invoked; not + * owned by event base any more. */ + + /* elements used by the searching code */ + int search_index; + struct search_state *search_state; + char *search_origname; /* needs to be free()ed */ + int search_flags; +}; + +struct request { + u8 *request; /* the dns packet data */ + u8 request_type; /* TYPE_PTR or TYPE_A or TYPE_AAAA */ + unsigned int request_len; + int reissue_count; + int tx_count; /* the number of times that this packet has been sent */ + void *user_pointer; /* the pointer given to us for this request */ + evdns_callback_type user_callback; + struct nameserver *ns; /* the server which we last sent it */ + + /* these objects are kept in a circular list */ + /* XXX We could turn this into a CIRCLEQ. */ + struct request *next, *prev; + + struct event timeout_event; + + u16 trans_id; /* the transaction id */ + unsigned request_appended :1; /* true if the request pointer is data which follows this struct */ + unsigned transmit_me :1; /* needs to be transmitted */ + + /* XXXX This is a horrible hack. 
*/ + char **put_cname_in_ptr; /* store the cname here if we get one. */ + + struct evdns_base *base; + + struct evdns_request *handle; +}; + +struct reply { + unsigned int type; + unsigned int have_answer : 1; + union { + struct { + u32 addrcount; + u32 addresses[MAX_V4_ADDRS]; + } a; + struct { + u32 addrcount; + struct in6_addr addresses[MAX_V6_ADDRS]; + } aaaa; + struct { + char name[HOST_NAME_MAX]; + } ptr; + } data; +}; + +struct nameserver { + evutil_socket_t socket; /* a connected UDP socket */ + struct sockaddr_storage address; + ev_socklen_t addrlen; + int failed_times; /* number of times which we have given this server a chance */ + int timedout; /* number of times in a row a request has timed out */ + struct event event; + /* these objects are kept in a circular list */ + struct nameserver *next, *prev; + struct event timeout_event; /* used to keep the timeout for */ + /* when we next probe this server. */ + /* Valid if state == 0 */ + /* Outstanding probe request for this nameserver, if any */ + struct evdns_request *probe_request; + char state; /* zero if we think that this server is down */ + char choked; /* true if we have an EAGAIN from this server's socket */ + char write_waiting; /* true if we are waiting for EV_WRITE events */ + struct evdns_base *base; + + /* Number of currently inflight requests: used + * to track when we should add/del the event. */ + int requests_inflight; +}; + + +/* Represents a local port where we're listening for DNS requests. Right now, */ +/* only UDP is supported. */ +struct evdns_server_port { + evutil_socket_t socket; /* socket we use to read queries and write replies. */ + int refcnt; /* reference count. */ + char choked; /* Are we currently blocked from writing? */ + char closing; /* Are we trying to close this port, pending writes? */ + evdns_request_callback_fn_type user_callback; /* Fn to handle requests */ + void *user_data; /* Opaque pointer passed to user_callback */ + struct event event; /* Read/write event */ + /* circular list of replies that we want to write. */ + struct server_request *pending_replies; + struct event_base *event_base; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + void *lock; +#endif +}; + +/* Represents part of a reply being built. (That is, a single RR.) */ +struct server_reply_item { + struct server_reply_item *next; /* next item in sequence. */ + char *name; /* name part of the RR */ + u16 type; /* The RR type */ + u16 class; /* The RR class (usually CLASS_INET) */ + u32 ttl; /* The RR TTL */ + char is_name; /* True iff data is a label */ + u16 datalen; /* Length of data; -1 if data is a label */ + void *data; /* The contents of the RR */ +}; + +/* Represents a request that we've received as a DNS server, and holds */ +/* the components of the reply as we're constructing it. */ +struct server_request { + /* Pointers to the next and previous entries on the list of replies */ + /* that we're waiting to write. Only set if we have tried to respond */ + /* and gotten EAGAIN. */ + struct server_request *next_pending; + struct server_request *prev_pending; + + u16 trans_id; /* Transaction id. */ + struct evdns_server_port *port; /* Which port received this request on? */ + struct sockaddr_storage addr; /* Where to send the response */ + ev_socklen_t addrlen; /* length of addr */ + + int n_answer; /* how many answer RRs have been set? */ + int n_authority; /* how many authority RRs have been set? */ + int n_additional; /* how many additional RRs have been set? 
*/ + + struct server_reply_item *answer; /* linked list of answer RRs */ + struct server_reply_item *authority; /* linked list of authority RRs */ + struct server_reply_item *additional; /* linked list of additional RRs */ + + /* Constructed response. Only set once we're ready to send a reply. */ + /* Once this is set, the RR fields are cleared, and no more should be set. */ + char *response; + size_t response_len; + + /* Caller-visible fields: flags, questions. */ + struct evdns_server_request base; +}; + +struct evdns_base { + /* An array of n_req_heads circular lists for inflight requests. + * Each inflight request req is in req_heads[req->trans_id % n_req_heads]. + */ + struct request **req_heads; + /* A circular list of requests that we're waiting to send, but haven't + * sent yet because there are too many requests inflight */ + struct request *req_waiting_head; + /* A circular list of nameservers. */ + struct nameserver *server_head; + int n_req_heads; + + struct event_base *event_base; + + char *interface_name; + + /* The number of good nameservers that we have */ + int global_good_nameservers; + + /* inflight requests are contained in the req_head list */ + /* and are actually going out across the network */ + int global_requests_inflight; + /* requests which aren't inflight are in the waiting list */ + /* and are counted here */ + int global_requests_waiting; + + int global_max_requests_inflight; + + struct timeval global_timeout; /* 5 seconds by default */ + int global_max_reissues; /* a reissue occurs when we get some errors from the server */ + int global_max_retransmits; /* number of times we'll retransmit a request which timed out */ + /* number of timeouts in a row before we consider this server to be down */ + int global_max_nameserver_timeout; + /* true iff we will use the 0x20 hack to prevent poisoning attacks. */ + int global_randomize_case; + + /* The first time that a nameserver fails, how long do we wait before + * probing to see if it has returned? */ + struct timeval global_nameserver_probe_initial_timeout; + + /** Port to bind to for outgoing DNS packets. */ + struct sockaddr_storage global_outgoing_address; + /** ev_socklen_t for global_outgoing_address. 0 if it isn't set. */ + ev_socklen_t global_outgoing_addrlen; + + struct timeval global_getaddrinfo_allow_skew; + + int so_rcvbuf; + int so_sndbuf; + + int getaddrinfo_ipv4_timeouts; + int getaddrinfo_ipv6_timeouts; + int getaddrinfo_ipv4_answered; + int getaddrinfo_ipv6_answered; + + struct search_state *global_search_state; + + TAILQ_HEAD(hosts_list, hosts_entry) hostsdb; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + void *lock; +#endif + + int disable_when_inactive; +}; + +struct hosts_entry { + TAILQ_ENTRY(hosts_entry) next; + union { + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + } addr; + int addrlen; + char hostname[1]; +}; + +static struct evdns_base *current_base = NULL; + +struct evdns_base * +evdns_get_global_base(void) +{ + return current_base; +} + +/* Given a pointer to an evdns_server_request, get the corresponding */ +/* server_request. 
*/ +#define TO_SERVER_REQUEST(base_ptr) \ + ((struct server_request*) \ + (((char*)(base_ptr) - evutil_offsetof(struct server_request, base)))) + +#define REQ_HEAD(base, id) ((base)->req_heads[id % (base)->n_req_heads]) + +static struct nameserver *nameserver_pick(struct evdns_base *base); +static void evdns_request_insert(struct request *req, struct request **head); +static void evdns_request_remove(struct request *req, struct request **head); +static void nameserver_ready_callback(evutil_socket_t fd, short events, void *arg); +static int evdns_transmit(struct evdns_base *base); +static int evdns_request_transmit(struct request *req); +static void nameserver_send_probe(struct nameserver *const ns); +static void search_request_finished(struct evdns_request *const); +static int search_try_next(struct evdns_request *const req); +static struct request *search_request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg); +static void evdns_requests_pump_waiting_queue(struct evdns_base *base); +static u16 transaction_id_pick(struct evdns_base *base); +static struct request *request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *name, int flags, evdns_callback_type callback, void *ptr); +static void request_submit(struct request *const req); + +static int server_request_free(struct server_request *req); +static void server_request_free_answers(struct server_request *req); +static void server_port_free(struct evdns_server_port *port); +static void server_port_ready_callback(evutil_socket_t fd, short events, void *arg); +static int evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename); +static int evdns_base_set_option_impl(struct evdns_base *base, + const char *option, const char *val, int flags); +static void evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests); +static void evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg); + +static int strtoint(const char *const str); + +#ifdef EVENT__DISABLE_THREAD_SUPPORT +#define EVDNS_LOCK(base) EVUTIL_NIL_STMT_ +#define EVDNS_UNLOCK(base) EVUTIL_NIL_STMT_ +#define ASSERT_LOCKED(base) EVUTIL_NIL_STMT_ +#else +#define EVDNS_LOCK(base) \ + EVLOCK_LOCK((base)->lock, 0) +#define EVDNS_UNLOCK(base) \ + EVLOCK_UNLOCK((base)->lock, 0) +#define ASSERT_LOCKED(base) \ + EVLOCK_ASSERT_LOCKED((base)->lock) +#endif + +static evdns_debug_log_fn_type evdns_log_fn = NULL; + +void +evdns_set_log_fn(evdns_debug_log_fn_type fn) +{ + evdns_log_fn = fn; +} + +#ifdef __GNUC__ +#define EVDNS_LOG_CHECK __attribute__ ((format(printf, 2, 3))) +#else +#define EVDNS_LOG_CHECK +#endif + +static void evdns_log_(int severity, const char *fmt, ...) EVDNS_LOG_CHECK; +static void +evdns_log_(int severity, const char *fmt, ...) +{ + va_list args; + va_start(args,fmt); + if (evdns_log_fn) { + char buf[512]; + int is_warn = (severity == EVDNS_LOG_WARN); + evutil_vsnprintf(buf, sizeof(buf), fmt, args); + evdns_log_fn(is_warn, buf); + } else { + event_logv_(severity, NULL, fmt, args); + } + va_end(args); +} + +#define log evdns_log_ + +/* This walks the list of inflight requests to find the */ +/* one with a matching transaction id. 
Returns NULL on */ +/* failure */ +static struct request * +request_find_from_trans_id(struct evdns_base *base, u16 trans_id) { + struct request *req = REQ_HEAD(base, trans_id); + struct request *const started_at = req; + + ASSERT_LOCKED(base); + + if (req) { + do { + if (req->trans_id == trans_id) return req; + req = req->next; + } while (req != started_at); + } + + return NULL; +} + +/* a libevent callback function which is called when a nameserver */ +/* has gone down and we want to test if it has came back to life yet */ +static void +nameserver_prod_callback(evutil_socket_t fd, short events, void *arg) { + struct nameserver *const ns = (struct nameserver *) arg; + (void)fd; + (void)events; + + EVDNS_LOCK(ns->base); + nameserver_send_probe(ns); + EVDNS_UNLOCK(ns->base); +} + +/* a libevent callback which is called when a nameserver probe (to see if */ +/* it has come back to life) times out. We increment the count of failed_times */ +/* and wait longer to send the next probe packet. */ +static void +nameserver_probe_failed(struct nameserver *const ns) { + struct timeval timeout; + int i; + + ASSERT_LOCKED(ns->base); + (void) evtimer_del(&ns->timeout_event); + if (ns->state == 1) { + /* This can happen if the nameserver acts in a way which makes us mark */ + /* it as bad and then starts sending good replies. */ + return; + } + +#define MAX_PROBE_TIMEOUT 3600 +#define TIMEOUT_BACKOFF_FACTOR 3 + + memcpy(&timeout, &ns->base->global_nameserver_probe_initial_timeout, + sizeof(struct timeval)); + for (i=ns->failed_times; i > 0 && timeout.tv_sec < MAX_PROBE_TIMEOUT; --i) { + timeout.tv_sec *= TIMEOUT_BACKOFF_FACTOR; + timeout.tv_usec *= TIMEOUT_BACKOFF_FACTOR; + if (timeout.tv_usec > 1000000) { + timeout.tv_sec += timeout.tv_usec / 1000000; + timeout.tv_usec %= 1000000; + } + } + if (timeout.tv_sec > MAX_PROBE_TIMEOUT) { + timeout.tv_sec = MAX_PROBE_TIMEOUT; + timeout.tv_usec = 0; + } + + ns->failed_times++; + + if (evtimer_add(&ns->timeout_event, &timeout) < 0) { + char addrbuf[128]; + log(EVDNS_LOG_WARN, + "Error from libevent when adding timer event for %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf))); + } +} + +static void +request_swap_ns(struct request *req, struct nameserver *ns) { + if (ns && req->ns != ns) { + EVUTIL_ASSERT(req->ns->requests_inflight > 0); + req->ns->requests_inflight--; + ns->requests_inflight++; + + req->ns = ns; + } +} + +/* called when a nameserver has been deemed to have failed. 
For example, too */ +/* many packets have timed out etc */ +static void +nameserver_failed(struct nameserver *const ns, const char *msg) { + struct request *req, *started_at; + struct evdns_base *base = ns->base; + int i; + char addrbuf[128]; + + ASSERT_LOCKED(base); + /* if this nameserver has already been marked as failed */ + /* then don't do anything */ + if (!ns->state) return; + + log(EVDNS_LOG_MSG, "Nameserver %s has failed: %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf)), + msg); + + base->global_good_nameservers--; + EVUTIL_ASSERT(base->global_good_nameservers >= 0); + if (base->global_good_nameservers == 0) { + log(EVDNS_LOG_MSG, "All nameservers have failed"); + } + + ns->state = 0; + ns->failed_times = 1; + + if (evtimer_add(&ns->timeout_event, + &base->global_nameserver_probe_initial_timeout) < 0) { + log(EVDNS_LOG_WARN, + "Error from libevent when adding timer event for %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf))); + /* ???? Do more? */ + } + + /* walk the list of inflight requests to see if any can be reassigned to */ + /* a different server. Requests in the waiting queue don't have a */ + /* nameserver assigned yet */ + + /* if we don't have *any* good nameservers then there's no point */ + /* trying to reassign requests to one */ + if (!base->global_good_nameservers) return; + + for (i = 0; i < base->n_req_heads; ++i) { + req = started_at = base->req_heads[i]; + if (req) { + do { + if (req->tx_count == 0 && req->ns == ns) { + /* still waiting to go out, can be moved */ + /* to another server */ + request_swap_ns(req, nameserver_pick(base)); + } + req = req->next; + } while (req != started_at); + } + } +} + +static void +nameserver_up(struct nameserver *const ns) +{ + char addrbuf[128]; + ASSERT_LOCKED(ns->base); + if (ns->state) return; + log(EVDNS_LOG_MSG, "Nameserver %s is back up", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf))); + evtimer_del(&ns->timeout_event); + if (ns->probe_request) { + evdns_cancel_request(ns->base, ns->probe_request); + ns->probe_request = NULL; + } + ns->state = 1; + ns->failed_times = 0; + ns->timedout = 0; + ns->base->global_good_nameservers++; +} + +static void +request_trans_id_set(struct request *const req, const u16 trans_id) { + req->trans_id = trans_id; + *((u16 *) req->request) = htons(trans_id); +} + +/* Called to remove a request from a list and dealloc it. */ +/* head is a pointer to the head of the list it should be */ +/* removed from or NULL if the request isn't in a list. */ +/* when free_handle is one, free the handle as well. 
*/ +static void +request_finished(struct request *const req, struct request **head, int free_handle) { + struct evdns_base *base = req->base; + int was_inflight = (head != &base->req_waiting_head); + EVDNS_LOCK(base); + ASSERT_VALID_REQUEST(req); + + if (head) + evdns_request_remove(req, head); + + log(EVDNS_LOG_DEBUG, "Removing timeout for request %p", req); + if (was_inflight) { + evtimer_del(&req->timeout_event); + base->global_requests_inflight--; + req->ns->requests_inflight--; + } else { + base->global_requests_waiting--; + } + /* it was initialized during request_new / evtimer_assign */ + event_debug_unassign(&req->timeout_event); + + if (req->ns && + req->ns->requests_inflight == 0 && + req->base->disable_when_inactive) { + event_del(&req->ns->event); + evtimer_del(&req->ns->timeout_event); + } + + if (!req->request_appended) { + /* need to free the request data on it's own */ + mm_free(req->request); + } else { + /* the request data is appended onto the header */ + /* so everything gets free()ed when we: */ + } + + if (req->handle) { + EVUTIL_ASSERT(req->handle->current_req == req); + + if (free_handle) { + search_request_finished(req->handle); + req->handle->current_req = NULL; + if (! req->handle->pending_cb) { + /* If we're planning to run the callback, + * don't free the handle until later. */ + mm_free(req->handle); + } + req->handle = NULL; /* If we have a bug, let's crash + * early */ + } else { + req->handle->current_req = NULL; + } + } + + mm_free(req); + + evdns_requests_pump_waiting_queue(base); + EVDNS_UNLOCK(base); +} + +/* This is called when a server returns a funny error code. */ +/* We try the request again with another server. */ +/* */ +/* return: */ +/* 0 ok */ +/* 1 failed/reissue is pointless */ +static int +request_reissue(struct request *req) { + const struct nameserver *const last_ns = req->ns; + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + /* the last nameserver should have been marked as failing */ + /* by the caller of this function, therefore pick will try */ + /* not to return it */ + request_swap_ns(req, nameserver_pick(req->base)); + if (req->ns == last_ns) { + /* ... but pick did return it */ + /* not a lot of point in trying again with the */ + /* same server */ + return 1; + } + + req->reissue_count++; + req->tx_count = 0; + req->transmit_me = 1; + + return 0; +} + +/* this function looks for space on the inflight queue and promotes */ +/* requests from the waiting queue if it can. */ +/* */ +/* TODO: */ +/* add return code, see at nameserver_pick() and other functions. 
*/ +static void +evdns_requests_pump_waiting_queue(struct evdns_base *base) { + ASSERT_LOCKED(base); + while (base->global_requests_inflight < base->global_max_requests_inflight && + base->global_requests_waiting) { + struct request *req; + + EVUTIL_ASSERT(base->req_waiting_head); + req = base->req_waiting_head; + + req->ns = nameserver_pick(base); + if (!req->ns) + return; + + /* move a request from the waiting queue to the inflight queue */ + req->ns->requests_inflight++; + + evdns_request_remove(req, &base->req_waiting_head); + + base->global_requests_waiting--; + base->global_requests_inflight++; + + request_trans_id_set(req, transaction_id_pick(base)); + + evdns_request_insert(req, &REQ_HEAD(base, req->trans_id)); + evdns_request_transmit(req); + evdns_transmit(base); + } +} + +/* TODO(nickm) document */ +struct deferred_reply_callback { + struct event_callback deferred; + struct evdns_request *handle; + u8 request_type; + u8 have_reply; + u32 ttl; + u32 err; + evdns_callback_type user_callback; + struct reply reply; +}; + +static void +reply_run_callback(struct event_callback *d, void *user_pointer) +{ + struct deferred_reply_callback *cb = + EVUTIL_UPCAST(d, struct deferred_reply_callback, deferred); + + switch (cb->request_type) { + case TYPE_A: + if (cb->have_reply) + cb->user_callback(DNS_ERR_NONE, DNS_IPv4_A, + cb->reply.data.a.addrcount, cb->ttl, + cb->reply.data.a.addresses, + user_pointer); + else + cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer); + break; + case TYPE_PTR: + if (cb->have_reply) { + char *name = cb->reply.data.ptr.name; + cb->user_callback(DNS_ERR_NONE, DNS_PTR, 1, cb->ttl, + &name, user_pointer); + } else { + cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer); + } + break; + case TYPE_AAAA: + if (cb->have_reply) + cb->user_callback(DNS_ERR_NONE, DNS_IPv6_AAAA, + cb->reply.data.aaaa.addrcount, cb->ttl, + cb->reply.data.aaaa.addresses, + user_pointer); + else + cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer); + break; + default: + EVUTIL_ASSERT(0); + } + + if (cb->handle && cb->handle->pending_cb) { + mm_free(cb->handle); + } + + mm_free(cb); +} + +static void +reply_schedule_callback(struct request *const req, u32 ttl, u32 err, struct reply *reply) +{ + struct deferred_reply_callback *d = mm_calloc(1, sizeof(*d)); + + if (!d) { + event_warn("%s: Couldn't allocate space for deferred callback.", + __func__); + return; + } + + ASSERT_LOCKED(req->base); + + d->request_type = req->request_type; + d->user_callback = req->user_callback; + d->ttl = ttl; + d->err = err; + if (reply) { + d->have_reply = 1; + memcpy(&d->reply, reply, sizeof(struct reply)); + } + + if (req->handle) { + req->handle->pending_cb = 1; + d->handle = req->handle; + } + + event_deferred_cb_init_( + &d->deferred, + event_get_priority(&req->timeout_event), + reply_run_callback, + req->user_pointer); + event_deferred_cb_schedule_( + req->base->event_base, + &d->deferred); +} + + +#define _QR_MASK 0x8000U +#define _OP_MASK 0x7800U +#define _AA_MASK 0x0400U +#define _TC_MASK 0x0200U +#define _RD_MASK 0x0100U +#define _RA_MASK 0x0080U +#define _Z_MASK 0x0040U +#define _AD_MASK 0x0020U +#define _CD_MASK 0x0010U +#define _RCODE_MASK 0x000fU +#define _Z_MASK_DEPRECATED 0x0070U + +/* this processes a parsed reply packet */ +static void +reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) { + int error; + char addrbuf[128]; + static const int error_codes[] = { + DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST, + DNS_ERR_NOTIMPL, 
DNS_ERR_REFUSED + }; + + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + + if (flags & (_RCODE_MASK | _TC_MASK) || !reply || !reply->have_answer) { + /* there was an error */ + if (flags & _TC_MASK) { + error = DNS_ERR_TRUNCATED; + } else if (flags & _RCODE_MASK) { + u16 error_code = (flags & _RCODE_MASK) - 1; + if (error_code > 4) { + error = DNS_ERR_UNKNOWN; + } else { + error = error_codes[error_code]; + } + } else if (reply && !reply->have_answer) { + error = DNS_ERR_NODATA; + } else { + error = DNS_ERR_UNKNOWN; + } + + switch (error) { + case DNS_ERR_NOTIMPL: + case DNS_ERR_REFUSED: + /* we regard these errors as marking a bad nameserver */ + if (req->reissue_count < req->base->global_max_reissues) { + char msg[64]; + evutil_snprintf(msg, sizeof(msg), "Bad response %d (%s)", + error, evdns_err_to_string(error)); + nameserver_failed(req->ns, msg); + if (!request_reissue(req)) return; + } + break; + case DNS_ERR_SERVERFAILED: + /* rcode 2 (servfailed) sometimes means "we + * are broken" and sometimes (with some binds) + * means "that request was very confusing." + * Treat this as a timeout, not a failure. + */ + log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver" + "at %s; will allow the request to time out.", + evutil_format_sockaddr_port_( + (struct sockaddr *)&req->ns->address, + addrbuf, sizeof(addrbuf))); + /* Call the timeout function */ + evdns_request_timeout_callback(0, 0, req); + return; + default: + /* we got a good reply from the nameserver: it is up. */ + if (req->handle == req->ns->probe_request) { + /* Avoid double-free */ + req->ns->probe_request = NULL; + } + + nameserver_up(req->ns); + } + + if (req->handle->search_state && + req->request_type != TYPE_PTR) { + /* if we have a list of domains to search in, + * try the next one */ + if (!search_try_next(req->handle)) { + /* a new request was issued so this + * request is finished and */ + /* the user callback will be made when + * that request (or a */ + /* child of it) finishes. */ + return; + } + } + + /* all else failed. Pass the failure up */ + reply_schedule_callback(req, ttl, error, NULL); + request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); + } else { + /* all ok, tell the user */ + reply_schedule_callback(req, ttl, 0, reply); + if (req->handle == req->ns->probe_request) + req->ns->probe_request = NULL; /* Avoid double-free */ + nameserver_up(req->ns); + request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); + } +} + +static int +name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) { + int name_end = -1; + int j = *idx; + int ptr_count = 0; +#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0) +#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0) +#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0) + + char *cp = name_out; + const char *const end = name_out + name_out_len; + + /* Normally, names are a series of length prefixed strings terminated */ + /* with a length of 0 (the lengths are u8's < 63). */ + /* However, the length can start with a pair of 1 bits and that */ + /* means that the next 14 bits are a pointer within the current */ + /* packet. 
*/ + + for (;;) { + u8 label_len; + GET8(label_len); + if (!label_len) break; + if (label_len & 0xc0) { + u8 ptr_low; + GET8(ptr_low); + if (name_end < 0) name_end = j; + j = (((int)label_len & 0x3f) << 8) + ptr_low; + /* Make sure that the target offset is in-bounds. */ + if (j < 0 || j >= length) return -1; + /* If we've jumped more times than there are characters in the + * message, we must have a loop. */ + if (++ptr_count > length) return -1; + continue; + } + if (label_len > 63) return -1; + if (cp != name_out) { + if (cp + 1 >= end) return -1; + *cp++ = '.'; + } + if (cp + label_len >= end) return -1; + if (j + label_len > length) return -1; + memcpy(cp, packet + j, label_len); + cp += label_len; + j += label_len; + } + if (cp >= end) return -1; + *cp = '\0'; + if (name_end < 0) + *idx = j; + else + *idx = name_end; + return 0; + err: + return -1; +} + +/* parses a raw request from a nameserver */ +static int +reply_parse(struct evdns_base *base, u8 *packet, int length) { + int j = 0, k = 0; /* index into packet */ + u16 t_; /* used by the macros */ + u32 t32_; /* used by the macros */ + char tmp_name[256], cmp_name[256]; /* used by the macros */ + int name_matches = 0; + + u16 trans_id, questions, answers, authority, additional, datalength; + u16 flags = 0; + u32 ttl, ttl_r = 0xffffffff; + struct reply reply; + struct request *req = NULL; + unsigned int i; + + ASSERT_LOCKED(base); + + GET16(trans_id); + GET16(flags); + GET16(questions); + GET16(answers); + GET16(authority); + GET16(additional); + (void) authority; /* suppress "unused variable" warnings. */ + (void) additional; /* suppress "unused variable" warnings. */ + + req = request_find_from_trans_id(base, trans_id); + if (!req) return -1; + EVUTIL_ASSERT(req->base == base); + + memset(&reply, 0, sizeof(reply)); + + /* If it's not an answer, it doesn't correspond to any request. */ + if (!(flags & _QR_MASK)) return -1; /* must be an answer */ + if ((flags & (_RCODE_MASK|_TC_MASK)) && (flags & (_RCODE_MASK|_TC_MASK)) != DNS_ERR_NOTEXIST) { + /* there was an error and it's not NXDOMAIN */ + goto err; + } + /* if (!answers) return; */ /* must have an answer of some form */ + + /* This macro skips a name in the DNS reply. */ +#define SKIP_NAME \ + do { tmp_name[0] = '\0'; \ + if (name_parse(packet, length, &j, tmp_name, \ + sizeof(tmp_name))<0) \ + goto err; \ + } while (0) + + reply.type = req->request_type; + + /* skip over each question in the reply */ + for (i = 0; i < questions; ++i) { + /* the question looks like + * + */ + tmp_name[0] = '\0'; + cmp_name[0] = '\0'; + k = j; + if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name)) < 0) + goto err; + if (name_parse(req->request, req->request_len, &k, + cmp_name, sizeof(cmp_name))<0) + goto err; + if (!base->global_randomize_case) { + if (strcmp(tmp_name, cmp_name) == 0) + name_matches = 1; + } else { + if (evutil_ascii_strcasecmp(tmp_name, cmp_name) == 0) + name_matches = 1; + } + + j += 4; + if (j > length) + goto err; + } + + if (!name_matches) + goto err; + + /* now we have the answer section which looks like + * + */ + + for (i = 0; i < answers; ++i) { + u16 type, class; + + SKIP_NAME; + GET16(type); + GET16(class); + GET32(ttl); + GET16(datalength); + + if (type == TYPE_A && class == CLASS_INET) { + int addrcount, addrtocopy; + if (req->request_type != TYPE_A) { + j += datalength; continue; + } + if ((datalength & 3) != 0) /* not an even number of As. 
*/ + goto err; + addrcount = datalength >> 2; + addrtocopy = MIN(MAX_V4_ADDRS - reply.data.a.addrcount, (unsigned)addrcount); + + ttl_r = MIN(ttl_r, ttl); + /* we only bother with the first four addresses. */ + if (j + 4*addrtocopy > length) goto err; + memcpy(&reply.data.a.addresses[reply.data.a.addrcount], + packet + j, 4*addrtocopy); + j += 4*addrtocopy; + reply.data.a.addrcount += addrtocopy; + reply.have_answer = 1; + if (reply.data.a.addrcount == MAX_V4_ADDRS) break; + } else if (type == TYPE_PTR && class == CLASS_INET) { + if (req->request_type != TYPE_PTR) { + j += datalength; continue; + } + if (name_parse(packet, length, &j, reply.data.ptr.name, + sizeof(reply.data.ptr.name))<0) + goto err; + ttl_r = MIN(ttl_r, ttl); + reply.have_answer = 1; + break; + } else if (type == TYPE_CNAME) { + char cname[HOST_NAME_MAX]; + if (!req->put_cname_in_ptr || *req->put_cname_in_ptr) { + j += datalength; continue; + } + if (name_parse(packet, length, &j, cname, + sizeof(cname))<0) + goto err; + *req->put_cname_in_ptr = mm_strdup(cname); + } else if (type == TYPE_AAAA && class == CLASS_INET) { + int addrcount, addrtocopy; + if (req->request_type != TYPE_AAAA) { + j += datalength; continue; + } + if ((datalength & 15) != 0) /* not an even number of AAAAs. */ + goto err; + addrcount = datalength >> 4; /* each address is 16 bytes long */ + addrtocopy = MIN(MAX_V6_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount); + ttl_r = MIN(ttl_r, ttl); + + /* we only bother with the first four addresses. */ + if (j + 16*addrtocopy > length) goto err; + memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount], + packet + j, 16*addrtocopy); + reply.data.aaaa.addrcount += addrtocopy; + j += 16*addrtocopy; + reply.have_answer = 1; + if (reply.data.aaaa.addrcount == MAX_V6_ADDRS) break; + } else { + /* skip over any other type of resource */ + j += datalength; + } + } + + if (!reply.have_answer) { + for (i = 0; i < authority; ++i) { + u16 type, class; + SKIP_NAME; + GET16(type); + GET16(class); + GET32(ttl); + GET16(datalength); + if (type == TYPE_SOA && class == CLASS_INET) { + u32 serial, refresh, retry, expire, minimum; + SKIP_NAME; + SKIP_NAME; + GET32(serial); + GET32(refresh); + GET32(retry); + GET32(expire); + GET32(minimum); + (void)expire; + (void)retry; + (void)refresh; + (void)serial; + ttl_r = MIN(ttl_r, ttl); + ttl_r = MIN(ttl_r, minimum); + } else { + /* skip over any other type of resource */ + j += datalength; + } + } + } + + if (ttl_r == 0xffffffff) + ttl_r = 0; + + reply_handle(req, flags, ttl_r, &reply); + return 0; + err: + if (req) + reply_handle(req, flags, 0, NULL); + return -1; +} + +/* Parse a raw request (packet,length) sent to a nameserver port (port) from */ +/* a DNS client (addr,addrlen), and if it's well-formed, call the corresponding */ +/* callback. */ +static int +request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, ev_socklen_t addrlen) +{ + int j = 0; /* index into packet */ + u16 t_; /* used by the macros */ + char tmp_name[256]; /* used by the macros */ + + int i; + u16 trans_id, flags, questions, answers, authority, additional; + struct server_request *server_req = NULL; + + ASSERT_LOCKED(port); + + /* Get the header fields */ + GET16(trans_id); + GET16(flags); + GET16(questions); + GET16(answers); + GET16(authority); + GET16(additional); + (void)answers; + (void)additional; + (void)authority; + + if (flags & _QR_MASK) return -1; /* Must not be an answer. */ + flags &= (_RD_MASK|_CD_MASK); /* Only RD and CD get preserved. 
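Everything else is cleared here; the QR bit and the response code are filled in later, when the reply is formatted.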
*/ + + server_req = mm_malloc(sizeof(struct server_request)); + if (server_req == NULL) return -1; + memset(server_req, 0, sizeof(struct server_request)); + + server_req->trans_id = trans_id; + memcpy(&server_req->addr, addr, addrlen); + server_req->addrlen = addrlen; + + server_req->base.flags = flags; + server_req->base.nquestions = 0; + server_req->base.questions = mm_calloc(sizeof(struct evdns_server_question *), questions); + if (server_req->base.questions == NULL) + goto err; + + for (i = 0; i < questions; ++i) { + u16 type, class; + struct evdns_server_question *q; + int namelen; + if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0) + goto err; + GET16(type); + GET16(class); + namelen = (int)strlen(tmp_name); + q = mm_malloc(sizeof(struct evdns_server_question) + namelen); + if (!q) + goto err; + q->type = type; + q->dns_question_class = class; + memcpy(q->name, tmp_name, namelen+1); + server_req->base.questions[server_req->base.nquestions++] = q; + } + + /* Ignore answers, authority, and additional. */ + + server_req->port = port; + port->refcnt++; + + /* Only standard queries are supported. */ + if (flags & _OP_MASK) { + evdns_server_request_respond(&(server_req->base), DNS_ERR_NOTIMPL); + return -1; + } + + port->user_callback(&(server_req->base), port->user_data); + + return 0; +err: + if (server_req->base.questions) { + for (i = 0; i < server_req->base.nquestions; ++i) + mm_free(server_req->base.questions[i]); + mm_free(server_req->base.questions); + } + mm_free(server_req); + return -1; + +#undef SKIP_NAME +#undef GET32 +#undef GET16 +#undef GET8 +} + + +void +evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void)) +{ +} + +void +evdns_set_random_bytes_fn(void (*fn)(char *, size_t)) +{ +} + +/* Try to choose a strong transaction id which isn't already in flight */ +static u16 +transaction_id_pick(struct evdns_base *base) { + ASSERT_LOCKED(base); + for (;;) { + u16 trans_id; + evutil_secure_rng_get_bytes(&trans_id, sizeof(trans_id)); + + if (trans_id == 0xffff) continue; + /* now check to see if that id is already inflight */ + if (request_find_from_trans_id(base, trans_id) == NULL) + return trans_id; + } +} + +/* choose a namesever to use. This function will try to ignore */ +/* nameservers which we think are down and load balance across the rest */ +/* by updating the server_head global each time. */ +static struct nameserver * +nameserver_pick(struct evdns_base *base) { + struct nameserver *started_at = base->server_head, *picked; + ASSERT_LOCKED(base); + if (!base->server_head) return NULL; + + /* if we don't have any good nameservers then there's no */ + /* point in trying to find one. 
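We still advance server_head, though, so that successive requests are spread round-robin across whatever servers are configured while we wait for one of them to come back.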
*/ + if (!base->global_good_nameservers) { + base->server_head = base->server_head->next; + return base->server_head; + } + + /* remember that nameservers are in a circular list */ + for (;;) { + if (base->server_head->state) { + /* we think this server is currently good */ + picked = base->server_head; + base->server_head = base->server_head->next; + return picked; + } + + base->server_head = base->server_head->next; + if (base->server_head == started_at) { + /* all the nameservers seem to be down */ + /* so we just return this one and hope for the */ + /* best */ + EVUTIL_ASSERT(base->global_good_nameservers == 0); + picked = base->server_head; + base->server_head = base->server_head->next; + return picked; + } + } +} + +/* this is called when a namesever socket is ready for reading */ +static void +nameserver_read(struct nameserver *ns) { + struct sockaddr_storage ss; + ev_socklen_t addrlen = sizeof(ss); + u8 packet[1500]; + char addrbuf[128]; + ASSERT_LOCKED(ns->base); + + for (;;) { + const int r = recvfrom(ns->socket, (void*)packet, + sizeof(packet), 0, + (struct sockaddr*)&ss, &addrlen); + if (r < 0) { + int err = evutil_socket_geterror(ns->socket); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + return; + nameserver_failed(ns, + evutil_socket_error_to_string(err)); + return; + } + if (evutil_sockaddr_cmp((struct sockaddr*)&ss, + (struct sockaddr*)&ns->address, 0)) { + log(EVDNS_LOG_WARN, "Address mismatch on received " + "DNS packet. Apparent source was %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ss, + addrbuf, sizeof(addrbuf))); + return; + } + + ns->timedout = 0; + reply_parse(ns->base, packet, r); + } +} + +/* Read a packet from a DNS client on a server port s, parse it, and */ +/* act accordingly. */ +static void +server_port_read(struct evdns_server_port *s) { + u8 packet[1500]; + struct sockaddr_storage addr; + ev_socklen_t addrlen; + int r; + ASSERT_LOCKED(s); + + for (;;) { + addrlen = sizeof(struct sockaddr_storage); + r = recvfrom(s->socket, (void*)packet, sizeof(packet), 0, + (struct sockaddr*) &addr, &addrlen); + if (r < 0) { + int err = evutil_socket_geterror(s->socket); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + return; + log(EVDNS_LOG_WARN, + "Error %s (%d) while reading request.", + evutil_socket_error_to_string(err), err); + return; + } + request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen); + } +} + +/* Try to write all pending replies on a given DNS server port. */ +static void +server_port_flush(struct evdns_server_port *port) +{ + struct server_request *req = port->pending_replies; + ASSERT_LOCKED(port); + while (req) { + int r = sendto(port->socket, req->response, (int)req->response_len, 0, + (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen); + if (r < 0) { + int err = evutil_socket_geterror(port->socket); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + return; + log(EVDNS_LOG_WARN, "Error %s (%d) while writing response to port; dropping", evutil_socket_error_to_string(err), err); + } + if (server_request_free(req)) { + /* we released the last reference to req->port. */ + return; + } else { + EVUTIL_ASSERT(req != port->pending_replies); + req = port->pending_replies; + } + } + + /* We have no more pending requests; stop listening for 'writeable' events. */ + (void) event_del(&port->event); + event_assign(&port->event, port->event_base, + port->socket, EV_READ | EV_PERSIST, + server_port_ready_callback, port); + + if (event_add(&port->event, NULL) < 0) { + log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server."); + /* ???? Do more? 
*/ + } +} + +/* set if we are waiting for the ability to write to this server. */ +/* if waiting is true then we ask libevent for EV_WRITE events, otherwise */ +/* we stop these events. */ +static void +nameserver_write_waiting(struct nameserver *ns, char waiting) { + ASSERT_LOCKED(ns->base); + if (ns->write_waiting == waiting) return; + + ns->write_waiting = waiting; + (void) event_del(&ns->event); + event_assign(&ns->event, ns->base->event_base, + ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST, + nameserver_ready_callback, ns); + if (event_add(&ns->event, NULL) < 0) { + char addrbuf[128]; + log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf))); + /* ???? Do more? */ + } +} + +/* a callback function. Called by libevent when the kernel says that */ +/* a nameserver socket is ready for writing or reading */ +static void +nameserver_ready_callback(evutil_socket_t fd, short events, void *arg) { + struct nameserver *ns = (struct nameserver *) arg; + (void)fd; + + EVDNS_LOCK(ns->base); + if (events & EV_WRITE) { + ns->choked = 0; + if (!evdns_transmit(ns->base)) { + nameserver_write_waiting(ns, 0); + } + } + if (events & EV_READ) { + nameserver_read(ns); + } + EVDNS_UNLOCK(ns->base); +} + +/* a callback function. Called by libevent when the kernel says that */ +/* a server socket is ready for writing or reading. */ +static void +server_port_ready_callback(evutil_socket_t fd, short events, void *arg) { + struct evdns_server_port *port = (struct evdns_server_port *) arg; + (void) fd; + + EVDNS_LOCK(port); + if (events & EV_WRITE) { + port->choked = 0; + server_port_flush(port); + } + if (events & EV_READ) { + server_port_read(port); + } + EVDNS_UNLOCK(port); +} + +/* This is an inefficient representation; only use it via the dnslabel_table_* + * functions, so that is can be safely replaced with something smarter later. */ +#define MAX_LABELS 128 +/* Structures used to implement name compression */ +struct dnslabel_entry { char *v; off_t pos; }; +struct dnslabel_table { + int n_labels; /* number of current entries */ + /* map from name to position in message */ + struct dnslabel_entry labels[MAX_LABELS]; +}; + +/* Initialize dnslabel_table. */ +static void +dnslabel_table_init(struct dnslabel_table *table) +{ + table->n_labels = 0; +} + +/* Free all storage held by table, but not the table itself. */ +static void +dnslabel_clear(struct dnslabel_table *table) +{ + int i; + for (i = 0; i < table->n_labels; ++i) + mm_free(table->labels[i].v); + table->n_labels = 0; +} + +/* return the position of the label in the current message, or -1 if the label */ +/* hasn't been used yet. */ +static int +dnslabel_table_get_pos(const struct dnslabel_table *table, const char *label) +{ + int i; + for (i = 0; i < table->n_labels; ++i) { + if (!strcmp(label, table->labels[i].v)) + return table->labels[i].pos; + } + return -1; +} + +/* remember that we've used the label at position pos */ +static int +dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos) +{ + char *v; + int p; + if (table->n_labels == MAX_LABELS) + return (-1); + v = mm_strdup(label); + if (v == NULL) + return (-1); + p = table->n_labels++; + table->labels[p].v = v; + table->labels[p].pos = pos; + + return (0); +} + +/* Converts a string to a length-prefixed set of DNS labels, starting */ +/* at buf[j]. name and buf must not overlap. name_len should be the length */ +/* of name. 
table is optional, and is used for compression. */ +/* */ +/* Input: abc.def */ +/* Output: <3>abc<3>def<0> */ +/* */ +/* Returns the first index after the encoded name, or negative on error. */ +/* -1 label was > 63 bytes */ +/* -2 name too long to fit in buffer. */ +/* */ +static off_t +dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j, + const char *name, const size_t name_len, + struct dnslabel_table *table) { + const char *end = name + name_len; + int ref = 0; + u16 t_; + +#define APPEND16(x) do { \ + if (j + 2 > (off_t)buf_len) \ + goto overflow; \ + t_ = htons(x); \ + memcpy(buf + j, &t_, 2); \ + j += 2; \ + } while (0) +#define APPEND32(x) do { \ + if (j + 4 > (off_t)buf_len) \ + goto overflow; \ + t32_ = htonl(x); \ + memcpy(buf + j, &t32_, 4); \ + j += 4; \ + } while (0) + + if (name_len > 255) return -2; + + for (;;) { + const char *const start = name; + if (table && (ref = dnslabel_table_get_pos(table, name)) >= 0) { + APPEND16(ref | 0xc000); + return j; + } + name = strchr(name, '.'); + if (!name) { + const size_t label_len = end - start; + if (label_len > 63) return -1; + if ((size_t)(j+label_len+1) > buf_len) return -2; + if (table) dnslabel_table_add(table, start, j); + buf[j++] = (ev_uint8_t)label_len; + + memcpy(buf + j, start, label_len); + j += (int) label_len; + break; + } else { + /* append length of the label. */ + const size_t label_len = name - start; + if (label_len > 63) return -1; + if ((size_t)(j+label_len+1) > buf_len) return -2; + if (table) dnslabel_table_add(table, start, j); + buf[j++] = (ev_uint8_t)label_len; + + memcpy(buf + j, start, label_len); + j += (int) label_len; + /* hop over the '.' */ + name++; + } + } + + /* the labels must be terminated by a 0. */ + /* It's possible that the name ended in a . */ + /* in which case the zero is already there */ + if (!j || buf[j-1]) buf[j++] = 0; + return j; + overflow: + return (-2); +} + +/* Finds the length of a dns request for a DNS name of the given */ +/* length. The actual request may be smaller than the value returned */ +/* here */ +static size_t +evdns_request_len(const size_t name_len) { + return 96 + /* length of the DNS standard header */ + name_len + 2 + + 4; /* space for the resource type */ +} + +/* build a dns request packet into buf. buf should be at least as long */ +/* as evdns_request_len told you it should be. */ +/* */ +/* Returns the amount of space used. Negative on error. 
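As an illustration (example values only), a type A query for "example.com" built here consists of the 12-byte header (transaction id, flags 0x0100, one question, zero answer/authority/additional counts) followed by the encoded name 07 'e' 'x' 'a' 'm' 'p' 'l' 'e' 03 'c' 'o' 'm' 00 and the 16-bit type and class fields.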
*/ +static int +evdns_request_data_build(const char *const name, const size_t name_len, + const u16 trans_id, const u16 type, const u16 class, + u8 *const buf, size_t buf_len) { + off_t j = 0; /* current offset into buf */ + u16 t_; /* used by the macros */ + + APPEND16(trans_id); + APPEND16(0x0100); /* standard query, recusion needed */ + APPEND16(1); /* one question */ + APPEND16(0); /* no answers */ + APPEND16(0); /* no authority */ + APPEND16(0); /* no additional */ + + j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL); + if (j < 0) { + return (int)j; + } + + APPEND16(type); + APPEND16(class); + + return (int)j; + overflow: + return (-1); +} + +/* exported function */ +struct evdns_server_port * +evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data) +{ + struct evdns_server_port *port; + if (flags) + return NULL; /* flags not yet implemented */ + if (!(port = mm_malloc(sizeof(struct evdns_server_port)))) + return NULL; + memset(port, 0, sizeof(struct evdns_server_port)); + + + port->socket = socket; + port->refcnt = 1; + port->choked = 0; + port->closing = 0; + port->user_callback = cb; + port->user_data = user_data; + port->pending_replies = NULL; + port->event_base = base; + + event_assign(&port->event, port->event_base, + port->socket, EV_READ | EV_PERSIST, + server_port_ready_callback, port); + if (event_add(&port->event, NULL) < 0) { + mm_free(port); + return NULL; + } + EVTHREAD_ALLOC_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + return port; +} + +struct evdns_server_port * +evdns_add_server_port(evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data) +{ + return evdns_add_server_port_with_base(NULL, socket, flags, cb, user_data); +} + +/* exported function */ +void +evdns_close_server_port(struct evdns_server_port *port) +{ + EVDNS_LOCK(port); + if (--port->refcnt == 0) { + EVDNS_UNLOCK(port); + server_port_free(port); + } else { + port->closing = 1; + EVDNS_UNLOCK(port); + } +} + +/* exported function */ +int +evdns_server_request_add_reply(struct evdns_server_request *req_, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data) +{ + struct server_request *req = TO_SERVER_REQUEST(req_); + struct server_reply_item **itemp, *item; + int *countp; + int result = -1; + + EVDNS_LOCK(req->port); + if (req->response) /* have we already answered? 
*/ + goto done; + + switch (section) { + case EVDNS_ANSWER_SECTION: + itemp = &req->answer; + countp = &req->n_answer; + break; + case EVDNS_AUTHORITY_SECTION: + itemp = &req->authority; + countp = &req->n_authority; + break; + case EVDNS_ADDITIONAL_SECTION: + itemp = &req->additional; + countp = &req->n_additional; + break; + default: + goto done; + } + while (*itemp) { + itemp = &((*itemp)->next); + } + item = mm_malloc(sizeof(struct server_reply_item)); + if (!item) + goto done; + item->next = NULL; + if (!(item->name = mm_strdup(name))) { + mm_free(item); + goto done; + } + item->type = type; + item->dns_question_class = class; + item->ttl = ttl; + item->is_name = is_name != 0; + item->datalen = 0; + item->data = NULL; + if (data) { + if (item->is_name) { + if (!(item->data = mm_strdup(data))) { + mm_free(item->name); + mm_free(item); + goto done; + } + item->datalen = (u16)-1; + } else { + if (!(item->data = mm_malloc(datalen))) { + mm_free(item->name); + mm_free(item); + goto done; + } + item->datalen = datalen; + memcpy(item->data, data, datalen); + } + } + + *itemp = item; + ++(*countp); + result = 0; +done: + EVDNS_UNLOCK(req->port); + return result; +} + +/* exported function */ +int +evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl) +{ + return evdns_server_request_add_reply( + req, EVDNS_ANSWER_SECTION, name, TYPE_A, CLASS_INET, + ttl, n*4, 0, addrs); +} + +/* exported function */ +int +evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl) +{ + return evdns_server_request_add_reply( + req, EVDNS_ANSWER_SECTION, name, TYPE_AAAA, CLASS_INET, + ttl, n*16, 0, addrs); +} + +/* exported function */ +int +evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl) +{ + u32 a; + char buf[32]; + if (in && inaddr_name) + return -1; + else if (!in && !inaddr_name) + return -1; + if (in) { + a = ntohl(in->s_addr); + evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa", + (int)(u8)((a )&0xff), + (int)(u8)((a>>8 )&0xff), + (int)(u8)((a>>16)&0xff), + (int)(u8)((a>>24)&0xff)); + inaddr_name = buf; + } + return evdns_server_request_add_reply( + req, EVDNS_ANSWER_SECTION, inaddr_name, TYPE_PTR, CLASS_INET, + ttl, -1, 1, hostname); +} + +/* exported function */ +int +evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl) +{ + return evdns_server_request_add_reply( + req, EVDNS_ANSWER_SECTION, name, TYPE_CNAME, CLASS_INET, + ttl, -1, 1, cname); +} + +/* exported function */ +void +evdns_server_request_set_flags(struct evdns_server_request *exreq, int flags) +{ + struct server_request *req = TO_SERVER_REQUEST(exreq); + req->base.flags &= ~(EVDNS_FLAGS_AA|EVDNS_FLAGS_RD); + req->base.flags |= flags; +} + +static int +evdns_server_request_format_response(struct server_request *req, int err) +{ + unsigned char buf[1500]; + size_t buf_len = sizeof(buf); + off_t j = 0, r; + u16 t_; + u32 t32_; + int i; + u16 flags; + struct dnslabel_table table; + + if (err < 0 || err > 15) return -1; + + /* Set response bit and error code; copy OPCODE and RD fields from + * question; copy RA and AA if set by caller. 
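For instance, an authoritative answer to a recursive query ends up with QR, AA and RD set, i.e. a flags word of 0x8500, with the error code carried in the low four bits.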
*/ + flags = req->base.flags; + flags |= (_QR_MASK | err); + + dnslabel_table_init(&table); + APPEND16(req->trans_id); + APPEND16(flags); + APPEND16(req->base.nquestions); + APPEND16(req->n_answer); + APPEND16(req->n_authority); + APPEND16(req->n_additional); + + /* Add questions. */ + for (i=0; i < req->base.nquestions; ++i) { + const char *s = req->base.questions[i]->name; + j = dnsname_to_labels(buf, buf_len, j, s, strlen(s), &table); + if (j < 0) { + dnslabel_clear(&table); + return (int) j; + } + APPEND16(req->base.questions[i]->type); + APPEND16(req->base.questions[i]->dns_question_class); + } + + /* Add answer, authority, and additional sections. */ + for (i=0; i<3; ++i) { + struct server_reply_item *item; + if (i==0) + item = req->answer; + else if (i==1) + item = req->authority; + else + item = req->additional; + while (item) { + r = dnsname_to_labels(buf, buf_len, j, item->name, strlen(item->name), &table); + if (r < 0) + goto overflow; + j = r; + + APPEND16(item->type); + APPEND16(item->dns_question_class); + APPEND32(item->ttl); + if (item->is_name) { + off_t len_idx = j, name_start; + j += 2; + name_start = j; + r = dnsname_to_labels(buf, buf_len, j, item->data, strlen(item->data), &table); + if (r < 0) + goto overflow; + j = r; + t_ = htons( (short) (j-name_start) ); + memcpy(buf+len_idx, &t_, 2); + } else { + APPEND16(item->datalen); + if (j+item->datalen > (off_t)buf_len) + goto overflow; + memcpy(buf+j, item->data, item->datalen); + j += item->datalen; + } + item = item->next; + } + } + + if (j > 512) { +overflow: + j = 512; + buf[2] |= 0x02; /* set the truncated bit. */ + } + + req->response_len = j; + + if (!(req->response = mm_malloc(req->response_len))) { + server_request_free_answers(req); + dnslabel_clear(&table); + return (-1); + } + memcpy(req->response, buf, req->response_len); + server_request_free_answers(req); + dnslabel_clear(&table); + return (0); +} + +/* exported function */ +int +evdns_server_request_respond(struct evdns_server_request *req_, int err) +{ + struct server_request *req = TO_SERVER_REQUEST(req_); + struct evdns_server_port *port = req->port; + int r = -1; + + EVDNS_LOCK(port); + if (!req->response) { + if ((r = evdns_server_request_format_response(req, err))<0) + goto done; + } + + r = sendto(port->socket, req->response, (int)req->response_len, 0, + (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen); + if (r<0) { + int sock_err = evutil_socket_geterror(port->socket); + if (EVUTIL_ERR_RW_RETRIABLE(sock_err)) + goto done; + + if (port->pending_replies) { + req->prev_pending = port->pending_replies->prev_pending; + req->next_pending = port->pending_replies; + req->prev_pending->next_pending = + req->next_pending->prev_pending = req; + } else { + req->prev_pending = req->next_pending = req; + port->pending_replies = req; + port->choked = 1; + + (void) event_del(&port->event); + event_assign(&port->event, port->event_base, port->socket, (port->closing?0:EV_READ) | EV_WRITE | EV_PERSIST, server_port_ready_callback, port); + + if (event_add(&port->event, NULL) < 0) { + log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server"); + } + + } + + r = 1; + goto done; + } + if (server_request_free(req)) { + r = 0; + goto done; + } + + if (port->pending_replies) + server_port_flush(port); + + r = 0; +done: + EVDNS_UNLOCK(port); + return r; +} + +/* Free all storage held by RRs in req. 
*/ +static void +server_request_free_answers(struct server_request *req) +{ + struct server_reply_item *victim, *next, **list; + int i; + for (i = 0; i < 3; ++i) { + if (i==0) + list = &req->answer; + else if (i==1) + list = &req->authority; + else + list = &req->additional; + + victim = *list; + while (victim) { + next = victim->next; + mm_free(victim->name); + if (victim->data) + mm_free(victim->data); + mm_free(victim); + victim = next; + } + *list = NULL; + } +} + +/* Free all storage held by req, and remove links to it. */ +/* return true iff we just wound up freeing the server_port. */ +static int +server_request_free(struct server_request *req) +{ + int i, rc=1, lock=0; + if (req->base.questions) { + for (i = 0; i < req->base.nquestions; ++i) + mm_free(req->base.questions[i]); + mm_free(req->base.questions); + } + + if (req->port) { + EVDNS_LOCK(req->port); + lock=1; + if (req->port->pending_replies == req) { + if (req->next_pending && req->next_pending != req) + req->port->pending_replies = req->next_pending; + else + req->port->pending_replies = NULL; + } + rc = --req->port->refcnt; + } + + if (req->response) { + mm_free(req->response); + } + + server_request_free_answers(req); + + if (req->next_pending && req->next_pending != req) { + req->next_pending->prev_pending = req->prev_pending; + req->prev_pending->next_pending = req->next_pending; + } + + if (rc == 0) { + EVDNS_UNLOCK(req->port); /* ????? nickm */ + server_port_free(req->port); + mm_free(req); + return (1); + } + if (lock) + EVDNS_UNLOCK(req->port); + mm_free(req); + return (0); +} + +/* Free all storage held by an evdns_server_port. Only called when */ +static void +server_port_free(struct evdns_server_port *port) +{ + EVUTIL_ASSERT(port); + EVUTIL_ASSERT(!port->refcnt); + EVUTIL_ASSERT(!port->pending_replies); + if (port->socket > 0) { + evutil_closesocket(port->socket); + port->socket = -1; + } + (void) event_del(&port->event); + event_debug_unassign(&port->event); + EVTHREAD_FREE_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + mm_free(port); +} + +/* exported function */ +int +evdns_server_request_drop(struct evdns_server_request *req_) +{ + struct server_request *req = TO_SERVER_REQUEST(req_); + server_request_free(req); + return 0; +} + +/* exported function */ +int +evdns_server_request_get_requesting_addr(struct evdns_server_request *req_, struct sockaddr *sa, int addr_len) +{ + struct server_request *req = TO_SERVER_REQUEST(req_); + if (addr_len < (int)req->addrlen) + return -1; + memcpy(sa, &(req->addr), req->addrlen); + return req->addrlen; +} + +#undef APPEND16 +#undef APPEND32 + +/* this is a libevent callback function which is called when a request */ +/* has timed out. 
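If the request has already been transmitted global_max_retransmits times it is failed with DNS_ERR_TIMEOUT; otherwise it is retransmitted to a freshly picked nameserver, and a server that lets too many requests time out is itself marked as failed.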
*/ +static void +evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg) { + struct request *const req = (struct request *) arg; + struct evdns_base *base = req->base; + + (void) fd; + (void) events; + + log(EVDNS_LOG_DEBUG, "Request %p timed out", arg); + EVDNS_LOCK(base); + + if (req->tx_count >= req->base->global_max_retransmits) { + struct nameserver *ns = req->ns; + /* this request has failed */ + log(EVDNS_LOG_DEBUG, "Giving up on request %p; tx_count==%d", + arg, req->tx_count); + reply_schedule_callback(req, 0, DNS_ERR_TIMEOUT, NULL); + + request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); + nameserver_failed(ns, "request timed out."); + } else { + /* retransmit it */ + log(EVDNS_LOG_DEBUG, "Retransmitting request %p; tx_count==%d", + arg, req->tx_count); + (void) evtimer_del(&req->timeout_event); + request_swap_ns(req, nameserver_pick(base)); + evdns_request_transmit(req); + + req->ns->timedout++; + if (req->ns->timedout > req->base->global_max_nameserver_timeout) { + req->ns->timedout = 0; + nameserver_failed(req->ns, "request timed out."); + } + } + + EVDNS_UNLOCK(base); +} + +/* try to send a request to a given server. */ +/* */ +/* return: */ +/* 0 ok */ +/* 1 temporary failure */ +/* 2 other failure */ +static int +evdns_request_transmit_to(struct request *req, struct nameserver *server) { + int r; + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + + if (server->requests_inflight == 1 && + req->base->disable_when_inactive && + event_add(&server->event, NULL) < 0) { + return 1; + } + + r = sendto(server->socket, (void*)req->request, req->request_len, 0, + (struct sockaddr *)&server->address, server->addrlen); + if (r < 0) { + int err = evutil_socket_geterror(server->socket); + if (EVUTIL_ERR_RW_RETRIABLE(err)) + return 1; + nameserver_failed(req->ns, evutil_socket_error_to_string(err)); + return 2; + } else if (r != (int)req->request_len) { + return 1; /* short write */ + } else { + return 0; + } +} + +/* try to send a request, updating the fields of the request */ +/* as needed */ +/* */ +/* return: */ +/* 0 ok */ +/* 1 failed */ +static int +evdns_request_transmit(struct request *req) { + int retcode = 0, r; + + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + /* if we fail to send this packet then this flag marks it */ + /* for evdns_transmit */ + req->transmit_me = 1; + EVUTIL_ASSERT(req->trans_id != 0xffff); + + if (!req->ns) + { + /* unable to transmit request if no nameservers */ + return 1; + } + + if (req->ns->choked) { + /* don't bother trying to write to a socket */ + /* which we have had EAGAIN from */ + return 1; + } + + r = evdns_request_transmit_to(req, req->ns); + switch (r) { + case 1: + /* temp failure */ + req->ns->choked = 1; + nameserver_write_waiting(req->ns, 1); + return 1; + case 2: + /* failed to transmit the request entirely. we can fallthrough since + * we'll set a timeout, which will time out, and make us retransmit the + * request anyway. */ + retcode = 1; + EVUTIL_FALLTHROUGH; + default: + /* all ok */ + log(EVDNS_LOG_DEBUG, + "Setting timeout for request %p, sent to nameserver %p", req, req->ns); + if (evtimer_add(&req->timeout_event, &req->base->global_timeout) < 0) { + log(EVDNS_LOG_WARN, + "Error from libevent when adding timer for request %p", + req); + /* ???? Do more? 
*/ + } + req->tx_count++; + req->transmit_me = 0; + return retcode; + } +} + +static void +nameserver_probe_callback(int result, char type, int count, int ttl, void *addresses, void *arg) { + struct nameserver *const ns = (struct nameserver *) arg; + (void) type; + (void) count; + (void) ttl; + (void) addresses; + + if (result == DNS_ERR_CANCEL) { + /* We canceled this request because the nameserver came up + * for some other reason. Do not change our opinion about + * the nameserver. */ + return; + } + + EVDNS_LOCK(ns->base); + ns->probe_request = NULL; + if (result == DNS_ERR_NONE || result == DNS_ERR_NOTEXIST) { + /* this is a good reply */ + nameserver_up(ns); + } else { + nameserver_probe_failed(ns); + } + EVDNS_UNLOCK(ns->base); +} + +static void +nameserver_send_probe(struct nameserver *const ns) { + struct evdns_request *handle; + struct request *req; + char addrbuf[128]; + /* here we need to send a probe to a given nameserver */ + /* in the hope that it is up now. */ + + ASSERT_LOCKED(ns->base); + log(EVDNS_LOG_DEBUG, "Sending probe to %s", + evutil_format_sockaddr_port_( + (struct sockaddr *)&ns->address, + addrbuf, sizeof(addrbuf))); + handle = mm_calloc(1, sizeof(*handle)); + if (!handle) return; + req = request_new(ns->base, handle, TYPE_A, "google.com", DNS_QUERY_NO_SEARCH, nameserver_probe_callback, ns); + if (!req) { + mm_free(handle); + return; + } + ns->probe_request = handle; + /* we force this into the inflight queue no matter what */ + request_trans_id_set(req, transaction_id_pick(ns->base)); + req->ns = ns; + request_submit(req); +} + +/* returns: */ +/* 0 didn't try to transmit anything */ +/* 1 tried to transmit something */ +static int +evdns_transmit(struct evdns_base *base) { + char did_try_to_transmit = 0; + int i; + + ASSERT_LOCKED(base); + for (i = 0; i < base->n_req_heads; ++i) { + if (base->req_heads[i]) { + struct request *const started_at = base->req_heads[i], *req = started_at; + /* first transmit all the requests which are currently waiting */ + do { + if (req->transmit_me) { + did_try_to_transmit = 1; + evdns_request_transmit(req); + } + + req = req->next; + } while (req != started_at); + } + } + + return did_try_to_transmit; +} + +/* exported function */ +int +evdns_base_count_nameservers(struct evdns_base *base) +{ + const struct nameserver *server; + int n = 0; + + EVDNS_LOCK(base); + server = base->server_head; + if (!server) + goto done; + do { + ++n; + server = server->next; + } while (server != base->server_head); +done: + EVDNS_UNLOCK(base); + return n; +} + +int +evdns_count_nameservers(void) +{ + return evdns_base_count_nameservers(current_base); +} + +/* exported function */ +int +evdns_base_clear_nameservers_and_suspend(struct evdns_base *base) +{ + struct nameserver *server, *started_at; + int i; + + EVDNS_LOCK(base); + server = base->server_head; + started_at = base->server_head; + if (!server) { + EVDNS_UNLOCK(base); + return 0; + } + while (1) { + struct nameserver *next = server->next; + (void) event_del(&server->event); + if (evtimer_initialized(&server->timeout_event)) + (void) evtimer_del(&server->timeout_event); + if (server->probe_request) { + evdns_cancel_request(server->base, server->probe_request); + server->probe_request = NULL; + } + if (server->socket >= 0) + evutil_closesocket(server->socket); + mm_free(server); + if (next == started_at) + break; + server = next; + } + base->server_head = NULL; + base->global_good_nameservers = 0; + + for (i = 0; i < base->n_req_heads; ++i) { + struct request *req, *req_started_at; + req = 
req_started_at = base->req_heads[i]; + while (req) { + struct request *next = req->next; + req->tx_count = req->reissue_count = 0; + req->ns = NULL; + /* ???? What to do about searches? */ + (void) evtimer_del(&req->timeout_event); + req->trans_id = 0; + req->transmit_me = 0; + + base->global_requests_waiting++; + evdns_request_insert(req, &base->req_waiting_head); + /* We want to insert these suspended elements at the front of + * the waiting queue, since they were pending before any of + * the waiting entries were added. This is a circular list, + * so we can just shift the start back by one.*/ + base->req_waiting_head = base->req_waiting_head->prev; + + if (next == req_started_at) + break; + req = next; + } + base->req_heads[i] = NULL; + } + + base->global_requests_inflight = 0; + + EVDNS_UNLOCK(base); + return 0; +} + +int +evdns_clear_nameservers_and_suspend(void) +{ + return evdns_base_clear_nameservers_and_suspend(current_base); +} + + +/* exported function */ +int +evdns_base_resume(struct evdns_base *base) +{ + EVDNS_LOCK(base); + evdns_requests_pump_waiting_queue(base); + EVDNS_UNLOCK(base); + + return 0; +} + +int +evdns_resume(void) +{ + return evdns_base_resume(current_base); +} + +static int +evdns_nameserver_add_impl_(struct evdns_base *base, const struct sockaddr *address, int addrlen) { + /* first check to see if we already have this nameserver */ + + const struct nameserver *server = base->server_head, *const started_at = base->server_head; + struct nameserver *ns; + int err = 0; + char addrbuf[128]; + + ASSERT_LOCKED(base); + if (server) { + do { + if (!evutil_sockaddr_cmp((struct sockaddr*)&server->address, address, 1)) return 3; + server = server->next; + } while (server != started_at); + } + if (addrlen > (int)sizeof(ns->address)) { + log(EVDNS_LOG_DEBUG, "Addrlen %d too long.", (int)addrlen); + return 2; + } + + ns = (struct nameserver *) mm_malloc(sizeof(struct nameserver)); + if (!ns) return -1; + + memset(ns, 0, sizeof(struct nameserver)); + ns->base = base; + + evtimer_assign(&ns->timeout_event, ns->base->event_base, nameserver_prod_callback, ns); + + ns->socket = evutil_socket_(address->sa_family, + SOCK_DGRAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0); + if (ns->socket < 0) { err = 1; goto out1; } + if (ns->base->interface_name) + { + if (setsockopt(ns->socket, SOL_SOCKET, SO_BINDTODEVICE, + ns->base->interface_name, + strlen(ns->base->interface_name)+1) == -1) + { + err= 2; + goto out2; + } + } + + if (base->global_outgoing_addrlen && + !evutil_sockaddr_is_loopback_(address)) { + if (bind(ns->socket, + (struct sockaddr*)&base->global_outgoing_address, + base->global_outgoing_addrlen) < 0) { + log(EVDNS_LOG_WARN,"Couldn't bind to outgoing address"); + err = 2; + goto out2; + } + } + + if (base->so_rcvbuf) { + if (setsockopt(ns->socket, SOL_SOCKET, SO_RCVBUF, + (void *)&base->so_rcvbuf, sizeof(base->so_rcvbuf))) { + log(EVDNS_LOG_WARN, "Couldn't set SO_RCVBUF to %i", base->so_rcvbuf); + err = -SO_RCVBUF; + goto out2; + } + } + if (base->so_sndbuf) { + if (setsockopt(ns->socket, SOL_SOCKET, SO_SNDBUF, + (void *)&base->so_sndbuf, sizeof(base->so_sndbuf))) { + log(EVDNS_LOG_WARN, "Couldn't set SO_SNDBUF to %i", base->so_sndbuf); + err = -SO_SNDBUF; + goto out2; + } + } + + memcpy(&ns->address, address, addrlen); + ns->addrlen = addrlen; + ns->state = 1; + event_assign(&ns->event, ns->base->event_base, ns->socket, + EV_READ | EV_PERSIST, nameserver_ready_callback, ns); + if (!base->disable_when_inactive && event_add(&ns->event, NULL) < 0) { + err = 2; + goto out2; 
+ } + + log(EVDNS_LOG_DEBUG, "Added nameserver %s as %p", + evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), ns); + + /* insert this nameserver into the list of them */ + if (!base->server_head) { + ns->next = ns->prev = ns; + base->server_head = ns; + } else { + ns->next = base->server_head->next; + ns->prev = base->server_head; + base->server_head->next = ns; + ns->next->prev = ns; + } + + base->global_good_nameservers++; + + return 0; + +out2: + evutil_closesocket(ns->socket); +out1: + event_debug_unassign(&ns->event); + mm_free(ns); + log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d", + evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), err); + return err; +} + +/* exported function */ +int +evdns_base_nameserver_add(struct evdns_base *base, unsigned long int address) +{ + struct sockaddr_in sin; + int res; + memset(&sin, 0, sizeof(sin)); + sin.sin_addr.s_addr = address; + sin.sin_port = htons(53); + sin.sin_family = AF_INET; + EVDNS_LOCK(base); + res = evdns_nameserver_add_impl_(base, (struct sockaddr*)&sin, sizeof(sin)); + EVDNS_UNLOCK(base); + return res; +} + +int +evdns_nameserver_add(unsigned long int address) { + if (!current_base) + current_base = evdns_base_new(NULL, 0); + return evdns_base_nameserver_add(current_base, address); +} + +static void +sockaddr_setport(struct sockaddr *sa, ev_uint16_t port) +{ + if (sa->sa_family == AF_INET) { + ((struct sockaddr_in *)sa)->sin_port = htons(port); + } else if (sa->sa_family == AF_INET6) { + ((struct sockaddr_in6 *)sa)->sin6_port = htons(port); + } +} + +static ev_uint16_t +sockaddr_getport(struct sockaddr *sa) +{ + if (sa->sa_family == AF_INET) { + return ntohs(((struct sockaddr_in *)sa)->sin_port); + } else if (sa->sa_family == AF_INET6) { + return ntohs(((struct sockaddr_in6 *)sa)->sin6_port); + } else { + return 0; + } +} + +/* exported function */ +int +evdns_base_nameserver_ip_add(struct evdns_base *base, const char *ip_as_string) { + struct sockaddr_storage ss; + struct sockaddr *sa; + int len = sizeof(ss); + int res; + if (evutil_parse_sockaddr_port(ip_as_string, (struct sockaddr *)&ss, + &len)) { + log(EVDNS_LOG_WARN, "Unable to parse nameserver address %s", + ip_as_string); + return 4; + } + sa = (struct sockaddr *) &ss; + if (sockaddr_getport(sa) == 0) + sockaddr_setport(sa, 53); + + EVDNS_LOCK(base); + res = evdns_nameserver_add_impl_(base, sa, len); + EVDNS_UNLOCK(base); + return res; +} + +int +evdns_nameserver_ip_add(const char *ip_as_string) { + if (!current_base) + current_base = evdns_base_new(NULL, 0); + return evdns_base_nameserver_ip_add(current_base, ip_as_string); +} + +int +evdns_base_nameserver_sockaddr_add(struct evdns_base *base, + const struct sockaddr *sa, ev_socklen_t len, unsigned flags) +{ + int res; + EVUTIL_ASSERT(base); + EVDNS_LOCK(base); + res = evdns_nameserver_add_impl_(base, sa, len); + EVDNS_UNLOCK(base); + return res; +} + +int +evdns_base_get_nameserver_addr(struct evdns_base *base, int idx, + struct sockaddr *sa, ev_socklen_t len) +{ + int result = -1; + int i; + struct nameserver *server; + EVDNS_LOCK(base); + server = base->server_head; + for (i = 0; i < idx && server; ++i, server = server->next) { + if (server->next == base->server_head) + goto done; + } + if (! 
server) + goto done; + + if (server->addrlen > len) { + result = (int) server->addrlen; + goto done; + } + + memcpy(sa, &server->address, server->addrlen); + result = (int) server->addrlen; +done: + EVDNS_UNLOCK(base); + return result; +} + +/* remove from the queue */ +static void +evdns_request_remove(struct request *req, struct request **head) +{ + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + +#if 0 + { + struct request *ptr; + int found = 0; + EVUTIL_ASSERT(*head != NULL); + + ptr = *head; + do { + if (ptr == req) { + found = 1; + break; + } + ptr = ptr->next; + } while (ptr != *head); + EVUTIL_ASSERT(found); + + EVUTIL_ASSERT(req->next); + } +#endif + + if (req->next == req) { + /* only item in the list */ + *head = NULL; + } else { + req->next->prev = req->prev; + req->prev->next = req->next; + if (*head == req) *head = req->next; + } + req->next = req->prev = NULL; +} + +/* insert into the tail of the queue */ +static void +evdns_request_insert(struct request *req, struct request **head) { + ASSERT_LOCKED(req->base); + ASSERT_VALID_REQUEST(req); + if (!*head) { + *head = req; + req->next = req->prev = req; + return; + } + + req->prev = (*head)->prev; + req->prev->next = req; + req->next = *head; + (*head)->prev = req; +} + +static int +string_num_dots(const char *s) { + int count = 0; + while ((s = strchr(s, '.'))) { + s++; + count++; + } + return count; +} + +static struct request * +request_new(struct evdns_base *base, struct evdns_request *handle, int type, + const char *name, int flags, evdns_callback_type callback, + void *user_ptr) { + + const char issuing_now = + (base->global_requests_inflight < base->global_max_requests_inflight) ? 1 : 0; + + const size_t name_len = strlen(name); + const size_t request_max_len = evdns_request_len(name_len); + const u16 trans_id = issuing_now ? transaction_id_pick(base) : 0xffff; + /* the request data is alloced in a single block with the header */ + struct request *const req = + mm_malloc(sizeof(struct request) + request_max_len); + int rlen; + char namebuf[256]; + (void) flags; + + ASSERT_LOCKED(base); + + if (!req) return NULL; + + if (name_len >= sizeof(namebuf)) { + mm_free(req); + return NULL; + } + + memset(req, 0, sizeof(struct request)); + req->base = base; + + evtimer_assign(&req->timeout_event, req->base->event_base, evdns_request_timeout_callback, req); + + if (base->global_randomize_case) { + unsigned i; + char randbits[(sizeof(namebuf)+7)/8]; + strlcpy(namebuf, name, sizeof(namebuf)); + evutil_secure_rng_get_bytes(randbits, (name_len+7)/8); + for (i = 0; i < name_len; ++i) { + if (EVUTIL_ISALPHA_(namebuf[i])) { + if ((randbits[i >> 3] & (1<<(i & 7)))) + namebuf[i] |= 0x20; + else + namebuf[i] &= ~0x20; + } + } + name = namebuf; + } + + /* request data lives just after the header */ + req->request = ((u8 *) req) + sizeof(struct request); + /* denotes that the request data shouldn't be free()ed */ + req->request_appended = 1; + rlen = evdns_request_data_build(name, name_len, trans_id, + type, CLASS_INET, req->request, request_max_len); + if (rlen < 0) + goto err1; + + req->request_len = rlen; + req->trans_id = trans_id; + req->tx_count = 0; + req->request_type = type; + req->user_pointer = user_ptr; + req->user_callback = callback; + req->ns = issuing_now ? 
nameserver_pick(base) : NULL; + req->next = req->prev = NULL; + req->handle = handle; + if (handle) { + handle->current_req = req; + handle->base = base; + } + + return req; +err1: + mm_free(req); + return NULL; +} + +static void +request_submit(struct request *const req) { + struct evdns_base *base = req->base; + ASSERT_LOCKED(base); + ASSERT_VALID_REQUEST(req); + if (req->ns) { + /* if it has a nameserver assigned then this is going */ + /* straight into the inflight queue */ + evdns_request_insert(req, &REQ_HEAD(base, req->trans_id)); + + base->global_requests_inflight++; + req->ns->requests_inflight++; + + evdns_request_transmit(req); + } else { + evdns_request_insert(req, &base->req_waiting_head); + base->global_requests_waiting++; + } +} + +/* exported function */ +void +evdns_cancel_request(struct evdns_base *base, struct evdns_request *handle) +{ + struct request *req; + + if (!handle->current_req) + return; + + if (!base) { + /* This redundancy is silly; can we fix it? (Not for 2.0) XXXX */ + base = handle->base; + if (!base) + base = handle->current_req->base; + } + + EVDNS_LOCK(base); + if (handle->pending_cb) { + EVDNS_UNLOCK(base); + return; + } + + req = handle->current_req; + ASSERT_VALID_REQUEST(req); + + reply_schedule_callback(req, 0, DNS_ERR_CANCEL, NULL); + if (req->ns) { + /* remove from inflight queue */ + request_finished(req, &REQ_HEAD(base, req->trans_id), 1); + } else { + /* remove from global_waiting head */ + request_finished(req, &base->req_waiting_head, 1); + } + EVDNS_UNLOCK(base); +} + +/* exported function */ +struct evdns_request * +evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags, + evdns_callback_type callback, void *ptr) { + struct evdns_request *handle; + struct request *req; + log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name); + handle = mm_calloc(1, sizeof(*handle)); + if (handle == NULL) + return NULL; + EVDNS_LOCK(base); + if (flags & DNS_QUERY_NO_SEARCH) { + req = + request_new(base, handle, TYPE_A, name, flags, + callback, ptr); + if (req) + request_submit(req); + } else { + search_request_new(base, handle, TYPE_A, name, flags, + callback, ptr); + } + if (handle->current_req == NULL) { + mm_free(handle); + handle = NULL; + } + EVDNS_UNLOCK(base); + return handle; +} + +int evdns_resolve_ipv4(const char *name, int flags, + evdns_callback_type callback, void *ptr) +{ + return evdns_base_resolve_ipv4(current_base, name, flags, callback, ptr) + ? 0 : -1; +} + + +/* exported function */ +struct evdns_request * +evdns_base_resolve_ipv6(struct evdns_base *base, + const char *name, int flags, + evdns_callback_type callback, void *ptr) +{ + struct evdns_request *handle; + struct request *req; + log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name); + handle = mm_calloc(1, sizeof(*handle)); + if (handle == NULL) + return NULL; + EVDNS_LOCK(base); + if (flags & DNS_QUERY_NO_SEARCH) { + req = request_new(base, handle, TYPE_AAAA, name, flags, + callback, ptr); + if (req) + request_submit(req); + } else { + search_request_new(base, handle, TYPE_AAAA, name, flags, + callback, ptr); + } + if (handle->current_req == NULL) { + mm_free(handle); + handle = NULL; + } + EVDNS_UNLOCK(base); + return handle; +} + +int evdns_resolve_ipv6(const char *name, int flags, + evdns_callback_type callback, void *ptr) { + return evdns_base_resolve_ipv6(current_base, name, flags, callback, ptr) + ? 
0 : -1; +} + +struct evdns_request * +evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) { + char buf[32]; + struct evdns_request *handle; + struct request *req; + u32 a; + EVUTIL_ASSERT(in); + a = ntohl(in->s_addr); + evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa", + (int)(u8)((a )&0xff), + (int)(u8)((a>>8 )&0xff), + (int)(u8)((a>>16)&0xff), + (int)(u8)((a>>24)&0xff)); + handle = mm_calloc(1, sizeof(*handle)); + if (handle == NULL) + return NULL; + log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf); + EVDNS_LOCK(base); + req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr); + if (req) + request_submit(req); + if (handle->current_req == NULL) { + mm_free(handle); + handle = NULL; + } + EVDNS_UNLOCK(base); + return (handle); +} + +int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) { + return evdns_base_resolve_reverse(current_base, in, flags, callback, ptr) + ? 0 : -1; +} + +struct evdns_request * +evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) { + /* 32 nybbles, 32 periods, "ip6.arpa", NUL. */ + char buf[73]; + char *cp; + struct evdns_request *handle; + struct request *req; + int i; + EVUTIL_ASSERT(in); + cp = buf; + for (i=15; i >= 0; --i) { + u8 byte = in->s6_addr[i]; + *cp++ = "0123456789abcdef"[byte & 0x0f]; + *cp++ = '.'; + *cp++ = "0123456789abcdef"[byte >> 4]; + *cp++ = '.'; + } + EVUTIL_ASSERT(cp + strlen("ip6.arpa") < buf+sizeof(buf)); + memcpy(cp, "ip6.arpa", strlen("ip6.arpa")+1); + handle = mm_calloc(1, sizeof(*handle)); + if (handle == NULL) + return NULL; + log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf); + EVDNS_LOCK(base); + req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr); + if (req) + request_submit(req); + if (handle->current_req == NULL) { + mm_free(handle); + handle = NULL; + } + EVDNS_UNLOCK(base); + return (handle); +} + +int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) { + return evdns_base_resolve_reverse_ipv6(current_base, in, flags, callback, ptr) + ? 0 : -1; +} + +/* ================================================================= */ +/* Search support */ +/* */ +/* the libc resolver has support for searching a number of domains */ +/* to find a name. If nothing else then it takes the single domain */ +/* from the gethostname() call. */ +/* */ +/* It can also be configured via the domain and search options in a */ +/* resolv.conf. */ +/* */ +/* The ndots option controls how many dots it takes for the resolver */ +/* to decide that a name is non-local and so try a raw lookup first. 
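With the default of ndots 1, for example, a lookup for "www" is first tried with each configured suffix appended (say "www.ripe.net"), while a name that already contains at least one dot is queried as given before any suffixes are attempted.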
*/ + +struct search_domain { + int len; + struct search_domain *next; + /* the text string is appended to this structure */ +}; + +struct search_state { + int refcount; + int ndots; + int num_domains; + struct search_domain *head; +}; + +static void +search_state_decref(struct search_state *const state) { + if (!state) return; + state->refcount--; + if (!state->refcount) { + struct search_domain *next, *dom; + for (dom = state->head; dom; dom = next) { + next = dom->next; + mm_free(dom); + } + mm_free(state); + } +} + +static struct search_state * +search_state_new(void) { + struct search_state *state = (struct search_state *) mm_malloc(sizeof(struct search_state)); + if (!state) return NULL; + memset(state, 0, sizeof(struct search_state)); + state->refcount = 1; + state->ndots = 1; + + return state; +} + +static void +search_postfix_clear(struct evdns_base *base) { + search_state_decref(base->global_search_state); + + base->global_search_state = search_state_new(); +} + +/* exported function */ +void +evdns_base_search_clear(struct evdns_base *base) +{ + EVDNS_LOCK(base); + search_postfix_clear(base); + EVDNS_UNLOCK(base); +} + +void +evdns_search_clear(void) { + evdns_base_search_clear(current_base); +} + +static void +search_postfix_add(struct evdns_base *base, const char *domain) { + size_t domain_len; + struct search_domain *sdomain; + while (domain[0] == '.') domain++; + domain_len = strlen(domain); + + ASSERT_LOCKED(base); + if (!base->global_search_state) base->global_search_state = search_state_new(); + if (!base->global_search_state) return; + base->global_search_state->num_domains++; + + sdomain = (struct search_domain *) mm_malloc(sizeof(struct search_domain) + domain_len); + if (!sdomain) return; + memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len); + sdomain->next = base->global_search_state->head; + sdomain->len = (int) domain_len; + + base->global_search_state->head = sdomain; +} + +/* reverse the order of members in the postfix list. 
This is needed because, */ +/* when parsing resolv.conf we push elements in the wrong order */ +static void +search_reverse(struct evdns_base *base) { + struct search_domain *cur, *prev = NULL, *next; + ASSERT_LOCKED(base); + cur = base->global_search_state->head; + while (cur) { + next = cur->next; + cur->next = prev; + prev = cur; + cur = next; + } + + base->global_search_state->head = prev; +} + +/* exported function */ +void +evdns_base_search_add(struct evdns_base *base, const char *domain) { + EVDNS_LOCK(base); + search_postfix_add(base, domain); + EVDNS_UNLOCK(base); +} +void +evdns_search_add(const char *domain) { + evdns_base_search_add(current_base, domain); +} + +/* exported function */ +void +evdns_base_search_ndots_set(struct evdns_base *base, const int ndots) { + EVDNS_LOCK(base); + if (!base->global_search_state) base->global_search_state = search_state_new(); + if (base->global_search_state) + base->global_search_state->ndots = ndots; + EVDNS_UNLOCK(base); +} +void +evdns_search_ndots_set(const int ndots) { + evdns_base_search_ndots_set(current_base, ndots); +} + +static void +search_set_from_hostname(struct evdns_base *base) { + char hostname[HOST_NAME_MAX + 1], *domainname; + + ASSERT_LOCKED(base); + search_postfix_clear(base); + if (gethostname(hostname, sizeof(hostname))) return; + domainname = strchr(hostname, '.'); + if (!domainname) return; + search_postfix_add(base, domainname); +} + +/* warning: returns malloced string */ +static char * +search_make_new(const struct search_state *const state, int n, const char *const base_name) { + const size_t base_len = strlen(base_name); + char need_to_append_dot; + struct search_domain *dom; + + if (!base_len) return NULL; + need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1; + + for (dom = state->head; dom; dom = dom->next) { + if (!n--) { + /* this is the postfix we want */ + /* the actual postfix string is kept at the end of the structure */ + const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain); + const int postfix_len = dom->len; + char *const newname = (char *) mm_malloc(base_len + need_to_append_dot + postfix_len + 1); + if (!newname) return NULL; + memcpy(newname, base_name, base_len); + if (need_to_append_dot) newname[base_len] = '.'; + memcpy(newname + base_len + need_to_append_dot, postfix, postfix_len); + newname[base_len + need_to_append_dot + postfix_len] = 0; + return newname; + } + } + + /* we ran off the end of the list and still didn't find the requested string */ + EVUTIL_ASSERT(0); + return NULL; /* unreachable; stops warnings in some compilers. 
*/ +} + +static struct request * +search_request_new(struct evdns_base *base, struct evdns_request *handle, + int type, const char *const name, int flags, + evdns_callback_type user_callback, void *user_arg) { + ASSERT_LOCKED(base); + EVUTIL_ASSERT(type == TYPE_A || type == TYPE_AAAA); + EVUTIL_ASSERT(handle->current_req == NULL); + if ( ((flags & DNS_QUERY_NO_SEARCH) == 0) && + base->global_search_state && + base->global_search_state->num_domains) { + /* we have some domains to search */ + struct request *req; + if (string_num_dots(name) >= base->global_search_state->ndots) { + req = request_new(base, handle, type, name, flags, user_callback, user_arg); + if (!req) return NULL; + handle->search_index = -1; + } else { + char *const new_name = search_make_new(base->global_search_state, 0, name); + if (!new_name) return NULL; + req = request_new(base, handle, type, new_name, flags, user_callback, user_arg); + mm_free(new_name); + if (!req) return NULL; + handle->search_index = 0; + } + EVUTIL_ASSERT(handle->search_origname == NULL); + handle->search_origname = mm_strdup(name); + if (handle->search_origname == NULL) { + /* XXX Should we dealloc req? If yes, how? */ + if (req) + mm_free(req); + return NULL; + } + handle->search_state = base->global_search_state; + handle->search_flags = flags; + base->global_search_state->refcount++; + request_submit(req); + return req; + } else { + struct request *const req = request_new(base, handle, type, name, flags, user_callback, user_arg); + if (!req) return NULL; + request_submit(req); + return req; + } +} + +/* this is called when a request has failed to find a name. We need to check */ +/* if it is part of a search and, if so, try the next name in the list */ +/* returns: */ +/* 0 another request has been submitted */ +/* 1 no more requests needed */ +static int +search_try_next(struct evdns_request *const handle) { + struct request *req = handle->current_req; + struct evdns_base *base = req->base; + struct request *newreq; + ASSERT_LOCKED(base); + if (handle->search_state) { + /* it is part of a search */ + char *new_name; + handle->search_index++; + if (handle->search_index >= handle->search_state->num_domains) { + /* no more postfixes to try, however we may need to try */ + /* this name without a postfix */ + if (string_num_dots(handle->search_origname) < handle->search_state->ndots) { + /* yep, we need to try it raw */ + newreq = request_new(base, NULL, req->request_type, handle->search_origname, handle->search_flags, req->user_callback, req->user_pointer); + log(EVDNS_LOG_DEBUG, "Search: trying raw query %s", handle->search_origname); + if (newreq) { + search_request_finished(handle); + goto submit_next; + } + } + return 1; + } + + new_name = search_make_new(handle->search_state, handle->search_index, handle->search_origname); + if (!new_name) return 1; + log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, handle->search_index); + newreq = request_new(base, NULL, req->request_type, new_name, handle->search_flags, req->user_callback, req->user_pointer); + mm_free(new_name); + if (!newreq) return 1; + goto submit_next; + } + return 1; + +submit_next: + request_finished(req, &REQ_HEAD(req->base, req->trans_id), 0); + handle->current_req = newreq; + newreq->handle = handle; + request_submit(newreq); + return 0; +} + +static void +search_request_finished(struct evdns_request *const handle) { + ASSERT_LOCKED(handle->current_req->base); + if (handle->search_state) { + search_state_decref(handle->search_state); + handle->search_state = NULL; + 
} + if (handle->search_origname) { + mm_free(handle->search_origname); + handle->search_origname = NULL; + } +} + +/* ================================================================= */ +/* Parsing resolv.conf files */ + +static void +evdns_resolv_set_defaults(struct evdns_base *base, int flags) { + int add_default = flags & DNS_OPTION_NAMESERVERS; + if (flags & DNS_OPTION_NAMESERVERS_NO_DEFAULT) + add_default = 0; + + /* if the file isn't found then we assume a local resolver */ + ASSERT_LOCKED(base); + if (flags & DNS_OPTION_SEARCH) + search_set_from_hostname(base); + if (add_default) + evdns_base_nameserver_ip_add(base, "127.0.0.1"); +} + +#ifndef EVENT__HAVE_STRTOK_R +static char * +strtok_r(char *s, const char *delim, char **state) { + char *cp, *start; + start = cp = s ? s : *state; + if (!cp) + return NULL; + while (*cp && !strchr(delim, *cp)) + ++cp; + if (!*cp) { + if (cp == start) + return NULL; + *state = NULL; + return start; + } else { + *cp++ = '\0'; + *state = cp; + return start; + } +} +#endif + +/* helper version of atoi which returns -1 on error */ +static int +strtoint(const char *const str) +{ + char *endptr; + const int r = strtol(str, &endptr, 10); + if (*endptr) return -1; + return r; +} + +/* Parse a number of seconds into a timeval; return -1 on error. */ +static int +evdns_strtotimeval(const char *const str, struct timeval *out) +{ + double d; + char *endptr; + d = strtod(str, &endptr); + if (*endptr) return -1; + if (d < 0) return -1; + out->tv_sec = (int) d; + out->tv_usec = (int) ((d - (int) d)*1000000); + if (out->tv_sec == 0 && out->tv_usec < 1000) /* less than 1 msec */ + return -1; + return 0; +} + +/* helper version of atoi that returns -1 on error and clips to bounds. */ +static int +strtoint_clipped(const char *const str, int min, int max) +{ + int r = strtoint(str); + if (r == -1) + return r; + else if (r < min) + return min; + else if (r > max) + return max; + else + return r; +} + +static int +evdns_base_set_max_requests_inflight(struct evdns_base *base, int maxinflight) +{ + int old_n_heads = base->n_req_heads, n_heads; + struct request **old_heads = base->req_heads, **new_heads, *req; + int i; + + ASSERT_LOCKED(base); + if (maxinflight < 1) + maxinflight = 1; + n_heads = (maxinflight+4) / 5; + EVUTIL_ASSERT(n_heads > 0); + new_heads = mm_calloc(n_heads, sizeof(struct request*)); + if (!new_heads) + return (-1); + if (old_heads) { + for (i = 0; i < old_n_heads; ++i) { + while (old_heads[i]) { + req = old_heads[i]; + evdns_request_remove(req, &old_heads[i]); + evdns_request_insert(req, &new_heads[req->trans_id % n_heads]); + } + } + mm_free(old_heads); + } + base->req_heads = new_heads; + base->n_req_heads = n_heads; + base->global_max_requests_inflight = maxinflight; + return (0); +} + +/* exported function */ +int +evdns_base_set_option(struct evdns_base *base, + const char *option, const char *val) +{ + int res; + EVDNS_LOCK(base); + res = evdns_base_set_option_impl(base, option, val, DNS_OPTIONS_ALL); + EVDNS_UNLOCK(base); + return res; +} + +static inline int +str_matches_option(const char *s1, const char *optionname) +{ + /* Option names are given as "option:" We accept either 'option' in + * s1, or 'option:randomjunk'. The latter form is to implement the + * resolv.conf parser.
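+	 * For example (illustrative values): the resolv.conf parser hands the
+	 * whole "option:value" token to this function, so "ndots:3", "ndots:"
+	 * and plain "ndots" all match the option name "ndots:", while a
+	 * shorter or different prefix such as "ndot" does not.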
*/ + size_t optlen = strlen(optionname); + size_t slen = strlen(s1); + if (slen == optlen || slen == optlen - 1) + return !strncmp(s1, optionname, slen); + else if (slen > optlen) + return !strncmp(s1, optionname, optlen); + else + return 0; +} + +static int +evdns_base_set_option_impl(struct evdns_base *base, + const char *option, const char *val, int flags) +{ + ASSERT_LOCKED(base); + if (str_matches_option(option, "ndots:")) { + const int ndots = strtoint(val); + if (ndots == -1) return -1; + if (!(flags & DNS_OPTION_SEARCH)) return 0; + log(EVDNS_LOG_DEBUG, "Setting ndots to %d", ndots); + if (!base->global_search_state) base->global_search_state = search_state_new(); + if (!base->global_search_state) return -1; + base->global_search_state->ndots = ndots; + } else if (str_matches_option(option, "timeout:")) { + struct timeval tv; + if (evdns_strtotimeval(val, &tv) == -1) return -1; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting timeout to %s", val); + memcpy(&base->global_timeout, &tv, sizeof(struct timeval)); + } else if (str_matches_option(option, "getaddrinfo-allow-skew:")) { + struct timeval tv; + if (evdns_strtotimeval(val, &tv) == -1) return -1; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting getaddrinfo-allow-skew to %s", + val); + memcpy(&base->global_getaddrinfo_allow_skew, &tv, + sizeof(struct timeval)); + } else if (str_matches_option(option, "max-timeouts:")) { + const int maxtimeout = strtoint_clipped(val, 1, 255); + if (maxtimeout == -1) return -1; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting maximum allowed timeouts to %d", + maxtimeout); + base->global_max_nameserver_timeout = maxtimeout; + } else if (str_matches_option(option, "max-inflight:")) { + const int maxinflight = strtoint_clipped(val, 1, 65000); + if (maxinflight == -1) return -1; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d", + maxinflight); + evdns_base_set_max_requests_inflight(base, maxinflight); + } else if (str_matches_option(option, "attempts:")) { + int retries = strtoint(val); + if (retries == -1) return -1; + if (retries > 255) retries = 255; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting retries to %d", retries); + base->global_max_retransmits = retries; + } else if (str_matches_option(option, "randomize-case:")) { + int randcase = strtoint(val); + if (!(flags & DNS_OPTION_MISC)) return 0; + base->global_randomize_case = randcase; + } else if (str_matches_option(option, "bind-to:")) { + /* XXX This only applies to successive nameservers, not + * to already-configured ones. We might want to fix that. 
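+	 * A hypothetical usage sketch (the address is illustrative only):
+	 * call evdns_base_set_option(base, "bind-to", "192.0.2.1") before any
+	 * evdns_base_nameserver_ip_add() calls, so that sockets for the
+	 * nameservers added afterwards are bound to that source address.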
*/ + int len = sizeof(base->global_outgoing_address); + if (!(flags & DNS_OPTION_NAMESERVERS)) return 0; + if (evutil_parse_sockaddr_port(val, + (struct sockaddr*)&base->global_outgoing_address, &len)) + return -1; + base->global_outgoing_addrlen = len; + } else if (str_matches_option(option, "initial-probe-timeout:")) { + struct timeval tv; + if (evdns_strtotimeval(val, &tv) == -1) return -1; + if (tv.tv_sec > 3600) + tv.tv_sec = 3600; + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting initial probe timeout to %s", + val); + memcpy(&base->global_nameserver_probe_initial_timeout, &tv, + sizeof(tv)); + } else if (str_matches_option(option, "so-rcvbuf:")) { + int buf = strtoint(val); + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting SO_RCVBUF to %s", val); + base->so_rcvbuf = buf; + } else if (str_matches_option(option, "so-sndbuf:")) { + int buf = strtoint(val); + if (!(flags & DNS_OPTION_MISC)) return 0; + log(EVDNS_LOG_DEBUG, "Setting SO_SNDBUF to %s", val); + base->so_sndbuf = buf; + } + return 0; +} + +int +evdns_set_option(const char *option, const char *val, int flags) +{ + if (!current_base) + current_base = evdns_base_new(NULL, 0); + return evdns_base_set_option(current_base, option, val); +} + +static void +resolv_conf_parse_line(struct evdns_base *base, char *const start, int flags) { + char *strtok_state; + static const char *const delims = " \t"; +#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state) + + + char *const first_token = strtok_r(start, delims, &strtok_state); + ASSERT_LOCKED(base); + if (!first_token) return; + + if (!strcmp(first_token, "nameserver") && (flags & DNS_OPTION_NAMESERVERS)) { + const char *const nameserver = NEXT_TOKEN; + + if (nameserver) + evdns_base_nameserver_ip_add(base, nameserver); + } else if (!strcmp(first_token, "domain") && (flags & DNS_OPTION_SEARCH)) { + const char *const domain = NEXT_TOKEN; + if (domain) { + search_postfix_clear(base); + search_postfix_add(base, domain); + } + } else if (!strcmp(first_token, "search") && (flags & DNS_OPTION_SEARCH)) { + const char *domain; + search_postfix_clear(base); + + while ((domain = NEXT_TOKEN)) { + search_postfix_add(base, domain); + } + search_reverse(base); + } else if (!strcmp(first_token, "options")) { + const char *option; + while ((option = NEXT_TOKEN)) { + const char *val = strchr(option, ':'); + evdns_base_set_option_impl(base, option, val ? val+1 : "", flags); + } + } +#undef NEXT_TOKEN +} + +/* exported function */ +/* returns: */ +/* 0 no errors */ +/* 1 failed to open file */ +/* 2 failed to stat file */ +/* 3 file too large */ +/* 4 out of memory */ +/* 5 short read from file */ +int +evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename) { + int res; + EVDNS_LOCK(base); + res = evdns_base_resolv_conf_parse_impl(base, flags, filename); + EVDNS_UNLOCK(base); + return res; +} + +static char * +evdns_get_default_hosts_filename(void) +{ +#ifdef _WIN32 + /* Windows is a little coy about where it puts its configuration + * files. Sure, they're _usually_ in C:\windows\system32, but + * there's no reason in principle they couldn't be in + * W:\hoboken chicken emergency\ + */ + char path[MAX_PATH+1]; + static const char hostfile[] = "\\drivers\\etc\\hosts"; + char *path_out; + size_t len_out; + + if (! 
SHGetSpecialFolderPathA(NULL, path, CSIDL_SYSTEM, 0)) + return NULL; + len_out = strlen(path)+strlen(hostfile)+1; + path_out = mm_malloc(len_out); + evutil_snprintf(path_out, len_out, "%s%s", path, hostfile); + return path_out; +#else + return mm_strdup("/etc/hosts"); +#endif +} + +static int +evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename) { + size_t n; + char *resolv; + char *start; + int err = 0; + int add_default; + + log(EVDNS_LOG_DEBUG, "Parsing resolv.conf file %s", filename); + + add_default = flags & DNS_OPTION_NAMESERVERS; + if (flags & DNS_OPTION_NAMESERVERS_NO_DEFAULT) + add_default = 0; + + if (flags & DNS_OPTION_HOSTSFILE) { + char *fname = evdns_get_default_hosts_filename(); + evdns_base_load_hosts(base, fname); + if (fname) + mm_free(fname); + } + + if (!filename) { + evdns_resolv_set_defaults(base, flags); + return 1; + } + + if ((err = evutil_read_file_(filename, &resolv, &n, 0)) < 0) { + if (err == -1) { + /* No file. */ + evdns_resolv_set_defaults(base, flags); + return 1; + } else { + return 2; + } + } + + start = resolv; + for (;;) { + char *const newline = strchr(start, '\n'); + if (!newline) { + resolv_conf_parse_line(base, start, flags); + break; + } else { + *newline = 0; + resolv_conf_parse_line(base, start, flags); + start = newline + 1; + } + } + + if (!base->server_head && add_default) { + /* no nameservers were configured. */ + evdns_base_nameserver_ip_add(base, "127.0.0.1"); + err = 6; + } + if (flags & DNS_OPTION_SEARCH && (!base->global_search_state || base->global_search_state->num_domains == 0)) { + search_set_from_hostname(base); + } + + mm_free(resolv); + return err; +} + +int +evdns_resolv_conf_parse(int flags, const char *const filename) { + if (!current_base) + current_base = evdns_base_new(NULL, 0); + return evdns_base_resolv_conf_parse(current_base, flags, filename); +} + + +#ifdef _WIN32 +/* Add multiple nameservers from a space-or-comma-separated list. */ +static int +evdns_nameserver_ip_add_line(struct evdns_base *base, const char *ips) { + const char *addr; + char *buf; + int r; + ASSERT_LOCKED(base); + while (*ips) { + while (isspace(*ips) || *ips == ',' || *ips == '\t') + ++ips; + addr = ips; + while (isdigit(*ips) || *ips == '.' || *ips == ':' || + *ips=='[' || *ips==']') + ++ips; + buf = mm_malloc(ips-addr+1); + if (!buf) return 4; + memcpy(buf, addr, ips-addr); + buf[ips-addr] = '\0'; + r = evdns_base_nameserver_ip_add(base, buf); + mm_free(buf); + if (r) return r; + } + return 0; +} + +typedef DWORD(WINAPI *GetNetworkParams_fn_t)(FIXED_INFO *, DWORD*); + +/* Use the windows GetNetworkParams interface in iphlpapi.dll to */ +/* figure out what our nameservers are. */ +static int +load_nameservers_with_getnetworkparams(struct evdns_base *base) +{ + /* Based on MSDN examples and inspection of c-ares code. 
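+	 * The calls below follow the usual two-step sizing pattern: query
+	 * GetNetworkParams() once with a FIXED_INFO-sized buffer, and if it
+	 * reports ERROR_BUFFER_OVERFLOW, reallocate to the size it returned
+	 * and query again before walking DnsServerList.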
*/ + FIXED_INFO *fixed; + HMODULE handle = 0; + ULONG size = sizeof(FIXED_INFO); + void *buf = NULL; + int status = 0, r, added_any; + IP_ADDR_STRING *ns; + GetNetworkParams_fn_t fn; + + ASSERT_LOCKED(base); + if (!(handle = evutil_load_windows_system_library_( + TEXT("iphlpapi.dll")))) { + log(EVDNS_LOG_WARN, "Could not open iphlpapi.dll"); + status = -1; + goto done; + } + if (!(fn = (GetNetworkParams_fn_t) GetProcAddress(handle, "GetNetworkParams"))) { + log(EVDNS_LOG_WARN, "Could not get address of function."); + status = -1; + goto done; + } + + buf = mm_malloc(size); + if (!buf) { status = 4; goto done; } + fixed = buf; + r = fn(fixed, &size); + if (r != ERROR_SUCCESS && r != ERROR_BUFFER_OVERFLOW) { + status = -1; + goto done; + } + if (r != ERROR_SUCCESS) { + mm_free(buf); + buf = mm_malloc(size); + if (!buf) { status = 4; goto done; } + fixed = buf; + r = fn(fixed, &size); + if (r != ERROR_SUCCESS) { + log(EVDNS_LOG_DEBUG, "fn() failed."); + status = -1; + goto done; + } + } + + EVUTIL_ASSERT(fixed); + added_any = 0; + ns = &(fixed->DnsServerList); + while (ns) { + r = evdns_nameserver_ip_add_line(base, ns->IpAddress.String); + if (r) { + log(EVDNS_LOG_DEBUG,"Could not add nameserver %s to list,error: %d", + (ns->IpAddress.String),(int)GetLastError()); + status = r; + } else { + ++added_any; + log(EVDNS_LOG_DEBUG,"Successfully added %s as nameserver",ns->IpAddress.String); + } + + ns = ns->Next; + } + + if (!added_any) { + log(EVDNS_LOG_DEBUG, "No nameservers added."); + if (status == 0) + status = -1; + } else { + status = 0; + } + + done: + if (buf) + mm_free(buf); + if (handle) + FreeLibrary(handle); + return status; +} + +static int +config_nameserver_from_reg_key(struct evdns_base *base, HKEY key, const TCHAR *subkey) +{ + char *buf; + DWORD bufsz = 0, type = 0; + int status = 0; + + ASSERT_LOCKED(base); + if (RegQueryValueEx(key, subkey, 0, &type, NULL, &bufsz) + != ERROR_MORE_DATA) + return -1; + if (!(buf = mm_malloc(bufsz))) + return -1; + + if (RegQueryValueEx(key, subkey, 0, &type, (LPBYTE)buf, &bufsz) + == ERROR_SUCCESS && bufsz > 1) { + status = evdns_nameserver_ip_add_line(base,buf); + } + + mm_free(buf); + return status; +} + +#define SERVICES_KEY TEXT("System\\CurrentControlSet\\Services\\") +#define WIN_NS_9X_KEY SERVICES_KEY TEXT("VxD\\MSTCP") +#define WIN_NS_NT_KEY SERVICES_KEY TEXT("Tcpip\\Parameters") + +static int +load_nameservers_from_registry(struct evdns_base *base) +{ + int found = 0; + int r; +#define TRY(k, name) \ + if (!found && config_nameserver_from_reg_key(base,k,TEXT(name)) == 0) { \ + log(EVDNS_LOG_DEBUG,"Found nameservers in %s/%s",#k,name); \ + found = 1; \ + } else if (!found) { \ + log(EVDNS_LOG_DEBUG,"Didn't find nameservers in %s/%s", \ + #k,#name); \ + } + + ASSERT_LOCKED(base); + + if (((int)GetVersion()) > 0) { /* NT */ + HKEY nt_key = 0, interfaces_key = 0; + + if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0, + KEY_READ, &nt_key) != ERROR_SUCCESS) { + log(EVDNS_LOG_DEBUG,"Couldn't open nt key, %d",(int)GetLastError()); + return -1; + } + r = RegOpenKeyEx(nt_key, TEXT("Interfaces"), 0, + KEY_QUERY_VALUE|KEY_ENUMERATE_SUB_KEYS, + &interfaces_key); + if (r != ERROR_SUCCESS) { + log(EVDNS_LOG_DEBUG,"Couldn't open interfaces key, %d",(int)GetLastError()); + return -1; + } + TRY(nt_key, "NameServer"); + TRY(nt_key, "DhcpNameServer"); + TRY(interfaces_key, "NameServer"); + TRY(interfaces_key, "DhcpNameServer"); + RegCloseKey(interfaces_key); + RegCloseKey(nt_key); + } else { + HKEY win_key = 0; + if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, 
WIN_NS_9X_KEY, 0, + KEY_READ, &win_key) != ERROR_SUCCESS) { + log(EVDNS_LOG_DEBUG, "Couldn't open registry key, %d", (int)GetLastError()); + return -1; + } + TRY(win_key, "NameServer"); + RegCloseKey(win_key); + } + + if (found == 0) { + log(EVDNS_LOG_WARN,"Didn't find any nameservers."); + } + + return found ? 0 : -1; +#undef TRY +} + +int +evdns_base_config_windows_nameservers(struct evdns_base *base) +{ + int r; + char *fname; + if (base == NULL) + base = current_base; + if (base == NULL) + return -1; + EVDNS_LOCK(base); + fname = evdns_get_default_hosts_filename(); + log(EVDNS_LOG_DEBUG, "Loading hosts entries from %s", fname); + evdns_base_load_hosts(base, fname); + if (fname) + mm_free(fname); + + if (load_nameservers_with_getnetworkparams(base) == 0) { + EVDNS_UNLOCK(base); + return 0; + } + r = load_nameservers_from_registry(base); + + EVDNS_UNLOCK(base); + return r; +} + +int +evdns_config_windows_nameservers(void) +{ + if (!current_base) { + current_base = evdns_base_new(NULL, 1); + return current_base == NULL ? -1 : 0; + } else { + return evdns_base_config_windows_nameservers(current_base); + } +} +#endif + +struct evdns_base * +evdns_base_new(struct event_base *event_base, int flags) +{ + struct evdns_base *base; + + if (evutil_secure_rng_init() < 0) { + log(EVDNS_LOG_WARN, "Unable to seed random number generator; " + "DNS can't run."); + return NULL; + } + + /* Give the evutil library a hook into its evdns-enabled + * functionality. We can't just call evdns_getaddrinfo directly or + * else libevent-core will depend on libevent-extras. */ + evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo); + evutil_set_evdns_getaddrinfo_cancel_fn_(evdns_getaddrinfo_cancel); + + base = mm_malloc(sizeof(struct evdns_base)); + if (base == NULL) + return (NULL); + memset(base, 0, sizeof(struct evdns_base)); + base->req_waiting_head = NULL; + + EVTHREAD_ALLOC_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + EVDNS_LOCK(base); + + /* Set max requests inflight and allocate req_heads. */ + base->req_heads = NULL; + + evdns_base_set_max_requests_inflight(base, 64); + + base->server_head = NULL; + base->event_base = event_base; + base->global_good_nameservers = base->global_requests_inflight = + base->global_requests_waiting = 0; + + base->global_timeout.tv_sec = 5; + base->global_timeout.tv_usec = 0; + base->global_max_reissues = 1; + base->global_max_retransmits = 3; + base->global_max_nameserver_timeout = 3; + base->global_search_state = NULL; + base->global_randomize_case = 1; + base->global_getaddrinfo_allow_skew.tv_sec = 3; + base->global_getaddrinfo_allow_skew.tv_usec = 0; + base->global_nameserver_probe_initial_timeout.tv_sec = 10; + base->global_nameserver_probe_initial_timeout.tv_usec = 0; + + TAILQ_INIT(&base->hostsdb); + +#define EVDNS_BASE_ALL_FLAGS ( \ + EVDNS_BASE_INITIALIZE_NAMESERVERS | \ + EVDNS_BASE_DISABLE_WHEN_INACTIVE | \ + EVDNS_BASE_NAMESERVERS_NO_DEFAULT | \ + 0) + + if (flags & ~EVDNS_BASE_ALL_FLAGS) { + flags = EVDNS_BASE_INITIALIZE_NAMESERVERS; + log(EVDNS_LOG_WARN, + "Unrecognized flag passed to evdns_base_new(). 
Assuming " + "you meant EVDNS_BASE_INITIALIZE_NAMESERVERS."); + } +#undef EVDNS_BASE_ALL_FLAGS + + if (flags & EVDNS_BASE_INITIALIZE_NAMESERVERS) { + int r; + int opts = DNS_OPTIONS_ALL; + if (flags & EVDNS_BASE_NAMESERVERS_NO_DEFAULT) { + opts |= DNS_OPTION_NAMESERVERS_NO_DEFAULT; + } + +#ifdef _WIN32 + r = evdns_base_config_windows_nameservers(base); +#else + r = evdns_base_resolv_conf_parse(base, opts, "/etc/resolv.conf"); +#endif + if (r == -1) { + evdns_base_free_and_unlock(base, 0); + return NULL; + } + } + if (flags & EVDNS_BASE_DISABLE_WHEN_INACTIVE) { + base->disable_when_inactive = 1; + } + + EVDNS_UNLOCK(base); + return base; +} + +int +evdns_init(void) +{ + struct evdns_base *base = evdns_base_new(NULL, 1); + if (base) { + current_base = base; + return 0; + } else { + return -1; + } +} + +const char * +evdns_err_to_string(int err) +{ + switch (err) { + case DNS_ERR_NONE: return "no error"; + case DNS_ERR_FORMAT: return "misformatted query"; + case DNS_ERR_SERVERFAILED: return "server failed"; + case DNS_ERR_NOTEXIST: return "name does not exist"; + case DNS_ERR_NOTIMPL: return "query not implemented"; + case DNS_ERR_REFUSED: return "refused"; + + case DNS_ERR_TRUNCATED: return "reply truncated or ill-formed"; + case DNS_ERR_UNKNOWN: return "unknown"; + case DNS_ERR_TIMEOUT: return "request timed out"; + case DNS_ERR_SHUTDOWN: return "dns subsystem shut down"; + case DNS_ERR_CANCEL: return "dns request canceled"; + case DNS_ERR_NODATA: return "no records in the reply"; + default: return "[Unknown error code]"; + } +} + +static void +evdns_nameserver_free(struct nameserver *server) +{ + if (server->socket >= 0) + evutil_closesocket(server->socket); + (void) event_del(&server->event); + event_debug_unassign(&server->event); + if (server->state == 0) + (void) event_del(&server->timeout_event); + if (server->probe_request) { + evdns_cancel_request(server->base, server->probe_request); + server->probe_request = NULL; + } + event_debug_unassign(&server->timeout_event); + mm_free(server); +} + +static void +evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests) +{ + struct nameserver *server, *server_next; + struct search_domain *dom, *dom_next; + int i; + + /* Requires that we hold the lock. */ + + /* TODO(nickm) we might need to refcount here. 
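+	 * Typical shutdown usage (illustrative): evdns_base_free(base, 1),
+	 * which makes every request still in the queues below fail with
+	 * DNS_ERR_SHUTDOWN before the base itself is released.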
*/ + + for (i = 0; i < base->n_req_heads; ++i) { + while (base->req_heads[i]) { + if (fail_requests) + reply_schedule_callback(base->req_heads[i], 0, DNS_ERR_SHUTDOWN, NULL); + request_finished(base->req_heads[i], &REQ_HEAD(base, base->req_heads[i]->trans_id), 1); + } + } + while (base->req_waiting_head) { + if (fail_requests) + reply_schedule_callback(base->req_waiting_head, 0, DNS_ERR_SHUTDOWN, NULL); + request_finished(base->req_waiting_head, &base->req_waiting_head, 1); + } + base->global_requests_inflight = base->global_requests_waiting = 0; + + for (server = base->server_head; server; server = server_next) { + server_next = server->next; + /** already done something before */ + server->probe_request = NULL; + evdns_nameserver_free(server); + if (server_next == base->server_head) + break; + } + base->server_head = NULL; + base->global_good_nameservers = 0; + + if (base->interface_name) + { + free(base->interface_name); + base->interface_name= NULL; + } + + if (base->global_search_state) { + for (dom = base->global_search_state->head; dom; dom = dom_next) { + dom_next = dom->next; + mm_free(dom); + } + mm_free(base->global_search_state); + base->global_search_state = NULL; + } + + { + struct hosts_entry *victim; + while ((victim = TAILQ_FIRST(&base->hostsdb))) { + TAILQ_REMOVE(&base->hostsdb, victim, next); + mm_free(victim); + } + } + + mm_free(base->req_heads); + + EVDNS_UNLOCK(base); + EVTHREAD_FREE_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + + mm_free(base); +} + +void +evdns_base_free(struct evdns_base *base, int fail_requests) +{ + EVDNS_LOCK(base); + evdns_base_free_and_unlock(base, fail_requests); +} + +void +evdns_base_clear_host_addresses(struct evdns_base *base) +{ + struct hosts_entry *victim; + EVDNS_LOCK(base); + while ((victim = TAILQ_FIRST(&base->hostsdb))) { + TAILQ_REMOVE(&base->hostsdb, victim, next); + mm_free(victim); + } + EVDNS_UNLOCK(base); +} + +int evdns_base_set_interface(struct evdns_base *base, char *interface_name) +{ + if (base->interface_name) + { + free(base->interface_name); + base->interface_name= NULL; + } + if (!interface_name) + return 0; + + base->interface_name= strdup(interface_name); + if (!base->interface_name) + return -1; + return 0; +} + +void +evdns_shutdown(int fail_requests) +{ + if (current_base) { + struct evdns_base *b = current_base; + current_base = NULL; + evdns_base_free(b, fail_requests); + } + evdns_log_fn = NULL; +} + +static int +evdns_base_parse_hosts_line(struct evdns_base *base, char *line) +{ + char *strtok_state; + static const char *const delims = " \t"; + char *const addr = strtok_r(line, delims, &strtok_state); + char *hostname, *hash; + struct sockaddr_storage ss; + int socklen = sizeof(ss); + ASSERT_LOCKED(base); + +#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state) + + if (!addr || *addr == '#') + return 0; + + memset(&ss, 0, sizeof(ss)); + if (evutil_parse_sockaddr_port(addr, (struct sockaddr*)&ss, &socklen)<0) + return -1; + if (socklen > (int)sizeof(struct sockaddr_in6)) + return -1; + + if (sockaddr_getport((struct sockaddr*)&ss)) + return -1; + + while ((hostname = NEXT_TOKEN)) { + struct hosts_entry *he; + size_t namelen; + if ((hash = strchr(hostname, '#'))) { + if (hash == hostname) + return 0; + *hash = '\0'; + } + + namelen = strlen(hostname); + + he = mm_calloc(1, sizeof(struct hosts_entry)+namelen); + if (!he) + return -1; + EVUTIL_ASSERT(socklen <= (int)sizeof(he->addr)); + memcpy(&he->addr, &ss, socklen); + memcpy(he->hostname, hostname, namelen+1); + he->addrlen = socklen; + + 
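/* Entries are appended in file order; find_hosts_entry() later walks
+		 * this list linearly with a case-insensitive hostname compare. */
+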
TAILQ_INSERT_TAIL(&base->hostsdb, he, next); + + if (hash) + return 0; + } + + return 0; +#undef NEXT_TOKEN +} + +static int +evdns_base_load_hosts_impl(struct evdns_base *base, const char *hosts_fname) +{ + char *str=NULL, *cp, *eol; + size_t len; + int err=0; + + ASSERT_LOCKED(base); + + { + struct hosts_entry *victim; + while ((victim = TAILQ_FIRST(&base->hostsdb))) { + TAILQ_REMOVE(&base->hostsdb, victim, next); + mm_free(victim); + } + } + + + if (hosts_fname == NULL || + (err = evutil_read_file_(hosts_fname, &str, &len, 0)) < 0) { + char tmp[64]; + strlcpy(tmp, "127.0.0.1 localhost", sizeof(tmp)); + evdns_base_parse_hosts_line(base, tmp); + strlcpy(tmp, "::1 localhost", sizeof(tmp)); + evdns_base_parse_hosts_line(base, tmp); + return err ? -1 : 0; + } + + /* This will break early if there is a NUL in the hosts file. + * Probably not a problem.*/ + cp = str; + for (;;) { + eol = strchr(cp, '\n'); + + if (eol) { + *eol = '\0'; + evdns_base_parse_hosts_line(base, cp); + cp = eol+1; + } else { + evdns_base_parse_hosts_line(base, cp); + break; + } + } + + mm_free(str); + return 0; +} + +int +evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname) +{ + int res; + if (!base) + base = current_base; + EVDNS_LOCK(base); + res = evdns_base_load_hosts_impl(base, hosts_fname); + EVDNS_UNLOCK(base); + return res; +} + +/* A single request for a getaddrinfo, either v4 or v6. */ +struct getaddrinfo_subrequest { + struct evdns_request *r; + ev_uint32_t type; +}; + +/* State data used to implement an in-progress getaddrinfo. */ +struct evdns_getaddrinfo_request { + struct evdns_base *evdns_base; + /* Copy of the modified 'hints' data that we'll use to build + * answers. */ + struct evutil_addrinfo hints; + /* The callback to invoke when we're done */ + evdns_getaddrinfo_cb user_cb; + /* User-supplied data to give to the callback. */ + void *user_data; + /* The port to use when building sockaddrs. */ + ev_uint16_t port; + /* The sub_request for an A record (if any) */ + struct getaddrinfo_subrequest ipv4_request; + /* The sub_request for an AAAA record (if any) */ + struct getaddrinfo_subrequest ipv6_request; + + /* The cname result that we were told (if any) */ + char *cname_result; + + /* If we have one request answered and one request still inflight, + * then this field holds the answer from the first request... */ + struct evutil_addrinfo *pending_result; + /* And this event is a timeout that will tell us to cancel the second + * request if it's taking a long time. */ + struct event timeout; + + /* And this field holds the error code from the first request... */ + int pending_error; + /* If this is set, the user canceled this request. */ + unsigned user_canceled : 1; + /* If this is set, the user can no longer cancel this request; we're + * just waiting for the free. */ + unsigned request_done : 1; +}; + +/* Convert an evdns errors to the equivalent getaddrinfo error. */ +static int +evdns_err_to_getaddrinfo_err(int e1) +{ + /* XXX Do this better! */ + if (e1 == DNS_ERR_NONE) + return 0; + else if (e1 == DNS_ERR_NOTEXIST) + return EVUTIL_EAI_NONAME; + else + return EVUTIL_EAI_FAIL; +} + +/* Return the more informative of two getaddrinfo errors. */ +static int +getaddrinfo_merge_err(int e1, int e2) +{ + /* XXXX be cleverer here. */ + if (e1 == 0) + return e2; + else + return e1; +} + +static void +free_getaddrinfo_request(struct evdns_getaddrinfo_request *data) +{ + /* DO NOT CALL this if either of the requests is pending. 
Only once + * both callbacks have been invoked is it safe to free the request */ + if (data->pending_result) + evutil_freeaddrinfo(data->pending_result); + if (data->cname_result) + mm_free(data->cname_result); + event_del(&data->timeout); + mm_free(data); + return; +} + +static void +add_cname_to_reply(struct evdns_getaddrinfo_request *data, + struct evutil_addrinfo *ai) +{ + if (data->cname_result && ai) { + ai->ai_canonname = data->cname_result; + data->cname_result = NULL; + } +} + +/* Callback: invoked when one request in a mixed-format A/AAAA getaddrinfo + * request has finished, but the other one took too long to answer. Pass + * along the answer we got, and cancel the other request. + */ +static void +evdns_getaddrinfo_timeout_cb(evutil_socket_t fd, short what, void *ptr) +{ + int v4_timedout = 0, v6_timedout = 0; + struct evdns_getaddrinfo_request *data = ptr; + + /* Cancel any pending requests, and note which one */ + if (data->ipv4_request.r) { + /* XXXX This does nothing if the request's callback is already + * running (pending_cb is set). */ + evdns_cancel_request(NULL, data->ipv4_request.r); + v4_timedout = 1; + EVDNS_LOCK(data->evdns_base); + ++data->evdns_base->getaddrinfo_ipv4_timeouts; + EVDNS_UNLOCK(data->evdns_base); + } + if (data->ipv6_request.r) { + /* XXXX This does nothing if the request's callback is already + * running (pending_cb is set). */ + evdns_cancel_request(NULL, data->ipv6_request.r); + v6_timedout = 1; + EVDNS_LOCK(data->evdns_base); + ++data->evdns_base->getaddrinfo_ipv6_timeouts; + EVDNS_UNLOCK(data->evdns_base); + } + + /* We only use this timeout callback when we have an answer for + * one address. */ + EVUTIL_ASSERT(!v4_timedout || !v6_timedout); + + /* Report the outcome of the other request that didn't time out. */ + if (data->pending_result) { + add_cname_to_reply(data, data->pending_result); + data->user_cb(0, data->pending_result, data->user_data); + data->pending_result = NULL; + } else { + int e = data->pending_error; + if (!e) + e = EVUTIL_EAI_AGAIN; + data->user_cb(e, NULL, data->user_data); + } + + data->user_cb = NULL; /* prevent double-call if evdns callbacks are + * in-progress. XXXX It would be better if this + * weren't necessary. */ + + if (!v4_timedout && !v6_timedout) { + /* should be impossible? 
XXXX */ + free_getaddrinfo_request(data); + } +} + +static int +evdns_getaddrinfo_set_timeout(struct evdns_base *evdns_base, + struct evdns_getaddrinfo_request *data) +{ + return event_add(&data->timeout, &evdns_base->global_getaddrinfo_allow_skew); +} + +static inline int +evdns_result_is_answer(int result) +{ + return (result != DNS_ERR_NOTIMPL && result != DNS_ERR_REFUSED && + result != DNS_ERR_SERVERFAILED && result != DNS_ERR_CANCEL); +} + +static void +evdns_getaddrinfo_gotresolve(int result, char type, int count, + int ttl, void *addresses, void *arg) +{ + int i; + struct getaddrinfo_subrequest *req = arg; + struct getaddrinfo_subrequest *other_req; + struct evdns_getaddrinfo_request *data; + + struct evutil_addrinfo *res; + + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + struct sockaddr *sa; + int socklen, addrlen; + void *addrp; + int err; + int user_canceled; + + EVUTIL_ASSERT(req->type == DNS_IPv4_A || req->type == DNS_IPv6_AAAA); + if (req->type == DNS_IPv4_A) { + data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv4_request); + other_req = &data->ipv6_request; + } else { + data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv6_request); + other_req = &data->ipv4_request; + } + + /** Called from evdns_base_free() with @fail_requests == 1 */ + if (result != DNS_ERR_SHUTDOWN) { + EVDNS_LOCK(data->evdns_base); + if (evdns_result_is_answer(result)) { + if (req->type == DNS_IPv4_A) + ++data->evdns_base->getaddrinfo_ipv4_answered; + else + ++data->evdns_base->getaddrinfo_ipv6_answered; + } + user_canceled = data->user_canceled; + if (other_req->r == NULL) + data->request_done = 1; + EVDNS_UNLOCK(data->evdns_base); + } else { + data->evdns_base = NULL; + user_canceled = data->user_canceled; + } + + req->r = NULL; + + if (result == DNS_ERR_CANCEL && ! user_canceled) { + /* Internal cancel request from timeout or internal error. + * we already answered the user. */ + if (other_req->r == NULL) + free_getaddrinfo_request(data); + return; + } + + if (data->user_cb == NULL) { + /* We already answered. XXXX This shouldn't be needed; see + * comments in evdns_getaddrinfo_timeout_cb */ + free_getaddrinfo_request(data); + return; + } + + if (result == DNS_ERR_NONE) { + if (count == 0) + err = EVUTIL_EAI_NODATA; + else + err = 0; + } else { + err = evdns_err_to_getaddrinfo_err(result); + } + + if (err) { + /* Looks like we got an error. */ + if (other_req->r) { + /* The other request is still working; maybe it will + * succeed. */ + /* XXXX handle failure from set_timeout */ + if (result != DNS_ERR_SHUTDOWN) { + evdns_getaddrinfo_set_timeout(data->evdns_base, data); + } + data->pending_error = err; + return; + } + + if (user_canceled) { + data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data); + } else if (data->pending_result) { + /* If we have an answer waiting, and we weren't + * canceled, ignore this error. */ + add_cname_to_reply(data, data->pending_result); + data->user_cb(0, data->pending_result, data->user_data); + data->pending_result = NULL; + } else { + if (data->pending_error) + err = getaddrinfo_merge_err(err, + data->pending_error); + data->user_cb(err, NULL, data->user_data); + } + free_getaddrinfo_request(data); + return; + } else if (user_canceled) { + if (other_req->r) { + /* The other request is still working; let it hit this + * callback with EVUTIL_EAI_CANCEL callback and report + * the failure. 
*/ + return; + } + data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data); + free_getaddrinfo_request(data); + return; + } + + /* Looks like we got some answers. We should turn them into addrinfos + * and then either queue those or return them all. */ + EVUTIL_ASSERT(type == DNS_IPv4_A || type == DNS_IPv6_AAAA); + + if (type == DNS_IPv4_A) { + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = htons(data->port); + + sa = (struct sockaddr *)&sin; + socklen = sizeof(sin); + addrlen = 4; + addrp = &sin.sin_addr.s_addr; + } else { + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_port = htons(data->port); + + sa = (struct sockaddr *)&sin6; + socklen = sizeof(sin6); + addrlen = 16; + addrp = &sin6.sin6_addr.s6_addr; + } + + res = NULL; + for (i=0; i < count; ++i) { + struct evutil_addrinfo *ai; + memcpy(addrp, ((char*)addresses)+i*addrlen, addrlen); + ai = evutil_new_addrinfo_(sa, socklen, &data->hints); + if (!ai) { + if (other_req->r) { + evdns_cancel_request(NULL, other_req->r); + } + data->user_cb(EVUTIL_EAI_MEMORY, NULL, data->user_data); + if (res) + evutil_freeaddrinfo(res); + + if (other_req->r == NULL) + free_getaddrinfo_request(data); + return; + } + res = evutil_addrinfo_append_(res, ai); + } + + if (other_req->r) { + /* The other request is still in progress; wait for it */ + /* XXXX handle failure from set_timeout */ + evdns_getaddrinfo_set_timeout(data->evdns_base, data); + data->pending_result = res; + return; + } else { + /* The other request is done or never started; append its + * results (if any) and return them. */ + if (data->pending_result) { + if (req->type == DNS_IPv4_A) + res = evutil_addrinfo_append_(res, + data->pending_result); + else + res = evutil_addrinfo_append_( + data->pending_result, res); + data->pending_result = NULL; + } + + /* Call the user callback. */ + add_cname_to_reply(data, res); + data->user_cb(0, res, data->user_data); + + /* Free data. */ + free_getaddrinfo_request(data); + } +} + +static struct hosts_entry * +find_hosts_entry(struct evdns_base *base, const char *hostname, + struct hosts_entry *find_after) +{ + struct hosts_entry *e; + + if (find_after) + e = TAILQ_NEXT(find_after, next); + else + e = TAILQ_FIRST(&base->hostsdb); + + for (; e; e = TAILQ_NEXT(e, next)) { + if (!evutil_ascii_strcasecmp(e->hostname, hostname)) + return e; + } + return NULL; +} + +static int +evdns_getaddrinfo_fromhosts(struct evdns_base *base, + const char *nodename, struct evutil_addrinfo *hints, ev_uint16_t port, + struct evutil_addrinfo **res) +{ + int n_found = 0; + struct hosts_entry *e; + struct evutil_addrinfo *ai=NULL; + int f = hints->ai_family; + + EVDNS_LOCK(base); + for (e = find_hosts_entry(base, nodename, NULL); e; + e = find_hosts_entry(base, nodename, e)) { + struct evutil_addrinfo *ai_new; + ++n_found; + if ((e->addr.sa.sa_family == AF_INET && f == PF_INET6) || + (e->addr.sa.sa_family == AF_INET6 && f == PF_INET)) + continue; + ai_new = evutil_new_addrinfo_(&e->addr.sa, e->addrlen, hints); + if (!ai_new) { + n_found = 0; + goto out; + } + sockaddr_setport(ai_new->ai_addr, port); + ai = evutil_addrinfo_append_(ai, ai_new); + } + EVDNS_UNLOCK(base); +out: + if (n_found) { + /* Note that we return an empty answer if we found entries for + * this hostname but none were of the right address type. 
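+	 * (For instance, a hosts file that lists only an IPv6 address for the
+	 * name while the caller asked for PF_INET ends up here with ai still
+	 * NULL.)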
*/ + *res = ai; + return 0; + } else { + if (ai) + evutil_freeaddrinfo(ai); + return -1; + } +} + +struct evdns_getaddrinfo_request * +evdns_getaddrinfo(struct evdns_base *dns_base, + const char *nodename, const char *servname, + const struct evutil_addrinfo *hints_in, + evdns_getaddrinfo_cb cb, void *arg) +{ + struct evdns_getaddrinfo_request *data; + struct evutil_addrinfo hints; + struct evutil_addrinfo *res = NULL; + int err; + int port = 0; + int want_cname = 0; + int started = 0; + + if (!dns_base) { + dns_base = current_base; + if (!dns_base) { + log(EVDNS_LOG_WARN, + "Call to getaddrinfo_async with no " + "evdns_base configured."); + cb(EVUTIL_EAI_FAIL, NULL, arg); /* ??? better error? */ + return NULL; + } + } + + /* If we _must_ answer this immediately, do so. */ + if ((hints_in && (hints_in->ai_flags & EVUTIL_AI_NUMERICHOST))) { + res = NULL; + err = evutil_getaddrinfo(nodename, servname, hints_in, &res); + cb(err, res, arg); + return NULL; + } + + if (hints_in) { + memcpy(&hints, hints_in, sizeof(hints)); + } else { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + } + + evutil_adjust_hints_for_addrconfig_(&hints); + + /* Now try to see if we _can_ answer immediately. */ + /* (It would be nice to do this by calling getaddrinfo directly, with + * AI_NUMERICHOST, on plaforms that have it, but we can't: there isn't + * a reliable way to distinguish the "that wasn't a numeric host!" case + * from any other EAI_NONAME cases.) */ + err = evutil_getaddrinfo_common_(nodename, servname, &hints, &res, &port); + if (err != EVUTIL_EAI_NEED_RESOLVE) { + cb(err, res, arg); + return NULL; + } + + /* If there is an entry in the hosts file, we should give it now. */ + if (!evdns_getaddrinfo_fromhosts(dns_base, nodename, &hints, port, &res)) { + cb(0, res, arg); + return NULL; + } + + /* Okay, things are serious now. We're going to need to actually + * launch a request. + */ + data = mm_calloc(1,sizeof(struct evdns_getaddrinfo_request)); + if (!data) { + cb(EVUTIL_EAI_MEMORY, NULL, arg); + return NULL; + } + + memcpy(&data->hints, &hints, sizeof(data->hints)); + data->port = (ev_uint16_t)port; + data->ipv4_request.type = DNS_IPv4_A; + data->ipv6_request.type = DNS_IPv6_AAAA; + data->user_cb = cb; + data->user_data = arg; + data->evdns_base = dns_base; + + want_cname = (hints.ai_flags & EVUTIL_AI_CANONNAME); + + /* If we are asked for a PF_UNSPEC address, we launch two requests in + * parallel: one for an A address and one for an AAAA address. We + * can't send just one request, since many servers only answer one + * question per DNS request. + * + * Once we have the answer to one request, we allow for a short + * timeout before we report it, to see if the other one arrives. If + * they both show up in time, then we report both the answers. + * + * If too many addresses of one type time out or fail, we should stop + * launching those requests. (XXX we don't do that yet.) 
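+	 *
+	 * A minimal caller sketch (illustrative only; the callback name and the
+	 * looked-up name are not part of this file):
+	 *
+	 *     static void on_resolved(int err, struct evutil_addrinfo *ai, void *arg)
+	 *     {
+	 *         if (err == 0 && ai)
+	 *             evutil_freeaddrinfo(ai);
+	 *     }
+	 *
+	 *     struct evutil_addrinfo hints;
+	 *     memset(&hints, 0, sizeof(hints));
+	 *     hints.ai_family = PF_UNSPEC;
+	 *     hints.ai_socktype = SOCK_STREAM;
+	 *     evdns_getaddrinfo(dns_base, "example.com", "80", &hints,
+	 *         on_resolved, NULL);
+	 *
+	 * PF_UNSPEC is what selects the parallel A/AAAA path described above;
+	 * the callback owns the merged result list and frees it with
+	 * evutil_freeaddrinfo().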
+ */ + + EVDNS_LOCK(dns_base); + + if (hints.ai_family != PF_INET6) { + log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv4 as %p", + nodename, &data->ipv4_request); + + data->ipv4_request.r = evdns_base_resolve_ipv4(dns_base, + nodename, 0, evdns_getaddrinfo_gotresolve, + &data->ipv4_request); + if (want_cname && data->ipv4_request.r) + data->ipv4_request.r->current_req->put_cname_in_ptr = + &data->cname_result; + } + if (hints.ai_family != PF_INET) { + log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv6 as %p", + nodename, &data->ipv6_request); + + data->ipv6_request.r = evdns_base_resolve_ipv6(dns_base, + nodename, 0, evdns_getaddrinfo_gotresolve, + &data->ipv6_request); + if (want_cname && data->ipv6_request.r) + data->ipv6_request.r->current_req->put_cname_in_ptr = + &data->cname_result; + } + + evtimer_assign(&data->timeout, dns_base->event_base, + evdns_getaddrinfo_timeout_cb, data); + + started = (data->ipv4_request.r || data->ipv6_request.r); + + EVDNS_UNLOCK(dns_base); + + if (started) { + return data; + } else { + mm_free(data); + cb(EVUTIL_EAI_FAIL, NULL, arg); + return NULL; + } +} + +void +evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *data) +{ + EVDNS_LOCK(data->evdns_base); + if (data->request_done) { + EVDNS_UNLOCK(data->evdns_base); + return; + } + event_del(&data->timeout); + data->user_canceled = 1; + if (data->ipv4_request.r) + evdns_cancel_request(data->evdns_base, data->ipv4_request.r); + if (data->ipv6_request.r) + evdns_cancel_request(data->evdns_base, data->ipv6_request.r); + EVDNS_UNLOCK(data->evdns_base); +} diff --git a/probe-busybox/libevent-2.1.11-stable/event-config.h.cmake b/probe-busybox/libevent-2.1.11-stable/event-config.h.cmake new file mode 100644 index 00000000..498ab1ea --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event-config.h.cmake @@ -0,0 +1,507 @@ +/* event-config.h + * + * This file was generated by cmake when the makefiles were generated. + * + * DO NOT EDIT THIS FILE. + * + * Do not rely on macros in this file existing in later versions. + */ +#ifndef EVENT2_EVENT_CONFIG_H_INCLUDED_ +#define EVENT2_EVENT_CONFIG_H_INCLUDED_ + +/* Numeric representation of the version */ +#define EVENT__NUMERIC_VERSION @EVENT_NUMERIC_VERSION@ +#define EVENT__PACKAGE_VERSION "@EVENT_PACKAGE_VERSION@" + +#define EVENT__VERSION_MAJOR @EVENT_VERSION_MAJOR@ +#define EVENT__VERSION_MINOR @EVENT_VERSION_MINOR@ +#define EVENT__VERSION_PATCH @EVENT_VERSION_PATCH@ + +/* Version number of package */ +#define EVENT__VERSION "@EVENT_VERSION@" + +/* Name of package */ +#define EVENT__PACKAGE "libevent" + +/* Define to the address where bug reports for this package should be sent. */ +#define EVENT__PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define EVENT__PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define EVENT__PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define EVENT__PACKAGE_TARNAME "" + +/* Define if libevent should build without support for a debug mode */ +#cmakedefine EVENT__DISABLE_DEBUG_MODE 1 + +/* Define if libevent should not allow replacing the mm functions */ +#cmakedefine EVENT__DISABLE_MM_REPLACEMENT 1 + +/* Define if libevent should not be compiled with thread support */ +#cmakedefine EVENT__DISABLE_THREAD_SUPPORT 1 + +/* Define to 1 if you have the `accept4' function. */ +#cmakedefine EVENT__HAVE_ACCEPT4 1 + +/* Define to 1 if you have the `arc4random' function. 
*/ +#cmakedefine EVENT__HAVE_ARC4RANDOM 1 + +/* Define to 1 if you have the `arc4random_buf' function. */ +#cmakedefine EVENT__HAVE_ARC4RANDOM_BUF 1 + +/* Define to 1 if you have the `arc4random_addrandom' function. */ +#cmakedefine EVENT__HAVE_ARC4RANDOM_ADDRANDOM 1 + +/* Define if clock_gettime is available in libc */ +#cmakedefine EVENT__DNS_USE_CPU_CLOCK_FOR_ID 1 + +/* Define is no secure id variant is available */ +#cmakedefine EVENT__DNS_USE_GETTIMEOFDAY_FOR_ID 1 +#cmakedefine EVENT__DNS_USE_FTIME_FOR_ID 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the `clock_gettime' function. */ +#cmakedefine EVENT__HAVE_CLOCK_GETTIME 1 + +/* Define to 1 if you have the declaration of `CTL_KERN'. */ +#define EVENT__HAVE_DECL_CTL_KERN @EVENT__HAVE_DECL_CTL_KERN@ + +/* Define to 1 if you have the declaration of `KERN_ARND'. */ +#define EVENT__HAVE_DECL_KERN_ARND @EVENT__HAVE_DECL_KERN_ARND@ + +/* Define to 1 if you have the declaration of `KERN_RANDOM'. */ +#define EVENT__HAVE_DECL_KERN_RANDOM @EVENT__HAVE_DECL_KERN_RANDOM@ + +/* Define to 1 if you have the declaration of `RANDOM_UUID'. */ +#define EVENT__HAVE_DECL_RANDOM_UUID @EVENT__HAVE_DECL_RANDOM_UUID@ + +/* Define if /dev/poll is available */ +#cmakedefine EVENT__HAVE_DEVPOLL 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_NETDB_H 1 + +/* Define to 1 if fd_mask type is defined */ +#cmakedefine EVENT__HAVE_FD_MASK 1 + +/* Define to 1 if the header file defines TAILQ_FOREACH. */ +#cmakedefine EVENT__HAVE_TAILQFOREACH 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_DLFCN_H 1 + +/* Define if your system supports the epoll system calls */ +#cmakedefine EVENT__HAVE_EPOLL 1 + +/* Define to 1 if you have the `epoll_create1' function. */ +#cmakedefine EVENT__HAVE_EPOLL_CREATE1 1 + +/* Define to 1 if you have the `epoll_ctl' function. */ +#cmakedefine EVENT__HAVE_EPOLL_CTL 1 + +/* Define to 1 if you have the `eventfd' function. */ +#cmakedefine EVENT__HAVE_EVENTFD 1 + +/* Define if your system supports event ports */ +#cmakedefine EVENT__HAVE_EVENT_PORTS 1 + +/* Define to 1 if you have the `fcntl' function. */ +#cmakedefine EVENT__HAVE_FCNTL 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_FCNTL_H 1 + +/* Define to 1 if you have the `getaddrinfo' function. */ +#cmakedefine EVENT__HAVE_GETADDRINFO 1 + +/* Define to 1 if you have the `getegid' function. */ +#cmakedefine EVENT__HAVE_GETEGID 1 + +/* Define to 1 if you have the `geteuid' function. */ +#cmakedefine EVENT__HAVE_GETEUID 1 + +/* TODO: Check for different gethostname argument counts. CheckPrototypeDefinition.cmake can be used. */ +/* Define this if you have any gethostbyname_r() */ +#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R 1 + +/* Define this if gethostbyname_r takes 3 arguments */ +#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_3_ARG 1 + +/* Define this if gethostbyname_r takes 5 arguments */ +#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_5_ARG 1 + +/* Define this if gethostbyname_r takes 6 arguments */ +#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_6_ARG 1 + +/* Define to 1 if you have the `getifaddrs' function. */ +#cmakedefine EVENT__HAVE_GETIFADDRS 1 + +/* Define to 1 if you have the `getnameinfo' function. */ +#cmakedefine EVENT__HAVE_GETNAMEINFO 1 + +/* Define to 1 if you have the `getprotobynumber' function. */ +#cmakedefine EVENT__HAVE_GETPROTOBYNUMBER 1 + +/* Define to 1 if you have the `getservbyname' function. 
*/ +#cmakedefine EVENT__HAVE_GETSERVBYNAME 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#cmakedefine EVENT__HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_IFADDRS_H 1 + +/* Define to 1 if you have the `inet_ntop' function. */ +#cmakedefine EVENT__HAVE_INET_NTOP 1 + +/* Define to 1 if you have the `inet_pton' function. */ +#cmakedefine EVENT__HAVE_INET_PTON 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `issetugid' function. */ +#cmakedefine EVENT__HAVE_ISSETUGID 1 + +/* Define to 1 if you have the `kqueue' function. */ +#cmakedefine EVENT__HAVE_KQUEUE 1 + +/* Define if the system has zlib */ +#cmakedefine EVENT__HAVE_LIBZ 1 + +/* Define to 1 if you have the `mach_absolute_time' function. */ +#cmakedefine EVENT__HAVE_MACH_ABSOLUTE_TIME 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_MACH_MACH_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mmap' function. */ +#cmakedefine EVENT__HAVE_MMAP 1 + +/* Define to 1 if you have the `nanosleep' function. */ +#cmakedefine EVENT__HAVE_NANOSLEEP 1 + +/* Define to 1 if you have the `usleep' function. */ +#cmakedefine EVENT__HAVE_USLEEP 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_NETINET_IN6_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_NETINET_TCP_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_UN_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_AFUNIX_H 1 + +/* Define if the system has openssl */ +#cmakedefine EVENT__HAVE_OPENSSL 1 + +/* Define to 1 if you have the `pipe' function. */ +#cmakedefine EVENT__HAVE_PIPE 1 + +/* Define to 1 if you have the `pipe2' function. */ +#cmakedefine EVENT__HAVE_PIPE2 1 + +/* Define to 1 if you have the `poll' function. */ +#cmakedefine EVENT__HAVE_POLL 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_POLL_H 1 + +/* Define to 1 if you have the `port_create' function. */ +#cmakedefine EVENT__HAVE_PORT_CREATE 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_PORT_H 1 + +/* Define if we have pthreads on this system */ +#cmakedefine EVENT__HAVE_PTHREADS 1 + +/* Define to 1 if you have the `putenv' function. */ +#cmakedefine EVENT__HAVE_PUTENV 1 + +/* Define to 1 if the system has the type `sa_family_t'. */ +#cmakedefine EVENT__HAVE_SA_FAMILY_T 1 + +/* Define to 1 if you have the `select' function. */ +#cmakedefine EVENT__HAVE_SELECT 1 + +/* Define to 1 if you have the `setenv' function. */ +#cmakedefine EVENT__HAVE_SETENV 1 + +/* Define if F_SETFD is defined in */ +#cmakedefine EVENT__HAVE_SETFD 1 + +/* Define to 1 if you have the `setrlimit' function. */ +#cmakedefine EVENT__HAVE_SETRLIMIT 1 + +/* Define to 1 if you have the `sendfile' function. */ +#cmakedefine EVENT__HAVE_SENDFILE 1 + +/* Define to 1 if you have the `sigaction' function. */ +#cmakedefine EVENT__HAVE_SIGACTION 1 + +/* Define to 1 if you have the `signal' function. */ +#cmakedefine EVENT__HAVE_SIGNAL 1 + +/* Define to 1 if you have the `splice' function. */ +#cmakedefine EVENT__HAVE_SPLICE 1 + +/* Define to 1 if you have the header file. 
*/ +#cmakedefine EVENT__HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcpy' function. */ +#cmakedefine EVENT__HAVE_STRLCPY 1 + +/* Define to 1 if you have the `strsep' function. */ +#cmakedefine EVENT__HAVE_STRSEP 1 + +/* Define to 1 if you have the `strtok_r' function. */ +#cmakedefine EVENT__HAVE_STRTOK_R 1 + +/* Define to 1 if you have the `strtoll' function. */ +#cmakedefine EVENT__HAVE_STRTOLL 1 + +/* Define to 1 if the system has the type `struct addrinfo'. */ +#cmakedefine EVENT__HAVE_STRUCT_ADDRINFO 1 + +/* Define to 1 if the system has the type `struct in6_addr'. */ +#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR 1 + +/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */ +#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1 + +/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */ +#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1 + +/* Define to 1 if the system has the type `struct sockaddr_in6'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN6 1 + +/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 1 + +/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 1 + +/* Define to 1 if the system has the type `struct sockaddr_un'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_UN 1 + +/* Define to 1 if the system has the type `struct sockaddr_storage'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE 1 + +/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1 + +/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */ +#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY 1 + +/* Define to 1 if the system has the type `struct linger'. */ +#cmakedefine EVENT__HAVE_STRUCT_LINGER 1 + +/* Define to 1 if you have the `sysctl' function. */ +#cmakedefine EVENT__HAVE_SYSCTL 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_EPOLL_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_EVENTFD_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_EVENT_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_IOCTL_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_PARAM_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_QUEUE_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_RESOURCE_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_SENDFILE_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. 
*/ +#cmakedefine EVENT__HAVE_SYS_SYSCTL_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_TIMERFD_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_UIO_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_SYS_WAIT_H 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_ERRNO_H 1 + +/* Define if TAILQ_FOREACH is defined in */ +#cmakedefine EVENT__HAVE_TAILQFOREACH 1 + +/* Define if timeradd is defined in */ +#cmakedefine EVENT__HAVE_TIMERADD 1 + +/* Define if timerclear is defined in */ +#cmakedefine EVENT__HAVE_TIMERCLEAR 1 + +/* Define if timercmp is defined in */ +#cmakedefine EVENT__HAVE_TIMERCMP 1 + + +/* Define to 1 if you have the `timerfd_create' function. */ +#cmakedefine EVENT__HAVE_TIMERFD_CREATE 1 + +/* Define if timerisset is defined in */ +#cmakedefine EVENT__HAVE_TIMERISSET 1 + +/* Define to 1 if the system has the type `uint8_t'. */ +#cmakedefine EVENT__HAVE_UINT8_T 1 + +/* Define to 1 if the system has the type `uint16_t'. */ +#cmakedefine EVENT__HAVE_UINT16_T 1 + +/* Define to 1 if the system has the type `uint32_t'. */ +#cmakedefine EVENT__HAVE_UINT32_T 1 + +/* Define to 1 if the system has the type `uint64_t'. */ +#cmakedefine EVENT__HAVE_UINT64_T 1 + +/* Define to 1 if the system has the type `uintptr_t'. */ +#cmakedefine EVENT__HAVE_UINTPTR_T 1 + +/* Define to 1 if you have the `umask' function. */ +#cmakedefine EVENT__HAVE_UMASK 1 + +/* Define to 1 if you have the header file. */ +#cmakedefine EVENT__HAVE_UNISTD_H 1 + +/* Define to 1 if you have the `unsetenv' function. */ +#cmakedefine EVENT__HAVE_UNSETENV 1 + +/* Define to 1 if you have the `vasprintf' function. */ +#cmakedefine EVENT__HAVE_VASPRINTF 1 + +/* Define if kqueue works correctly with pipes */ +#cmakedefine EVENT__HAVE_WORKING_KQUEUE 1 + +#ifdef __USE_UNUSED_DEFINITIONS__ +/* Define to necessary symbol if this constant uses a non-standard name on your system. */ +/* XXX: Hello, this isn't even used, nor is it defined anywhere... - Ellzey */ +#define EVENT__PTHREAD_CREATE_JOINABLE ${EVENT__PTHREAD_CREATE_JOINABLE} +#endif + +/* The size of `pthread_t', as computed by sizeof. */ +#define EVENT__SIZEOF_PTHREAD_T @EVENT__SIZEOF_PTHREAD_T@ + +/* The size of a `int', as computed by sizeof. */ +#define EVENT__SIZEOF_INT @EVENT__SIZEOF_INT@ + +/* The size of a `long', as computed by sizeof. */ +#define EVENT__SIZEOF_LONG @EVENT__SIZEOF_LONG@ + +/* The size of a `long long', as computed by sizeof. */ +#define EVENT__SIZEOF_LONG_LONG @EVENT__SIZEOF_LONG_LONG@ + +/* The size of `off_t', as computed by sizeof. */ +#define EVENT__SIZEOF_OFF_T @EVENT__SIZEOF_OFF_T@ + +#define EVENT__SIZEOF_SSIZE_T @EVENT__SIZEOF_SSIZE_T@ + + +/* The size of a `short', as computed by sizeof. */ +#define EVENT__SIZEOF_SHORT @EVENT__SIZEOF_SHORT@ + +/* The size of `size_t', as computed by sizeof. */ +#define EVENT__SIZEOF_SIZE_T @EVENT__SIZEOF_SIZE_T@ + +/* Define to 1 if you can safely include both and . */ +#cmakedefine EVENT__TIME_WITH_SYS_TIME 1 + +/* The size of `socklen_t', as computed by sizeof. 
*/ +#define EVENT__SIZEOF_SOCKLEN_T @EVENT__SIZEOF_SOCKLEN_T@ + +/* The size of 'void *', as computer by sizeof */ +#define EVENT__SIZEOF_VOID_P @EVENT__SIZEOF_VOID_P@ + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* why not c++? + * + * and are we really expected to use EVENT__inline everywhere, + * shouldn't we just do: + * ifdef EVENT__inline + * define inline EVENT__inline + * + * - Ellzey + */ + +#define EVENT__inline @EVENT__inline@ +#endif + +#cmakedefine EVENT__HAVE___func__ 1 +#cmakedefine EVENT__HAVE___FUNCTION__ 1 + +/* Define to `unsigned' if does not define. */ +#define EVENT__size_t @EVENT__size_t@ + +/* Define to unsigned int if you dont have it */ +#define EVENT__socklen_t @EVENT__socklen_t@ + +/* Define to `int' if does not define. */ +#define EVENT__ssize_t @EVENT__ssize_t@ + +#endif /* \EVENT2_EVENT_CONFIG_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/event-internal.h b/probe-busybox/libevent-2.1.11-stable/event-internal.h new file mode 100644 index 00000000..92941b71 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event-internal.h @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef EVENT_INTERNAL_H_INCLUDED_ +#define EVENT_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include +#include +#include "event2/event_struct.h" +#include "minheap-internal.h" +#include "evsignal-internal.h" +#include "mm-internal.h" +#include "defer-internal.h" + +/* map union members back */ + +/* mutually exclusive */ +#define ev_signal_next ev_.ev_signal.ev_signal_next +#define ev_io_next ev_.ev_io.ev_io_next +#define ev_io_timeout ev_.ev_io.ev_timeout + +/* used only by signals */ +#define ev_ncalls ev_.ev_signal.ev_ncalls +#define ev_pncalls ev_.ev_signal.ev_pncalls + +#define ev_pri ev_evcallback.evcb_pri +#define ev_flags ev_evcallback.evcb_flags +#define ev_closure ev_evcallback.evcb_closure +#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback +#define ev_arg ev_evcallback.evcb_arg + +/** @name Event closure codes + + Possible values for evcb_closure in struct event_callback + + @{ + */ +/** A regular event. Uses the evcb_callback callback */ +#define EV_CLOSURE_EVENT 0 +/** A signal event. Uses the evcb_callback callback */ +#define EV_CLOSURE_EVENT_SIGNAL 1 +/** A persistent non-signal event. Uses the evcb_callback callback */ +#define EV_CLOSURE_EVENT_PERSIST 2 +/** A simple callback. Uses the evcb_selfcb callback. */ +#define EV_CLOSURE_CB_SELF 3 +/** A finalizing callback. Uses the evcb_cbfinalize callback. */ +#define EV_CLOSURE_CB_FINALIZE 4 +/** A finalizing event. Uses the evcb_evfinalize callback. */ +#define EV_CLOSURE_EVENT_FINALIZE 5 +/** A finalizing event that should get freed after. Uses the evcb_evfinalize + * callback. */ +#define EV_CLOSURE_EVENT_FINALIZE_FREE 6 +/** @} */ + +/** Structure to define the backend of a given event_base. */ +struct eventop { + /** The name of this backend. */ + const char *name; + /** Function to set up an event_base to use this backend. It should + * create a new structure holding whatever information is needed to + * run the backend, and return it. The returned pointer will get + * stored by event_init into the event_base.evbase field. On failure, + * this function should return NULL. */ + void *(*init)(struct event_base *); + /** Enable reading/writing on a given fd or signal. 'events' will be + * the events that we're trying to enable: one or more of EV_READ, + * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that + * were enabled on this fd previously. 'fdinfo' will be a structure + * associated with the fd by the evmap; its size is defined by the + * fdinfo field below. It will be set to 0 the first time the fd is + * added. The function should return 0 on success and -1 on error. + */ + int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo); + /** As "add", except 'events' contains the events we mean to disable. */ + int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo); + /** Function to implement the core of an event loop. It must see which + added events are ready, and cause event_active to be called for each + active event (usually via event_io_active or such). It should + return 0 on success and -1 on error. + */ + int (*dispatch)(struct event_base *, struct timeval *); + /** Function to clean up and free our data from the event_base. */ + void (*dealloc)(struct event_base *); + /** Flag: set if we need to reinitialize the event base after we fork. 
+ */ + int need_reinit; + /** Bit-array of supported event_method_features that this backend can + * provide. */ + enum event_method_feature features; + /** Length of the extra information we should record for each fd that + has one or more active events. This information is recorded + as part of the evmap entry for each fd, and passed as an argument + to the add and del functions above. + */ + size_t fdinfo_len; +}; + +#ifdef _WIN32 +/* If we're on win32, then file descriptors are not nice low densely packed + integers. Instead, they are pointer-like windows handles, and we want to + use a hashtable instead of an array to map fds to events. +*/ +#define EVMAP_USE_HT +#endif + +/* #define HT_CACHE_HASH_VALS */ + +#ifdef EVMAP_USE_HT +#define HT_NO_CACHE_HASH_VALUES +#include "ht-internal.h" +struct event_map_entry; +HT_HEAD(event_io_map, event_map_entry); +#else +#define event_io_map event_signal_map +#endif + +/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not + defined, this structure is also used as event_io_map, which maps fds to a + list of events. +*/ +struct event_signal_map { + /* An array of evmap_io * or of evmap_signal *; empty entries are + * set to NULL. */ + void **entries; + /* The number of entries available in entries */ + int nentries; +}; + +/* A list of events waiting on a given 'common' timeout value. Ordinarily, + * events waiting for a timeout wait on a minheap. Sometimes, however, a + * queue can be faster. + **/ +struct common_timeout_list { + /* List of events currently waiting in the queue. */ + struct event_list events; + /* 'magic' timeval used to indicate the duration of events in this + * queue. */ + struct timeval duration; + /* Event that triggers whenever one of the events in the queue is + * ready to activate */ + struct event timeout_event; + /* The event_base that this timeout list is part of */ + struct event_base *base; +}; + +/** Mask used to get the real tv_usec value from a common timeout. */ +#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff + +struct event_change; + +/* List of 'changes' since the last call to eventop.dispatch. Only maintained + * if the backend is using changesets. */ +struct event_changelist { + struct event_change *changes; + int n_changes; + int changes_size; +}; + +#ifndef EVENT__DISABLE_DEBUG_MODE +/* Global internal flag: set to one if debug mode is on. */ +extern int event_debug_mode_on_; +#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_) +#else +#define EVENT_DEBUG_MODE_IS_ON() (0) +#endif + +TAILQ_HEAD(evcallback_list, event_callback); + +/* Sets up an event for processing once */ +struct event_once { + LIST_ENTRY(event_once) next_once; + struct event ev; + + void (*cb)(evutil_socket_t, short, void *); + void *arg; +}; + +struct event_base { + /** Function pointers and other data to describe this event_base's + * backend. */ + const struct eventop *evsel; + /** Pointer to backend-specific data. */ + void *evbase; + + /** List of changes to tell backend about at next dispatch. Only used + * by the O(1) backends. */ + struct event_changelist changelist; + + /** Function pointers used to describe the backend that this event_base + * uses for signals */ + const struct eventop *evsigsel; + /** Data to implement the common signal handler code. 
*/ + struct evsig_info sig; + + /** Number of virtual events */ + int virtual_event_count; + /** Maximum number of virtual events active */ + int virtual_event_count_max; + /** Number of total events added to this event_base */ + int event_count; + /** Maximum number of total events added to this event_base */ + int event_count_max; + /** Number of total events active in this event_base */ + int event_count_active; + /** Maximum number of total events active in this event_base */ + int event_count_active_max; + + /** Set if we should terminate the loop once we're done processing + * events. */ + int event_gotterm; + /** Set if we should terminate the loop immediately */ + int event_break; + /** Set if we should start a new instance of the loop immediately. */ + int event_continue; + + /** The currently running priority of events */ + int event_running_priority; + + /** Set if we're running the event_base_loop function, to prevent + * reentrant invocation. */ + int running_loop; + + /** Set to the number of deferred_cbs we've made 'active' in the + * loop. This is a hack to prevent starvation; it would be smarter + * to just use event_config_set_max_dispatch_interval's max_callbacks + * feature */ + int n_deferreds_queued; + + /* Active event management. */ + /** An array of nactivequeues queues for active event_callbacks (ones + * that have triggered, and whose callbacks need to be called). Low + * priority numbers are more important, and stall higher ones. + */ + struct evcallback_list *activequeues; + /** The length of the activequeues array */ + int nactivequeues; + /** A list of event_callbacks that should become active the next time + * we process events, but not this time. */ + struct evcallback_list active_later_queue; + + /* common timeout logic */ + + /** An array of common_timeout_list* for all of the common timeout + * values we know. */ + struct common_timeout_list **common_timeout_queues; + /** The number of entries used in common_timeout_queues */ + int n_common_timeouts; + /** The total size of common_timeout_queues. */ + int n_common_timeouts_allocated; + + /** Mapping from file descriptors to enabled (added) events */ + struct event_io_map io; + + /** Mapping from signal numbers to enabled (added) events. */ + struct event_signal_map sigmap; + + /** Priority queue of events with timeouts. */ + struct min_heap timeheap; + + /** Stored timeval: used to avoid calling gettimeofday/clock_gettime + * too often. */ + struct timeval tv_cache; + + struct evutil_monotonic_timer monotonic_timer; + + /** Difference between internal time (maybe from clock_gettime) and + * gettimeofday. */ + struct timeval tv_clock_diff; + /** Second in which we last updated tv_clock_diff, in monotonic time. */ + time_t last_updated_clock_diff; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + /* threading support */ + /** The thread currently running the event_loop for this base */ + unsigned long th_owner_id; + /** A lock to prevent conflicting accesses to this event_base */ + void *th_base_lock; + /** A condition that gets signalled when we're done processing an + * event with waiters on it. */ + void *current_event_cond; + /** Number of threads blocking on current_event_cond. */ + int current_event_waiters; +#endif + /** The event whose callback is executing right now */ + struct event_callback *current_event; + +#ifdef _WIN32 + /** IOCP support structure, if IOCP is enabled. 
*/ + struct event_iocp_port *iocp; +#endif + + /** Flags that this base was configured with */ + enum event_base_config_flag flags; + + struct timeval max_dispatch_time; + int max_dispatch_callbacks; + int limit_callbacks_after_prio; + + /* Notify main thread to wake up break, etc. */ + /** True if the base already has a pending notify, and we don't need + * to add any more. */ + int is_notify_pending; + /** A socketpair used by some th_notify functions to wake up the main + * thread. */ + evutil_socket_t th_notify_fd[2]; + /** An event used by some th_notify functions to wake up the main + * thread. */ + struct event th_notify; + /** A function used to wake up the main thread from another thread. */ + int (*th_notify_fn)(struct event_base *base); + + /** Saved seed for weak random number generator. Some backends use + * this to produce fairness among sockets. Protected by th_base_lock. */ + struct evutil_weakrand_state weakrand_seed; + + /** List of event_onces that have not yet fired. */ + LIST_HEAD(once_event_list, event_once) once_events; + +}; + +struct event_config_entry { + TAILQ_ENTRY(event_config_entry) next; + + const char *avoid_method; +}; + +/** Internal structure: describes the configuration we want for an event_base + * that we're about to allocate. */ +struct event_config { + TAILQ_HEAD(event_configq, event_config_entry) entries; + + int n_cpus_hint; + struct timeval max_dispatch_interval; + int max_dispatch_callbacks; + int limit_callbacks_after_prio; + enum event_method_feature require_features; + enum event_base_config_flag flags; +}; + +/* Internal use only: Functions that might be missing from */ +#ifndef LIST_END +#define LIST_END(head) NULL +#endif + +#ifndef TAILQ_FIRST +#define TAILQ_FIRST(head) ((head)->tqh_first) +#endif +#ifndef TAILQ_END +#define TAILQ_END(head) NULL +#endif +#ifndef TAILQ_NEXT +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#endif + +#ifndef TAILQ_FOREACH +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = TAILQ_FIRST(head); \ + (var) != TAILQ_END(head); \ + (var) = TAILQ_NEXT(var, field)) +#endif + +#ifndef TAILQ_INSERT_BEFORE +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (0) +#endif + +#define N_ACTIVE_CALLBACKS(base) \ + ((base)->event_count_active) + +int evsig_set_handler_(struct event_base *base, int evsignal, + void (*fn)(int)); +int evsig_restore_handler_(struct event_base *base, int evsignal); + +int event_add_nolock_(struct event *ev, + const struct timeval *tv, int tv_is_absolute); +/** Argument for event_del_nolock_. Tells event_del not to block on the event + * if it's running in another thread. */ +#define EVENT_DEL_NOBLOCK 0 +/** Argument for event_del_nolock_. Tells event_del to block on the event + * if it's running in another thread, regardless of its value for EV_FINALIZE + */ +#define EVENT_DEL_BLOCK 1 +/** Argument for event_del_nolock_. Tells event_del to block on the event + * if it is running in another thread and it doesn't have EV_FINALIZE set. + */ +#define EVENT_DEL_AUTOBLOCK 2 +/** Argument for event_del_nolock_. 
Tells event_del to procede even if the + * event is set up for finalization rather for regular use.*/ +#define EVENT_DEL_EVEN_IF_FINALIZING 3 +int event_del_nolock_(struct event *ev, int blocking); +int event_remove_timer_nolock_(struct event *ev); + +void event_active_nolock_(struct event *ev, int res, short count); +EVENT2_EXPORT_SYMBOL +int event_callback_activate_(struct event_base *, struct event_callback *); +int event_callback_activate_nolock_(struct event_base *, struct event_callback *); +int event_callback_cancel_(struct event_base *base, + struct event_callback *evcb); + +void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)); +EVENT2_EXPORT_SYMBOL +void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)); +int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *)); + + +EVENT2_EXPORT_SYMBOL +void event_active_later_(struct event *ev, int res); +void event_active_later_nolock_(struct event *ev, int res); +int event_callback_activate_later_nolock_(struct event_base *base, + struct event_callback *evcb); +int event_callback_cancel_nolock_(struct event_base *base, + struct event_callback *evcb, int even_if_finalizing); +void event_callback_init_(struct event_base *base, + struct event_callback *cb); + +/* FIXME document. */ +EVENT2_EXPORT_SYMBOL +void event_base_add_virtual_(struct event_base *base); +void event_base_del_virtual_(struct event_base *base); + +/** For debugging: unless assertions are disabled, verify the referential + integrity of the internal data structures of 'base'. This operation can + be expensive. + + Returns on success; aborts on failure. +*/ +EVENT2_EXPORT_SYMBOL +void event_base_assert_ok_(struct event_base *base); +void event_base_assert_ok_nolock_(struct event_base *base); + + +/* Helper function: Call 'fn' exactly once every inserted or active event in + * the event_base 'base'. + * + * If fn returns 0, continue on to the next event. Otherwise, return the same + * value that fn returned. + * + * Requires that 'base' be locked. + */ +int event_base_foreach_event_nolock_(struct event_base *base, + event_base_foreach_event_cb cb, void *arg); + +/* Cleanup function to reset debug mode during shutdown. + * + * Calling this function doesn't mean it'll be possible to re-enable + * debug mode if any events were added. + */ +void event_disable_debug_mode(void); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/event.3 b/probe-busybox/libevent-2.1.11-stable/event.3 new file mode 100644 index 00000000..655a823e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event.3 @@ -0,0 +1,624 @@ +.\" $OpenBSD: event.3,v 1.4 2002/07/12 18:50:48 provos Exp $ +.\" +.\" Copyright (c) 2000 Artur Grabowski +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. 
Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" 3. The name of the author may not be used to endorse or promote products +.\" derived from this software without specific prior written permission. +.\" +.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +.\" +.Dd August 8, 2000 +.Dt EVENT 3 +.Os +.Sh NAME +.Nm event_init , +.Nm event_dispatch , +.Nm event_loop , +.Nm event_loopexit , +.Nm event_loopbreak , +.Nm event_set , +.Nm event_base_dispatch , +.Nm event_base_loop , +.Nm event_base_loopexit , +.Nm event_base_loopbreak , +.Nm event_base_set , +.Nm event_base_free , +.Nm event_add , +.Nm event_del , +.Nm event_once , +.Nm event_base_once , +.Nm event_pending , +.Nm event_initialized , +.Nm event_priority_init , +.Nm event_priority_set , +.Nm evtimer_set , +.Nm evtimer_add , +.Nm evtimer_del , +.Nm evtimer_pending , +.Nm evtimer_initialized , +.Nm signal_set , +.Nm signal_add , +.Nm signal_del , +.Nm signal_pending , +.Nm signal_initialized , +.Nm bufferevent_new , +.Nm bufferevent_free , +.Nm bufferevent_write , +.Nm bufferevent_write_buffer , +.Nm bufferevent_read , +.Nm bufferevent_enable , +.Nm bufferevent_disable , +.Nm bufferevent_settimeout , +.Nm bufferevent_base_set , +.Nm evbuffer_new , +.Nm evbuffer_free , +.Nm evbuffer_add , +.Nm evbuffer_add_buffer , +.Nm evbuffer_add_printf , +.Nm evbuffer_add_vprintf , +.Nm evbuffer_drain , +.Nm evbuffer_write , +.Nm evbuffer_read , +.Nm evbuffer_find , +.Nm evbuffer_readline , +.Nm evhttp_new , +.Nm evhttp_bind_socket , +.Nm evhttp_free +.Nd execute a function when a specific event occurs +.Sh SYNOPSIS +.Fd #include +.Fd #include +.Ft "struct event_base *" +.Fn "event_init" "void" +.Ft int +.Fn "event_dispatch" "void" +.Ft int +.Fn "event_loop" "int flags" +.Ft int +.Fn "event_loopexit" "struct timeval *tv" +.Ft int +.Fn "event_loopbreak" "void" +.Ft void +.Fn "event_set" "struct event *ev" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" +.Ft int +.Fn "event_base_dispatch" "struct event_base *base" +.Ft int +.Fn "event_base_loop" "struct event_base *base" "int flags" +.Ft int +.Fn "event_base_loopexit" "struct event_base *base" "struct timeval *tv" +.Ft int +.Fn "event_base_loopbreak" "struct event_base *base" +.Ft int +.Fn "event_base_set" "struct event_base *base" "struct event *" +.Ft void +.Fn "event_base_free" "struct event_base *base" +.Ft int +.Fn "event_add" "struct event *ev" "struct timeval *tv" +.Ft int +.Fn "event_del" "struct event *ev" +.Ft int +.Fn "event_once" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv" +.Ft int +.Fn "event_base_once" "struct event_base *base" "int fd" "short event" "void (*fn)(int, short, void *)" 
"void *arg" "struct timeval *tv" +.Ft int +.Fn "event_pending" "struct event *ev" "short event" "struct timeval *tv" +.Ft int +.Fn "event_initialized" "struct event *ev" +.Ft int +.Fn "event_priority_init" "int npriorities" +.Ft int +.Fn "event_priority_set" "struct event *ev" "int priority" +.Ft void +.Fn "evtimer_set" "struct event *ev" "void (*fn)(int, short, void *)" "void *arg" +.Ft void +.Fn "evtimer_add" "struct event *ev" "struct timeval *" +.Ft void +.Fn "evtimer_del" "struct event *ev" +.Ft int +.Fn "evtimer_pending" "struct event *ev" "struct timeval *tv" +.Ft int +.Fn "evtimer_initialized" "struct event *ev" +.Ft void +.Fn "signal_set" "struct event *ev" "int signal" "void (*fn)(int, short, void *)" "void *arg" +.Ft void +.Fn "signal_add" "struct event *ev" "struct timeval *" +.Ft void +.Fn "signal_del" "struct event *ev" +.Ft int +.Fn "signal_pending" "struct event *ev" "struct timeval *tv" +.Ft int +.Fn "signal_initialized" "struct event *ev" +.Ft "struct bufferevent *" +.Fn "bufferevent_new" "int fd" "evbuffercb readcb" "evbuffercb writecb" "everrorcb" "void *cbarg" +.Ft void +.Fn "bufferevent_free" "struct bufferevent *bufev" +.Ft int +.Fn "bufferevent_write" "struct bufferevent *bufev" "void *data" "size_t size" +.Ft int +.Fn "bufferevent_write_buffer" "struct bufferevent *bufev" "struct evbuffer *buf" +.Ft size_t +.Fn "bufferevent_read" "struct bufferevent *bufev" "void *data" "size_t size" +.Ft int +.Fn "bufferevent_enable" "struct bufferevent *bufev" "short event" +.Ft int +.Fn "bufferevent_disable" "struct bufferevent *bufev" "short event" +.Ft void +.Fn "bufferevent_settimeout" "struct bufferevent *bufev" "int timeout_read" "int timeout_write" +.Ft int +.Fn "bufferevent_base_set" "struct event_base *base" "struct bufferevent *bufev" +.Ft "struct evbuffer *" +.Fn "evbuffer_new" "void" +.Ft void +.Fn "evbuffer_free" "struct evbuffer *buf" +.Ft int +.Fn "evbuffer_add" "struct evbuffer *buf" "const void *data" "size_t size" +.Ft int +.Fn "evbuffer_add_buffer" "struct evbuffer *dst" "struct evbuffer *src" +.Ft int +.Fn "evbuffer_add_printf" "struct evbuffer *buf" "const char *fmt" "..." +.Ft int +.Fn "evbuffer_add_vprintf" "struct evbuffer *buf" "const char *fmt" "va_list ap" +.Ft void +.Fn "evbuffer_drain" "struct evbuffer *buf" "size_t size" +.Ft int +.Fn "evbuffer_write" "struct evbuffer *buf" "int fd" +.Ft int +.Fn "evbuffer_read" "struct evbuffer *buf" "int fd" "int size" +.Ft "unsigned char *" +.Fn "evbuffer_find" "struct evbuffer *buf" "const unsigned char *data" "size_t size" +.Ft "char *" +.Fn "evbuffer_readline" "struct evbuffer *buf" +.Ft "struct evhttp *" +.Fn "evhttp_new" "struct event_base *base" +.Ft int +.Fn "evhttp_bind_socket" "struct evhttp *http" "const char *address" "unsigned short port" +.Ft "void" +.Fn "evhttp_free" "struct evhttp *http" +.Ft int +.Fa (*event_sigcb)(void) ; +.Ft volatile sig_atomic_t +.Fa event_gotsig ; +.Sh DESCRIPTION +The +.Nm event +API provides a mechanism to execute a function when a specific event +on a file descriptor occurs or after a given time has passed. +.Pp +The +.Nm event +API needs to be initialized with +.Fn event_init +before it can be used. +.Pp +In order to process events, an application needs to call +.Fn event_dispatch . +This function only returns on error, and should replace the event core +of the application program. +.Pp +The function +.Fn event_set +prepares the event structure +.Fa ev +to be used in future calls to +.Fn event_add +and +.Fn event_del . 
+The event will be prepared to call the function specified by the +.Fa fn +argument with an +.Fa int +argument indicating the file descriptor, a +.Fa short +argument indicating the type of event, and a +.Fa void * +argument given in the +.Fa arg +argument. +The +.Fa fd +indicates the file descriptor that should be monitored for events. +The events can be either +.Va EV_READ , +.Va EV_WRITE , +or both, +indicating that an application can read or write from the file descriptor +respectively without blocking. +.Pp +The function +.Fa fn +will be called with the file descriptor that triggered the event and +the type of event which will be either +.Va EV_TIMEOUT , +.Va EV_SIGNAL , +.Va EV_READ , +or +.Va EV_WRITE . +Additionally, an event which has registered interest in more than one of the +preceeding events, via bitwise-OR to +.Fn event_set , +can provide its callback function with a bitwise-OR of more than one triggered +event. +The additional flag +.Va EV_PERSIST +makes an +.Fn event_add +persistent until +.Fn event_del +has been called. +.Pp +Once initialized, the +.Fa ev +structure can be used repeatedly with +.Fn event_add +and +.Fn event_del +and does not need to be reinitialized unless the function called and/or +the argument to it are to be changed. +However, when an +.Fa ev +structure has been added to libevent using +.Fn event_add +the structure must persist until the event occurs (assuming +.Fa EV_PERSIST +is not set) or is removed +using +.Fn event_del . +You may not reuse the same +.Fa ev +structure for multiple monitored descriptors; each descriptor +needs its own +.Fa ev . +.Pp +The function +.Fn event_add +schedules the execution of the +.Fa ev +event when the event specified in +.Fn event_set +occurs or in at least the time specified in the +.Fa tv . +If +.Fa tv +is +.Dv NULL , +no timeout occurs and the function will only be called +if a matching event occurs on the file descriptor. +The event in the +.Fa ev +argument must be already initialized by +.Fn event_set +and may not be used in calls to +.Fn event_set +until it has timed out or been removed with +.Fn event_del . +If the event in the +.Fa ev +argument already has a scheduled timeout, the old timeout will be +replaced by the new one. +.Pp +The function +.Fn event_del +will cancel the event in the argument +.Fa ev . +If the event has already executed or has never been added +the call will have no effect. +.Pp +The functions +.Fn evtimer_set , +.Fn evtimer_add , +.Fn evtimer_del , +.Fn evtimer_initialized , +and +.Fn evtimer_pending +are abbreviations for common situations where only a timeout is required. +The file descriptor passed will be \-1, and the event type will be +.Va EV_TIMEOUT . +.Pp +The functions +.Fn signal_set , +.Fn signal_add , +.Fn signal_del , +.Fn signal_initialized , +and +.Fn signal_pending +are abbreviations. +The event type will be a persistent +.Va EV_SIGNAL . +That means +.Fn signal_set +adds +.Va EV_PERSIST . +.Pp +In order to avoid races in signal handlers, the +.Nm event +API provides two variables: +.Va event_sigcb +and +.Va event_gotsig . +A signal handler +sets +.Va event_gotsig +to indicate that a signal has been received. +The application sets +.Va event_sigcb +to a callback function. +After the signal handler sets +.Va event_gotsig , +.Nm event_dispatch +will execute the callback function to process received signals. +The callback returns 1 when no events are registered any more. 
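
For illustration only (not part of the vendored sources or of this patch): a minimal sketch of the classic event_set/event_add/event_dispatch flow that the manual page above describes, assuming a program linked against this libevent. The callback names, the use of stdin as the monitored descriptor, and the five-second timeout are all hypothetical choices for the example.

#include <event.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static void on_stdin(int fd, short what, void *arg)
{
    /* called whenever the monitored descriptor becomes readable */
    char buf[256];
    ssize_t n = read(fd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        printf("read %zd bytes\n", n);
    }
}

static void on_timeout(int fd, short what, void *arg)
{
    /* pure timeout event: fd is -1 and what is EV_TIMEOUT */
    puts("five seconds passed");
}

int main(void)
{
    struct event ev_in, ev_tick;
    struct timeval five = { 5, 0 };

    event_init();                            /* initialize the (global) event base */

    /* EV_PERSIST keeps the event added after each callback invocation */
    event_set(&ev_in, 0, EV_READ | EV_PERSIST, on_stdin, NULL);
    event_add(&ev_in, NULL);                 /* NULL timeout: wait only for readability */

    evtimer_set(&ev_tick, on_timeout, NULL); /* timeout-only shorthand described above */
    evtimer_add(&ev_tick, &five);

    return event_dispatch();                 /* run until no events remain or an error occurs */
}
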
+It can return \-1 to indicate an error to the +.Nm event +library, causing +.Fn event_dispatch +to terminate with +.Va errno +set to +.Er EINTR . +.Pp +The function +.Fn event_once +is similar to +.Fn event_set . +However, it schedules a callback to be called exactly once and does not +require the caller to prepare an +.Fa event +structure. +This function supports +.Fa EV_TIMEOUT , +.Fa EV_READ , +and +.Fa EV_WRITE . +.Pp +The +.Fn event_pending +function can be used to check if the event specified by +.Fa event +is pending to run. +If +.Va EV_TIMEOUT +was specified and +.Fa tv +is not +.Dv NULL , +the expiration time of the event will be returned in +.Fa tv . +.Pp +The +.Fn event_initialized +macro can be used to check if an event has been initialized. +.Pp +The +.Nm event_loop +function provides an interface for single pass execution of pending +events. +The flags +.Va EVLOOP_ONCE +and +.Va EVLOOP_NONBLOCK +are recognized. +The +.Nm event_loopexit +function exits from the event loop. The next +.Fn event_loop +iteration after the +given timer expires will complete normally (handling all queued events) then +exit without blocking for events again. Subsequent invocations of +.Fn event_loop +will proceed normally. +The +.Nm event_loopbreak +function exits from the event loop immediately. +.Fn event_loop +will abort after the next event is completed; +.Fn event_loopbreak +is typically invoked from this event's callback. This behavior is analogous +to the "break;" statement. Subsequent invocations of +.Fn event_loop +will proceed normally. +.Pp +It is the responsibility of the caller to provide these functions with +pre-allocated event structures. +.Pp +.Sh EVENT PRIORITIES +By default +.Nm libevent +schedules all active events with the same priority. +However, sometimes it is desirable to process some events with a higher +priority than others. +For that reason, +.Nm libevent +supports strict priority queues. +Active events with a lower priority are always processed before events +with a higher priority. +.Pp +The number of different priorities can be set initially with the +.Fn event_priority_init +function. +This function should be called before the first call to +.Fn event_dispatch . +The +.Fn event_priority_set +function can be used to assign a priority to an event. +By default, +.Nm libevent +assigns the middle priority to all events unless their priority +is explicitly set. +.Sh THREAD SAFE EVENTS +.Nm Libevent +has experimental support for thread-safe events. +When initializing the library via +.Fn event_init , +an event base is returned. +This event base can be used in conjunction with calls to +.Fn event_base_set , +.Fn event_base_dispatch , +.Fn event_base_loop , +.Fn event_base_loopexit , +.Fn bufferevent_base_set +and +.Fn event_base_free . +.Fn event_base_set +should be called after preparing an event with +.Fn event_set , +as +.Fn event_set +assigns the provided event to the most recently created event base. +.Fn bufferevent_base_set +should be called after preparing a bufferevent with +.Fn bufferevent_new . +.Fn event_base_free +should be used to free memory associated with the event base +when it is no longer needed. +.Sh BUFFERED EVENTS +.Nm libevent +provides an abstraction on top of the regular event callbacks. +This abstraction is called a +.Va "buffered event" . +A buffered event provides input and output buffers that get filled +and drained automatically. 
+The user of a buffered event no longer deals directly with the IO, +but instead is reading from input and writing to output buffers. +.Pp +A new bufferevent is created by +.Fn bufferevent_new . +The parameter +.Fa fd +specifies the file descriptor from which data is read and written to. +This file descriptor is not allowed to be a +.Xr pipe 2 . +The next three parameters are callbacks. +The read and write callback have the following form: +.Ft void +.Fn "(*cb)" "struct bufferevent *bufev" "void *arg" . +The error callback has the following form: +.Ft void +.Fn "(*cb)" "struct bufferevent *bufev" "short what" "void *arg" . +The argument is specified by the fourth parameter +.Fa "cbarg" . +A +.Fa bufferevent struct +pointer is returned on success, NULL on error. +Both the read and the write callback may be NULL. +The error callback has to be always provided. +.Pp +Once initialized, the bufferevent structure can be used repeatedly with +bufferevent_enable() and bufferevent_disable(). +The flags parameter can be a combination of +.Va EV_READ +and +.Va EV_WRITE . +When read enabled the bufferevent will try to read from the file +descriptor and call the read callback. +The write callback is executed +whenever the output buffer is drained below the write low watermark, +which is +.Va 0 +by default. +.Pp +The +.Fn bufferevent_write +function can be used to write data to the file descriptor. +The data is appended to the output buffer and written to the descriptor +automatically as it becomes available for writing. +.Fn bufferevent_write +returns 0 on success or \-1 on failure. +The +.Fn bufferevent_read +function is used to read data from the input buffer, +returning the amount of data read. +.Pp +If multiple bases are in use, bufferevent_base_set() must be called before +enabling the bufferevent for the first time. +.Sh NON-BLOCKING HTTP SUPPORT +.Nm libevent +provides a very thin HTTP layer that can be used both to host an HTTP +server and also to make HTTP requests. +An HTTP server can be created by calling +.Fn evhttp_new . +It can be bound to any port and address with the +.Fn evhttp_bind_socket +function. +When the HTTP server is no longer used, it can be freed via +.Fn evhttp_free . +.Pp +To be notified of HTTP requests, a user needs to register callbacks with the +HTTP server. +This can be done by calling +.Fn evhttp_set_cb . +The second argument is the URI for which a callback is being registered. +The corresponding callback will receive an +.Va struct evhttp_request +object that contains all information about the request. +.Pp +This section does not document all the possible function calls; please +check +.Va event.h +for the public interfaces. +.Sh ADDITIONAL NOTES +It is possible to disable support for +.Va epoll , kqueue , devpoll , poll +or +.Va select +by setting the environment variable +.Va EVENT_NOEPOLL , EVENT_NOKQUEUE , EVENT_NODEVPOLL , EVENT_NOPOLL +or +.Va EVENT_NOSELECT , +respectively. +By setting the environment variable +.Va EVENT_SHOW_METHOD , +.Nm libevent +displays the kernel notification method that it uses. +.Sh RETURN VALUES +Upon successful completion +.Fn event_add +and +.Fn event_del +return 0. +Otherwise, \-1 is returned and the global variable errno is +set to indicate the error. +.Sh SEE ALSO +.Xr kqueue 2 , +.Xr poll 2 , +.Xr select 2 , +.Xr evdns 3 , +.Xr timeout 9 +.Sh HISTORY +The +.Nm event +API manpage is based on the +.Xr timeout 9 +manpage by Artur Grabowski. +The port of +.Nm libevent +to Windows is due to Michael A. Davis. 
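
Again for illustration only (not part of the patch): a minimal echo sketch of the buffered-event API covered in the BUFFERED EVENTS section above. It assumes event_init() has already been called and that fd is a hypothetical, already connected, non-pipe socket; the function and callback names are invented for the example. Note that the error callback is passed because, per the description above, it must always be provided.

#include <event.h>

static void echo_read(struct bufferevent *bev, void *arg)
{
    char buf[512];
    size_t n;

    /* drain the input buffer and append everything to the output buffer */
    while ((n = bufferevent_read(bev, buf, sizeof(buf))) > 0)
        bufferevent_write(bev, buf, n);
}

static void echo_error(struct bufferevent *bev, short what, void *arg)
{
    /* EOF or error on the underlying descriptor: stop using the bufferevent */
    bufferevent_free(bev);
}

static struct bufferevent *make_echo(int fd)
{
    /* read callback, no write callback, mandatory error callback */
    struct bufferevent *bev = bufferevent_new(fd, echo_read, NULL, echo_error, NULL);
    if (bev != NULL)
        bufferevent_enable(bev, EV_READ);    /* start reading and invoking echo_read */
    return bev;
}
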
+Support for real-time signals is due to Taral. +.Sh AUTHORS +The +.Nm event +library was written by Niels Provos. +.Sh BUGS +This documentation is neither complete nor authoritative. +If you are in doubt about the usage of this API then +check the source code to find out how it works, write +up the missing piece of documentation and send it to +me for inclusion in this man page. diff --git a/probe-busybox/libevent-2.1.11-stable/event.c b/probe-busybox/libevent-2.1.11-stable/event.c new file mode 100644 index 00000000..b2ad3410 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event.c @@ -0,0 +1,4023 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#endif +#include +#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H) +#include +#endif +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#include +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include +#include +#include +#include +#include +#include +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/event_compat.h" +#include "event-internal.h" +#include "defer-internal.h" +#include "evthread-internal.h" +#include "event2/thread.h" +#include "event2/util.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "iocp-internal.h" +#include "changelist-internal.h" +#define HT_NO_CACHE_HASH_VALUES +#include "ht-internal.h" +#include "util-internal.h" + + +#ifdef EVENT__HAVE_WORKING_KQUEUE +#include "kqueue-internal.h" +#endif + +#ifdef EVENT__HAVE_EVENT_PORTS +extern const struct eventop evportops; +#endif +#ifdef EVENT__HAVE_SELECT +extern const struct eventop selectops; +#endif +#ifdef EVENT__HAVE_POLL +extern const struct eventop pollops; +#endif +#ifdef EVENT__HAVE_EPOLL +extern const struct eventop epollops; +#endif +#ifdef EVENT__HAVE_WORKING_KQUEUE +extern const struct eventop kqops; +#endif +#ifdef EVENT__HAVE_DEVPOLL +extern const struct eventop devpollops; +#endif +#ifdef _WIN32 +extern const struct eventop win32ops; +#endif + +/* Array of backends in order of preference. */ +static const struct eventop *eventops[] = { +#ifdef EVENT__HAVE_EVENT_PORTS + &evportops, +#endif +#ifdef EVENT__HAVE_WORKING_KQUEUE + &kqops, +#endif +#ifdef EVENT__HAVE_EPOLL + &epollops, +#endif +#ifdef EVENT__HAVE_DEVPOLL + &devpollops, +#endif +#ifdef EVENT__HAVE_POLL + &pollops, +#endif +#ifdef EVENT__HAVE_SELECT + &selectops, +#endif +#ifdef _WIN32 + &win32ops, +#endif + NULL +}; + +/* Global state; deprecated */ +EVENT2_EXPORT_SYMBOL +struct event_base *event_global_current_base_ = NULL; +#define current_base event_global_current_base_ + +/* Global state */ + +static void *event_self_cbarg_ptr_ = NULL; + +/* Prototypes */ +static void event_queue_insert_active(struct event_base *, struct event_callback *); +static void event_queue_insert_active_later(struct event_base *, struct event_callback *); +static void event_queue_insert_timeout(struct event_base *, struct event *); +static void event_queue_insert_inserted(struct event_base *, struct event *); +static void event_queue_remove_active(struct event_base *, struct event_callback *); +static void event_queue_remove_active_later(struct event_base *, struct event_callback *); +static void event_queue_remove_timeout(struct event_base *, struct event *); +static void event_queue_remove_inserted(struct event_base *, struct event *); +static void event_queue_make_later_events_active(struct event_base *base); + +static int evthread_make_base_notifiable_nolock_(struct event_base *base); +static int event_del_(struct event *ev, int blocking); + +#ifdef USE_REINSERT_TIMEOUT +/* This code seems buggy; only turn it on if we find out what the trouble is. 
*/ +static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx); +#endif + +static int event_haveevents(struct event_base *); + +static int event_process_active(struct event_base *); + +static int timeout_next(struct event_base *, struct timeval **); +static void timeout_process(struct event_base *); + +static inline void event_signal_closure(struct event_base *, struct event *ev); +static inline void event_persist_closure(struct event_base *, struct event *ev); + +static int evthread_notify_base(struct event_base *base); + +static void insert_common_timeout_inorder(struct common_timeout_list *ctl, + struct event *ev); + +#ifndef EVENT__DISABLE_DEBUG_MODE +/* These functions implement a hashtable of which 'struct event *' structures + * have been setup or added. We don't want to trust the content of the struct + * event itself, since we're trying to work through cases where an event gets + * clobbered or freed. Instead, we keep a hashtable indexed by the pointer. + */ + +struct event_debug_entry { + HT_ENTRY(event_debug_entry) node; + const struct event *ptr; + unsigned added : 1; +}; + +static inline unsigned +hash_debug_entry(const struct event_debug_entry *e) +{ + /* We need to do this silliness to convince compilers that we + * honestly mean to cast e->ptr to an integer, and discard any + * part of it that doesn't fit in an unsigned. + */ + unsigned u = (unsigned) ((ev_uintptr_t) e->ptr); + /* Our hashtable implementation is pretty sensitive to low bits, + * and every struct event is over 64 bytes in size, so we can + * just say >>6. */ + return (u >> 6); +} + +static inline int +eq_debug_entry(const struct event_debug_entry *a, + const struct event_debug_entry *b) +{ + return a->ptr == b->ptr; +} + +int event_debug_mode_on_ = 0; + + +#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE) +/** + * @brief debug mode variable which is set for any function/structure that needs + * to be shared across threads (if thread support is enabled). + * + * When and if evthreads are initialized, this variable will be evaluated, + * and if set to something other than zero, this means the evthread setup + * functions were called out of order. + * + * See: "Locks and threading" in the documentation. + */ +int event_debug_created_threadable_ctx_ = 0; +#endif + +/* Set if it's too late to enable event_debug_mode. 
*/ +static int event_debug_mode_too_late = 0; +#ifndef EVENT__DISABLE_THREAD_SUPPORT +static void *event_debug_map_lock_ = NULL; +#endif +static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map = + HT_INITIALIZER(); + +HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry, + eq_debug_entry) +HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry, + eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free) + +/* record that ev is now setup (that is, ready for an add) */ +static void event_debug_note_setup_(const struct event *ev) +{ + struct event_debug_entry *dent, find; + + if (!event_debug_mode_on_) + goto out; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_FIND(event_debug_map, &global_debug_map, &find); + if (dent) { + dent->added = 0; + } else { + dent = mm_malloc(sizeof(*dent)); + if (!dent) + event_err(1, + "Out of memory in debugging code"); + dent->ptr = ev; + dent->added = 0; + HT_INSERT(event_debug_map, &global_debug_map, dent); + } + EVLOCK_UNLOCK(event_debug_map_lock_, 0); + +out: + event_debug_mode_too_late = 1; +} +/* record that ev is no longer setup */ +static void event_debug_note_teardown_(const struct event *ev) +{ + struct event_debug_entry *dent, find; + + if (!event_debug_mode_on_) + goto out; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); + if (dent) + mm_free(dent); + EVLOCK_UNLOCK(event_debug_map_lock_, 0); + +out: + event_debug_mode_too_late = 1; +} +/* Macro: record that ev is now added */ +static void event_debug_note_add_(const struct event *ev) +{ + struct event_debug_entry *dent,find; + + if (!event_debug_mode_on_) + goto out; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_FIND(event_debug_map, &global_debug_map, &find); + if (dent) { + dent->added = 1; + } else { + event_errx(EVENT_ERR_ABORT_, + "%s: noting an add on a non-setup event %p" + " (events: 0x%x, fd: "EV_SOCK_FMT + ", flags: 0x%x)", + __func__, ev, ev->ev_events, + EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); + } + EVLOCK_UNLOCK(event_debug_map_lock_, 0); + +out: + event_debug_mode_too_late = 1; +} +/* record that ev is no longer added */ +static void event_debug_note_del_(const struct event *ev) +{ + struct event_debug_entry *dent, find; + + if (!event_debug_mode_on_) + goto out; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_FIND(event_debug_map, &global_debug_map, &find); + if (dent) { + dent->added = 0; + } else { + event_errx(EVENT_ERR_ABORT_, + "%s: noting a del on a non-setup event %p" + " (events: 0x%x, fd: "EV_SOCK_FMT + ", flags: 0x%x)", + __func__, ev, ev->ev_events, + EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); + } + EVLOCK_UNLOCK(event_debug_map_lock_, 0); + +out: + event_debug_mode_too_late = 1; +} +/* assert that ev is setup (i.e., okay to add or inspect) */ +static void event_debug_assert_is_setup_(const struct event *ev) +{ + struct event_debug_entry *dent, find; + + if (!event_debug_mode_on_) + return; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_FIND(event_debug_map, &global_debug_map, &find); + if (!dent) { + event_errx(EVENT_ERR_ABORT_, + "%s called on a non-initialized event %p" + " (events: 0x%x, fd: "EV_SOCK_FMT + ", flags: 0x%x)", + __func__, ev, ev->ev_events, + EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); + } + EVLOCK_UNLOCK(event_debug_map_lock_, 0); +} +/* assert that ev is not added (i.e., okay to tear down or set up again) */ +static void 
event_debug_assert_not_added_(const struct event *ev) +{ + struct event_debug_entry *dent, find; + + if (!event_debug_mode_on_) + return; + + find.ptr = ev; + EVLOCK_LOCK(event_debug_map_lock_, 0); + dent = HT_FIND(event_debug_map, &global_debug_map, &find); + if (dent && dent->added) { + event_errx(EVENT_ERR_ABORT_, + "%s called on an already added event %p" + " (events: 0x%x, fd: "EV_SOCK_FMT", " + "flags: 0x%x)", + __func__, ev, ev->ev_events, + EV_SOCK_ARG(ev->ev_fd), ev->ev_flags); + } + EVLOCK_UNLOCK(event_debug_map_lock_, 0); +} +static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) +{ + if (!event_debug_mode_on_) + return; + if (fd < 0) + return; + +#ifndef _WIN32 + { + int flags; + if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) { + EVUTIL_ASSERT(flags & O_NONBLOCK); + } + } +#endif +} +#else +static void event_debug_note_setup_(const struct event *ev) { (void)ev; } +static void event_debug_note_teardown_(const struct event *ev) { (void)ev; } +static void event_debug_note_add_(const struct event *ev) { (void)ev; } +static void event_debug_note_del_(const struct event *ev) { (void)ev; } +static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; } +static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; } +static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; } +#endif + +#define EVENT_BASE_ASSERT_LOCKED(base) \ + EVLOCK_ASSERT_LOCKED((base)->th_base_lock) + +/* How often (in seconds) do we check for changes in wall clock time relative + * to monotonic time? Set this to -1 for 'never.' */ +#define CLOCK_SYNC_INTERVAL 5 + +/** Set 'tp' to the current time according to 'base'. We must hold the lock + * on 'base'. If there is a cached time, return it. Otherwise, use + * clock_gettime or gettimeofday as appropriate to find out the right time. + * Return 0 on success, -1 on failure. + */ +static int +gettime(struct event_base *base, struct timeval *tp) +{ + EVENT_BASE_ASSERT_LOCKED(base); + + if (base->tv_cache.tv_sec) { + *tp = base->tv_cache; + return (0); + } + + if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) { + return -1; + } + + if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL + < tp->tv_sec) { + struct timeval tv; + evutil_gettimeofday(&tv,NULL); + evutil_timersub(&tv, tp, &base->tv_clock_diff); + base->last_updated_clock_diff = tp->tv_sec; + } + + return 0; +} + +int +event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv) +{ + int r; + if (!base) { + base = current_base; + if (!current_base) + return evutil_gettimeofday(tv, NULL); + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (base->tv_cache.tv_sec == 0) { + r = evutil_gettimeofday(tv, NULL); + } else { + evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv); + r = 0; + } + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +/** Make 'base' have no current cached time. */ +static inline void +clear_time_cache(struct event_base *base) +{ + base->tv_cache.tv_sec = 0; +} + +/** Replace the cached time in 'base' with the current time. 
*/ +static inline void +update_time_cache(struct event_base *base) +{ + base->tv_cache.tv_sec = 0; + if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME)) + gettime(base, &base->tv_cache); +} + +int +event_base_update_cache_time(struct event_base *base) +{ + + if (!base) { + base = current_base; + if (!current_base) + return -1; + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (base->running_loop) + update_time_cache(base); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return 0; +} + +static inline struct event * +event_callback_to_event(struct event_callback *evcb) +{ + EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT)); + return EVUTIL_UPCAST(evcb, struct event, ev_evcallback); +} + +static inline struct event_callback * +event_to_event_callback(struct event *ev) +{ + return &ev->ev_evcallback; +} + +struct event_base * +event_init(void) +{ + struct event_base *base = event_base_new_with_config(NULL); + + if (base == NULL) { + event_errx(1, "%s: Unable to construct event_base", __func__); + return NULL; + } + + current_base = base; + + return (base); +} + +struct event_base * +event_base_new(void) +{ + struct event_base *base = NULL; + struct event_config *cfg = event_config_new(); + if (cfg) { + base = event_base_new_with_config(cfg); + event_config_free(cfg); + } + return base; +} + +/** Return true iff 'method' is the name of a method that 'cfg' tells us to + * avoid. */ +static int +event_config_is_avoided_method(const struct event_config *cfg, + const char *method) +{ + struct event_config_entry *entry; + + TAILQ_FOREACH(entry, &cfg->entries, next) { + if (entry->avoid_method != NULL && + strcmp(entry->avoid_method, method) == 0) + return (1); + } + + return (0); +} + +/** Return true iff 'method' is disabled according to the environment. */ +static int +event_is_method_disabled(const char *name) +{ + char environment[64]; + int i; + + evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name); + for (i = 8; environment[i] != '\0'; ++i) + environment[i] = EVUTIL_TOUPPER_(environment[i]); + /* Note that evutil_getenv_() ignores the environment entirely if + * we're setuid */ + return (evutil_getenv_(environment) != NULL); +} + +int +event_base_get_features(const struct event_base *base) +{ + return base->evsel->features; +} + +void +event_enable_debug_mode(void) +{ +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_mode_on_) + event_errx(1, "%s was called twice!", __func__); + if (event_debug_mode_too_late) + event_errx(1, "%s must be called *before* creating any events " + "or event_bases",__func__); + + event_debug_mode_on_ = 1; + + HT_INIT(event_debug_map, &global_debug_map); +#endif +} + +void +event_disable_debug_mode(void) +{ +#ifndef EVENT__DISABLE_DEBUG_MODE + struct event_debug_entry **ent, *victim; + + EVLOCK_LOCK(event_debug_map_lock_, 0); + for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) { + victim = *ent; + ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent); + mm_free(victim); + } + HT_CLEAR(event_debug_map, &global_debug_map); + EVLOCK_UNLOCK(event_debug_map_lock_ , 0); + + event_debug_mode_on_ = 0; +#endif +} + +struct event_base * +event_base_new_with_config(const struct event_config *cfg) +{ + int i; + struct event_base *base; + int should_check_environment; + +#ifndef EVENT__DISABLE_DEBUG_MODE + event_debug_mode_too_late = 1; +#endif + + if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) { + event_warn("%s: calloc", __func__); + return NULL; + } + + if (cfg) + base->flags = cfg->flags; + + should_check_environment = + 
!(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV)); + + { + struct timeval tmp; + int precise_time = + cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER); + int flags; + if (should_check_environment && !precise_time) { + precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL; + if (precise_time) { + base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER; + } + } + flags = precise_time ? EV_MONOT_PRECISE : 0; + evutil_configure_monotonic_time_(&base->monotonic_timer, flags); + + gettime(base, &tmp); + } + + min_heap_ctor_(&base->timeheap); + + base->sig.ev_signal_pair[0] = -1; + base->sig.ev_signal_pair[1] = -1; + base->th_notify_fd[0] = -1; + base->th_notify_fd[1] = -1; + + TAILQ_INIT(&base->active_later_queue); + + evmap_io_initmap_(&base->io); + evmap_signal_initmap_(&base->sigmap); + event_changelist_init_(&base->changelist); + + base->evbase = NULL; + + if (cfg) { + memcpy(&base->max_dispatch_time, + &cfg->max_dispatch_interval, sizeof(struct timeval)); + base->limit_callbacks_after_prio = + cfg->limit_callbacks_after_prio; + } else { + base->max_dispatch_time.tv_sec = -1; + base->limit_callbacks_after_prio = 1; + } + if (cfg && cfg->max_dispatch_callbacks >= 0) { + base->max_dispatch_callbacks = cfg->max_dispatch_callbacks; + } else { + base->max_dispatch_callbacks = INT_MAX; + } + if (base->max_dispatch_callbacks == INT_MAX && + base->max_dispatch_time.tv_sec == -1) + base->limit_callbacks_after_prio = INT_MAX; + + for (i = 0; eventops[i] && !base->evbase; i++) { + if (cfg != NULL) { + /* determine if this backend should be avoided */ + if (event_config_is_avoided_method(cfg, + eventops[i]->name)) + continue; + if ((eventops[i]->features & cfg->require_features) + != cfg->require_features) + continue; + } + + /* also obey the environment variables */ + if (should_check_environment && + event_is_method_disabled(eventops[i]->name)) + continue; + + base->evsel = eventops[i]; + + base->evbase = base->evsel->init(base); + } + + if (base->evbase == NULL) { + event_warnx("%s: no event mechanism available", + __func__); + base->evsel = NULL; + event_base_free(base); + return NULL; + } + + if (evutil_getenv_("EVENT_SHOW_METHOD")) + event_msgx("libevent using: %s", base->evsel->name); + + /* allocate a single active event queue */ + if (event_base_priority_init(base, 1) < 0) { + event_base_free(base); + return NULL; + } + + /* prepare for threading */ + +#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE) + event_debug_created_threadable_ctx_ = 1; +#endif + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (EVTHREAD_LOCKING_ENABLED() && + (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) { + int r; + EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0); + EVTHREAD_ALLOC_COND(base->current_event_cond); + r = evthread_make_base_notifiable(base); + if (r<0) { + event_warnx("%s: Unable to make base notifiable.", __func__); + event_base_free(base); + return NULL; + } + } +#endif + +#ifdef _WIN32 + if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP)) + event_base_start_iocp_(base, cfg->n_cpus_hint); +#endif + + return (base); +} + +int +event_base_start_iocp_(struct event_base *base, int n_cpus) +{ +#ifdef _WIN32 + if (base->iocp) + return 0; + base->iocp = event_iocp_port_launch_(n_cpus); + if (!base->iocp) { + event_warnx("%s: Couldn't launch IOCP", __func__); + return -1; + } + return 0; +#else + return -1; +#endif +} + +void +event_base_stop_iocp_(struct event_base *base) +{ +#ifdef _WIN32 + int rv; + + if (!base->iocp) + return; + rv = event_iocp_shutdown_(base->iocp, -1); + 
EVUTIL_ASSERT(rv >= 0); + base->iocp = NULL; +#endif +} + +static int +event_base_cancel_single_callback_(struct event_base *base, + struct event_callback *evcb, + int run_finalizers) +{ + int result = 0; + + if (evcb->evcb_flags & EVLIST_INIT) { + struct event *ev = event_callback_to_event(evcb); + if (!(ev->ev_flags & EVLIST_INTERNAL)) { + event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING); + result = 1; + } + } else { + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + event_callback_cancel_nolock_(base, evcb, 1); + EVBASE_RELEASE_LOCK(base, th_base_lock); + result = 1; + } + + if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) { + switch (evcb->evcb_closure) { + case EV_CLOSURE_EVENT_FINALIZE: + case EV_CLOSURE_EVENT_FINALIZE_FREE: { + struct event *ev = event_callback_to_event(evcb); + ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg); + if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) + mm_free(ev); + break; + } + case EV_CLOSURE_CB_FINALIZE: + evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg); + break; + default: + break; + } + } + return result; +} + +static int event_base_free_queues_(struct event_base *base, int run_finalizers) +{ + int deleted = 0, i; + + for (i = 0; i < base->nactivequeues; ++i) { + struct event_callback *evcb, *next; + for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) { + next = TAILQ_NEXT(evcb, evcb_active_next); + deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers); + evcb = next; + } + } + + { + struct event_callback *evcb; + while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { + deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers); + } + } + + return deleted; +} + +static void +event_base_free_(struct event_base *base, int run_finalizers) +{ + int i, n_deleted=0; + struct event *ev; + /* XXXX grab the lock? If there is contention when one thread frees + * the base, then the contending thread will be very sad soon. */ + + /* event_base_free(NULL) is how to free the current_base if we + * made it with event_init and forgot to hold a reference to it. */ + if (base == NULL && current_base) + base = current_base; + /* Don't actually free NULL. */ + if (base == NULL) { + event_warnx("%s: no base to free", __func__); + return; + } + /* XXX(niels) - check for internal events first */ + +#ifdef _WIN32 + event_base_stop_iocp_(base); +#endif + + /* threading fds if we have them */ + if (base->th_notify_fd[0] != -1) { + event_del(&base->th_notify); + EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); + if (base->th_notify_fd[1] != -1) + EVUTIL_CLOSESOCKET(base->th_notify_fd[1]); + base->th_notify_fd[0] = -1; + base->th_notify_fd[1] = -1; + event_debug_unassign(&base->th_notify); + } + + /* Delete all non-internal events. 
*/ + evmap_delete_all_(base); + + while ((ev = min_heap_top_(&base->timeheap)) != NULL) { + event_del(ev); + ++n_deleted; + } + for (i = 0; i < base->n_common_timeouts; ++i) { + struct common_timeout_list *ctl = + base->common_timeout_queues[i]; + event_del(&ctl->timeout_event); /* Internal; doesn't count */ + event_debug_unassign(&ctl->timeout_event); + for (ev = TAILQ_FIRST(&ctl->events); ev; ) { + struct event *next = TAILQ_NEXT(ev, + ev_timeout_pos.ev_next_with_common_timeout); + if (!(ev->ev_flags & EVLIST_INTERNAL)) { + event_del(ev); + ++n_deleted; + } + ev = next; + } + mm_free(ctl); + } + if (base->common_timeout_queues) + mm_free(base->common_timeout_queues); + + for (;;) { + /* For finalizers we can register yet another finalizer out from + * finalizer, and iff finalizer will be in active_later_queue we can + * add finalizer to activequeues, and we will have events in + * activequeues after this function returns, which is not what we want + * (we even have an assertion for this). + * + * A simple case is bufferevent with underlying (i.e. filters). + */ + int i = event_base_free_queues_(base, run_finalizers); + event_debug(("%s: %d events freed", __func__, i)); + if (!i) { + break; + } + n_deleted += i; + } + + if (n_deleted) + event_debug(("%s: %d events were still set in base", + __func__, n_deleted)); + + while (LIST_FIRST(&base->once_events)) { + struct event_once *eonce = LIST_FIRST(&base->once_events); + LIST_REMOVE(eonce, next_once); + mm_free(eonce); + } + + if (base->evsel != NULL && base->evsel->dealloc != NULL) + base->evsel->dealloc(base); + + for (i = 0; i < base->nactivequeues; ++i) + EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i])); + + EVUTIL_ASSERT(min_heap_empty_(&base->timeheap)); + min_heap_dtor_(&base->timeheap); + + mm_free(base->activequeues); + + evmap_io_clear_(&base->io); + evmap_signal_clear_(&base->sigmap); + event_changelist_freemem_(&base->changelist); + + EVTHREAD_FREE_LOCK(base->th_base_lock, 0); + EVTHREAD_FREE_COND(base->current_event_cond); + + /* If we're freeing current_base, there won't be a current_base. */ + if (base == current_base) + current_base = NULL; + mm_free(base); +} + +void +event_base_free_nofinalize(struct event_base *base) +{ + event_base_free_(base, 0); +} + +void +event_base_free(struct event_base *base) +{ + event_base_free_(base, 1); +} + +/* Fake eventop; used to disable the backend temporarily inside event_reinit + * so that we can call event_del() on an event without telling the backend. + */ +static int +nil_backend_del(struct event_base *b, evutil_socket_t fd, short old, + short events, void *fdinfo) +{ + return 0; +} +const struct eventop nil_eventop = { + "nil", + NULL, /* init: unused. */ + NULL, /* add: unused. */ + nil_backend_del, /* del: used, so needs to be killed. */ + NULL, /* dispatch: unused. */ + NULL, /* dealloc: unused. */ + 0, 0, 0 +}; + +/* reinitialize the event base after a fork */ +int +event_reinit(struct event_base *base) +{ + const struct eventop *evsel; + int res = 0; + int was_notifiable = 0; + int had_signal_added = 0; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (base->running_loop) { + event_warnx("%s: forked from the event_loop.", __func__); + res = -1; + goto done; + } + + evsel = base->evsel; + + /* check if this event mechanism requires reinit on the backend */ + if (evsel->need_reinit) { + /* We're going to call event_del() on our notify events (the + * ones that tell about signals and wakeup events). 
But we + * don't actually want to tell the backend to change its + * state, since it might still share some resource (a kqueue, + * an epoll fd) with the parent process, and we don't want to + * delete the fds from _that_ backend, we temporarily stub out + * the evsel with a replacement. + */ + base->evsel = &nil_eventop; + } + + /* We need to re-create a new signal-notification fd and a new + * thread-notification fd. Otherwise, we'll still share those with + * the parent process, which would make any notification sent to them + * get received by one or both of the event loops, more or less at + * random. + */ + if (base->sig.ev_signal_added) { + event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK); + event_debug_unassign(&base->sig.ev_signal); + memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal)); + had_signal_added = 1; + base->sig.ev_signal_added = 0; + } + if (base->sig.ev_signal_pair[0] != -1) + EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]); + if (base->sig.ev_signal_pair[1] != -1) + EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]); + if (base->th_notify_fn != NULL) { + was_notifiable = 1; + base->th_notify_fn = NULL; + } + if (base->th_notify_fd[0] != -1) { + event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK); + EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); + if (base->th_notify_fd[1] != -1) + EVUTIL_CLOSESOCKET(base->th_notify_fd[1]); + base->th_notify_fd[0] = -1; + base->th_notify_fd[1] = -1; + event_debug_unassign(&base->th_notify); + } + + /* Replace the original evsel. */ + base->evsel = evsel; + + if (evsel->need_reinit) { + /* Reconstruct the backend through brute-force, so that we do + * not share any structures with the parent process. For some + * backends, this is necessary: epoll and kqueue, for + * instance, have events associated with a kernel + * structure. If didn't reinitialize, we'd share that + * structure with the parent process, and any changes made by + * the parent would affect our backend's behavior (and vice + * versa). + */ + if (base->evsel->dealloc != NULL) + base->evsel->dealloc(base); + base->evbase = evsel->init(base); + if (base->evbase == NULL) { + event_errx(1, + "%s: could not reinitialize event mechanism", + __func__); + res = -1; + goto done; + } + + /* Empty out the changelist (if any): we are starting from a + * blank slate. */ + event_changelist_freemem_(&base->changelist); + + /* Tell the event maps to re-inform the backend about all + * pending events. This will make the signal notification + * event get re-created if necessary. */ + if (evmap_reinit_(base) < 0) + res = -1; + } else { + res = evsig_init_(base); + if (res == 0 && had_signal_added) { + res = event_add_nolock_(&base->sig.ev_signal, NULL, 0); + if (res == 0) + base->sig.ev_signal_added = 1; + } + } + + /* If we were notifiable before, and nothing just exploded, become + * notifiable again. 
*/ + if (was_notifiable && res == 0) + res = evthread_make_base_notifiable_nolock_(base); + +done: + EVBASE_RELEASE_LOCK(base, th_base_lock); + return (res); +} + +/* Get the monotonic time for this event_base' timer */ +int +event_gettime_monotonic(struct event_base *base, struct timeval *tv) +{ + int rv = -1; + + if (base && tv) { + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv); + EVBASE_RELEASE_LOCK(base, th_base_lock); + } + + return rv; +} + +const char ** +event_get_supported_methods(void) +{ + static const char **methods = NULL; + const struct eventop **method; + const char **tmp; + int i = 0, k; + + /* count all methods */ + for (method = &eventops[0]; *method != NULL; ++method) { + ++i; + } + + /* allocate one more than we need for the NULL pointer */ + tmp = mm_calloc((i + 1), sizeof(char *)); + if (tmp == NULL) + return (NULL); + + /* populate the array with the supported methods */ + for (k = 0, i = 0; eventops[k] != NULL; ++k) { + tmp[i++] = eventops[k]->name; + } + tmp[i] = NULL; + + if (methods != NULL) + mm_free((char**)methods); + + methods = tmp; + + return (methods); +} + +struct event_config * +event_config_new(void) +{ + struct event_config *cfg = mm_calloc(1, sizeof(*cfg)); + + if (cfg == NULL) + return (NULL); + + TAILQ_INIT(&cfg->entries); + cfg->max_dispatch_interval.tv_sec = -1; + cfg->max_dispatch_callbacks = INT_MAX; + cfg->limit_callbacks_after_prio = 1; + + return (cfg); +} + +static void +event_config_entry_free(struct event_config_entry *entry) +{ + if (entry->avoid_method != NULL) + mm_free((char *)entry->avoid_method); + mm_free(entry); +} + +void +event_config_free(struct event_config *cfg) +{ + struct event_config_entry *entry; + + while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) { + TAILQ_REMOVE(&cfg->entries, entry, next); + event_config_entry_free(entry); + } + mm_free(cfg); +} + +int +event_config_set_flag(struct event_config *cfg, int flag) +{ + if (!cfg) + return -1; + cfg->flags |= flag; + return 0; +} + +int +event_config_avoid_method(struct event_config *cfg, const char *method) +{ + struct event_config_entry *entry = mm_malloc(sizeof(*entry)); + if (entry == NULL) + return (-1); + + if ((entry->avoid_method = mm_strdup(method)) == NULL) { + mm_free(entry); + return (-1); + } + + TAILQ_INSERT_TAIL(&cfg->entries, entry, next); + + return (0); +} + +int +event_config_require_features(struct event_config *cfg, + int features) +{ + if (!cfg) + return (-1); + cfg->require_features = features; + return (0); +} + +int +event_config_set_num_cpus_hint(struct event_config *cfg, int cpus) +{ + if (!cfg) + return (-1); + cfg->n_cpus_hint = cpus; + return (0); +} + +int +event_config_set_max_dispatch_interval(struct event_config *cfg, + const struct timeval *max_interval, int max_callbacks, int min_priority) +{ + if (max_interval) + memcpy(&cfg->max_dispatch_interval, max_interval, + sizeof(struct timeval)); + else + cfg->max_dispatch_interval.tv_sec = -1; + cfg->max_dispatch_callbacks = + max_callbacks >= 0 ? 
max_callbacks : INT_MAX; + if (min_priority < 0) + min_priority = 0; + cfg->limit_callbacks_after_prio = min_priority; + return (0); +} + +int +event_priority_init(int npriorities) +{ + return event_base_priority_init(current_base, npriorities); +} + +int +event_base_priority_init(struct event_base *base, int npriorities) +{ + int i, r; + r = -1; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (N_ACTIVE_CALLBACKS(base) || npriorities < 1 + || npriorities >= EVENT_MAX_PRIORITIES) + goto err; + + if (npriorities == base->nactivequeues) + goto ok; + + if (base->nactivequeues) { + mm_free(base->activequeues); + base->nactivequeues = 0; + } + + /* Allocate our priority queues */ + base->activequeues = (struct evcallback_list *) + mm_calloc(npriorities, sizeof(struct evcallback_list)); + if (base->activequeues == NULL) { + event_warn("%s: calloc", __func__); + goto err; + } + base->nactivequeues = npriorities; + + for (i = 0; i < base->nactivequeues; ++i) { + TAILQ_INIT(&base->activequeues[i]); + } + +ok: + r = 0; +err: + EVBASE_RELEASE_LOCK(base, th_base_lock); + return (r); +} + +int +event_base_get_npriorities(struct event_base *base) +{ + + int n; + if (base == NULL) + base = current_base; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + n = base->nactivequeues; + EVBASE_RELEASE_LOCK(base, th_base_lock); + return (n); +} + +int +event_base_get_num_events(struct event_base *base, unsigned int type) +{ + int r = 0; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (type & EVENT_BASE_COUNT_ACTIVE) + r += base->event_count_active; + + if (type & EVENT_BASE_COUNT_VIRTUAL) + r += base->virtual_event_count; + + if (type & EVENT_BASE_COUNT_ADDED) + r += base->event_count; + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + return r; +} + +int +event_base_get_max_events(struct event_base *base, unsigned int type, int clear) +{ + int r = 0; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (type & EVENT_BASE_COUNT_ACTIVE) { + r += base->event_count_active_max; + if (clear) + base->event_count_active_max = 0; + } + + if (type & EVENT_BASE_COUNT_VIRTUAL) { + r += base->virtual_event_count_max; + if (clear) + base->virtual_event_count_max = 0; + } + + if (type & EVENT_BASE_COUNT_ADDED) { + r += base->event_count_max; + if (clear) + base->event_count_max = 0; + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + return r; +} + +/* Returns true iff we're currently watching any events. */ +static int +event_haveevents(struct event_base *base) +{ + /* Caller must hold th_base_lock */ + return (base->virtual_event_count > 0 || base->event_count > 0); +} + +/* "closure" function called when processing active signal events */ +static inline void +event_signal_closure(struct event_base *base, struct event *ev) +{ + short ncalls; + int should_break; + + /* Allows deletes to work */ + ncalls = ev->ev_ncalls; + if (ncalls != 0) + ev->ev_pncalls = &ncalls; + EVBASE_RELEASE_LOCK(base, th_base_lock); + while (ncalls) { + ncalls--; + ev->ev_ncalls = ncalls; + if (ncalls == 0) + ev->ev_pncalls = NULL; + (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + should_break = base->event_break; + EVBASE_RELEASE_LOCK(base, th_base_lock); + + if (should_break) { + if (ncalls != 0) + ev->ev_pncalls = NULL; + return; + } + } +} + +/* Common timeouts are special timeouts that are handled as queues rather than + * in the minheap. 
This is more efficient than the minheap if we happen to + * know that we're going to get several thousands of timeout events all with + * the same timeout value. + * + * Since all our timeout handling code assumes timevals can be copied, + * assigned, etc, we can't use "magic pointer" to encode these common + * timeouts. Searching through a list to see if every timeout is common could + * also get inefficient. Instead, we take advantage of the fact that tv_usec + * is 32 bits long, but only uses 20 of those bits (since it can never be over + * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits + * of index into the event_base's array of common timeouts. + */ + +#define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK +#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000 +#define COMMON_TIMEOUT_IDX_SHIFT 20 +#define COMMON_TIMEOUT_MASK 0xf0000000 +#define COMMON_TIMEOUT_MAGIC 0x50000000 + +#define COMMON_TIMEOUT_IDX(tv) \ + (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT) + +/** Return true iff 'tv' is a common timeout in 'base' */ +static inline int +is_common_timeout(const struct timeval *tv, + const struct event_base *base) +{ + int idx; + if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC) + return 0; + idx = COMMON_TIMEOUT_IDX(tv); + return idx < base->n_common_timeouts; +} + +/* True iff tv1 and tv2 have the same common-timeout index, or if neither + * one is a common timeout. */ +static inline int +is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2) +{ + return (tv1->tv_usec & ~MICROSECONDS_MASK) == + (tv2->tv_usec & ~MICROSECONDS_MASK); +} + +/** Requires that 'tv' is a common timeout. Return the corresponding + * common_timeout_list. */ +static inline struct common_timeout_list * +get_common_timeout_list(struct event_base *base, const struct timeval *tv) +{ + return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)]; +} + +#if 0 +static inline int +common_timeout_ok(const struct timeval *tv, + struct event_base *base) +{ + const struct timeval *expect = + &get_common_timeout_list(base, tv)->duration; + return tv->tv_sec == expect->tv_sec && + tv->tv_usec == expect->tv_usec; +} +#endif + +/* Add the timeout for the first event in given common timeout list to the + * event_base's minheap. */ +static void +common_timeout_schedule(struct common_timeout_list *ctl, + const struct timeval *now, struct event *head) +{ + struct timeval timeout = head->ev_timeout; + timeout.tv_usec &= MICROSECONDS_MASK; + event_add_nolock_(&ctl->timeout_event, &timeout, 1); +} + +/* Callback: invoked when the timeout for a common timeout queue triggers. + * This means that (at least) the first event in that queue should be run, + * and the timeout should be rescheduled if there are more events. 
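+ *
+ * Callers opt in to a common timeout by asking the base for a shared
+ * timeval and then adding events with it; a minimal sketch (assuming an
+ * existing base and event) looks like:
+ *
+ *     struct timeval five_sec = { 5, 0 };
+ *     const struct timeval *common =
+ *         event_base_init_common_timeout(base, &five_sec);
+ *     event_add(ev, common);
+ *
+ * The returned timeval carries the magic/index bits described above, so
+ * the add path files the event into the matching queue instead of the
+ * minheap.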
*/ +static void +common_timeout_callback(evutil_socket_t fd, short what, void *arg) +{ + struct timeval now; + struct common_timeout_list *ctl = arg; + struct event_base *base = ctl->base; + struct event *ev = NULL; + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + gettime(base, &now); + while (1) { + ev = TAILQ_FIRST(&ctl->events); + if (!ev || ev->ev_timeout.tv_sec > now.tv_sec || + (ev->ev_timeout.tv_sec == now.tv_sec && + (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec)) + break; + event_del_nolock_(ev, EVENT_DEL_NOBLOCK); + event_active_nolock_(ev, EV_TIMEOUT, 1); + } + if (ev) + common_timeout_schedule(ctl, &now, ev); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +#define MAX_COMMON_TIMEOUTS 256 + +const struct timeval * +event_base_init_common_timeout(struct event_base *base, + const struct timeval *duration) +{ + int i; + struct timeval tv; + const struct timeval *result=NULL; + struct common_timeout_list *new_ctl; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (duration->tv_usec > 1000000) { + memcpy(&tv, duration, sizeof(struct timeval)); + if (is_common_timeout(duration, base)) + tv.tv_usec &= MICROSECONDS_MASK; + tv.tv_sec += tv.tv_usec / 1000000; + tv.tv_usec %= 1000000; + duration = &tv; + } + for (i = 0; i < base->n_common_timeouts; ++i) { + const struct common_timeout_list *ctl = + base->common_timeout_queues[i]; + if (duration->tv_sec == ctl->duration.tv_sec && + duration->tv_usec == + (ctl->duration.tv_usec & MICROSECONDS_MASK)) { + EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base)); + result = &ctl->duration; + goto done; + } + } + if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) { + event_warnx("%s: Too many common timeouts already in use; " + "we only support %d per event_base", __func__, + MAX_COMMON_TIMEOUTS); + goto done; + } + if (base->n_common_timeouts_allocated == base->n_common_timeouts) { + int n = base->n_common_timeouts < 16 ? 16 : + base->n_common_timeouts*2; + struct common_timeout_list **newqueues = + mm_realloc(base->common_timeout_queues, + n*sizeof(struct common_timeout_queue *)); + if (!newqueues) { + event_warn("%s: realloc",__func__); + goto done; + } + base->n_common_timeouts_allocated = n; + base->common_timeout_queues = newqueues; + } + new_ctl = mm_calloc(1, sizeof(struct common_timeout_list)); + if (!new_ctl) { + event_warn("%s: calloc",__func__); + goto done; + } + TAILQ_INIT(&new_ctl->events); + new_ctl->duration.tv_sec = duration->tv_sec; + new_ctl->duration.tv_usec = + duration->tv_usec | COMMON_TIMEOUT_MAGIC | + (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT); + evtimer_assign(&new_ctl->timeout_event, base, + common_timeout_callback, new_ctl); + new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL; + event_priority_set(&new_ctl->timeout_event, 0); + new_ctl->base = base; + base->common_timeout_queues[base->n_common_timeouts++] = new_ctl; + result = &new_ctl->duration; + +done: + if (result) + EVUTIL_ASSERT(is_common_timeout(result, base)); + + EVBASE_RELEASE_LOCK(base, th_base_lock); + return result; +} + +/* Closure function invoked when we're activating a persistent event. */ +static inline void +event_persist_closure(struct event_base *base, struct event *ev) +{ + void (*evcb_callback)(evutil_socket_t, short, void *); + + // Other fields of *ev that must be stored before executing + evutil_socket_t evcb_fd; + short evcb_res; + void *evcb_arg; + + /* reschedule the persistent event if we have a timeout. 
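+ *
+ * Concretely: a 5 s EV_PERSIST timer that was due at t=10 but whose
+ * callback only runs at t=10.3 is re-armed for t=15 rather than t=15.3,
+ * so periodic timers do not drift (see the detailed comment below for
+ * the clock-jump fallback).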
*/ + if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) { + /* If there was a timeout, we want it to run at an interval of + * ev_io_timeout after the last time it was _scheduled_ for, + * not ev_io_timeout after _now_. If it fired for another + * reason, though, the timeout ought to start ticking _now_. */ + struct timeval run_at, relative_to, delay, now; + ev_uint32_t usec_mask = 0; + EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout, + &ev->ev_io_timeout)); + gettime(base, &now); + if (is_common_timeout(&ev->ev_timeout, base)) { + delay = ev->ev_io_timeout; + usec_mask = delay.tv_usec & ~MICROSECONDS_MASK; + delay.tv_usec &= MICROSECONDS_MASK; + if (ev->ev_res & EV_TIMEOUT) { + relative_to = ev->ev_timeout; + relative_to.tv_usec &= MICROSECONDS_MASK; + } else { + relative_to = now; + } + } else { + delay = ev->ev_io_timeout; + if (ev->ev_res & EV_TIMEOUT) { + relative_to = ev->ev_timeout; + } else { + relative_to = now; + } + } + evutil_timeradd(&relative_to, &delay, &run_at); + if (evutil_timercmp(&run_at, &now, <)) { + /* Looks like we missed at least one invocation due to + * a clock jump, not running the event loop for a + * while, really slow callbacks, or + * something. Reschedule relative to now. + */ + evutil_timeradd(&now, &delay, &run_at); + } + run_at.tv_usec |= usec_mask; + event_add_nolock_(ev, &run_at, 1); + } + + // Save our callback before we release the lock + evcb_callback = ev->ev_callback; + evcb_fd = ev->ev_fd; + evcb_res = ev->ev_res; + evcb_arg = ev->ev_arg; + + // Release the lock + EVBASE_RELEASE_LOCK(base, th_base_lock); + + // Execute the callback + (evcb_callback)(evcb_fd, evcb_res, evcb_arg); +} + +/* + Helper for event_process_active to process all the events in a single queue, + releasing the lock as we go. This function requires that the lock be held + when it's invoked. Returns -1 if we get a signal or an event_break that + means we should stop processing any active events now. Otherwise returns + the number of non-internal event_callbacks that we processed. +*/ +static int +event_process_active_single_queue(struct event_base *base, + struct evcallback_list *activeq, + int max_to_process, const struct timeval *endtime) +{ + struct event_callback *evcb; + int count = 0; + + EVUTIL_ASSERT(activeq != NULL); + + for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) { + struct event *ev=NULL; + if (evcb->evcb_flags & EVLIST_INIT) { + ev = event_callback_to_event(evcb); + + if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING) + event_queue_remove_active(base, evcb); + else + event_del_nolock_(ev, EVENT_DEL_NOBLOCK); + event_debug(( + "event_process_active: event: %p, %s%s%scall %p", + ev, + ev->ev_res & EV_READ ? "EV_READ " : " ", + ev->ev_res & EV_WRITE ? "EV_WRITE " : " ", + ev->ev_res & EV_CLOSED ? 
"EV_CLOSED " : " ", + ev->ev_callback)); + } else { + event_queue_remove_active(base, evcb); + event_debug(("event_process_active: event_callback %p, " + "closure %d, call %p", + evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); + } + + if (!(evcb->evcb_flags & EVLIST_INTERNAL)) + ++count; + + + base->current_event = evcb; +#ifndef EVENT__DISABLE_THREAD_SUPPORT + base->current_event_waiters = 0; +#endif + + switch (evcb->evcb_closure) { + case EV_CLOSURE_EVENT_SIGNAL: + EVUTIL_ASSERT(ev != NULL); + event_signal_closure(base, ev); + break; + case EV_CLOSURE_EVENT_PERSIST: + EVUTIL_ASSERT(ev != NULL); + event_persist_closure(base, ev); + break; + case EV_CLOSURE_EVENT: { + void (*evcb_callback)(evutil_socket_t, short, void *); + short res; + EVUTIL_ASSERT(ev != NULL); + evcb_callback = *ev->ev_callback; + res = ev->ev_res; + EVBASE_RELEASE_LOCK(base, th_base_lock); + evcb_callback(ev->ev_fd, res, ev->ev_arg); + } + break; + case EV_CLOSURE_CB_SELF: { + void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; + EVBASE_RELEASE_LOCK(base, th_base_lock); + evcb_selfcb(evcb, evcb->evcb_arg); + } + break; + case EV_CLOSURE_EVENT_FINALIZE: + case EV_CLOSURE_EVENT_FINALIZE_FREE: { + void (*evcb_evfinalize)(struct event *, void *); + int evcb_closure = evcb->evcb_closure; + EVUTIL_ASSERT(ev != NULL); + base->current_event = NULL; + evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; + EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); + EVBASE_RELEASE_LOCK(base, th_base_lock); + evcb_evfinalize(ev, ev->ev_arg); + event_debug_note_teardown_(ev); + if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) + mm_free(ev); + } + break; + case EV_CLOSURE_CB_FINALIZE: { + void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; + base->current_event = NULL; + EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); + EVBASE_RELEASE_LOCK(base, th_base_lock); + evcb_cbfinalize(evcb, evcb->evcb_arg); + } + break; + default: + EVUTIL_ASSERT(0); + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + base->current_event = NULL; +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (base->current_event_waiters) { + base->current_event_waiters = 0; + EVTHREAD_COND_BROADCAST(base->current_event_cond); + } +#endif + + if (base->event_break) + return -1; + if (count >= max_to_process) + return count; + if (count && endtime) { + struct timeval now; + update_time_cache(base); + gettime(base, &now); + if (evutil_timercmp(&now, endtime, >=)) + return count; + } + if (base->event_continue) + break; + } + return count; +} + +/* + * Active events are stored in priority queues. Lower priorities are always + * process before higher priorities. Low priority events can starve high + * priority ones. 
+ */ + +static int +event_process_active(struct event_base *base) +{ + /* Caller must hold th_base_lock */ + struct evcallback_list *activeq = NULL; + int i, c = 0; + const struct timeval *endtime; + struct timeval tv; + const int maxcb = base->max_dispatch_callbacks; + const int limit_after_prio = base->limit_callbacks_after_prio; + if (base->max_dispatch_time.tv_sec >= 0) { + update_time_cache(base); + gettime(base, &tv); + evutil_timeradd(&base->max_dispatch_time, &tv, &tv); + endtime = &tv; + } else { + endtime = NULL; + } + + for (i = 0; i < base->nactivequeues; ++i) { + if (TAILQ_FIRST(&base->activequeues[i]) != NULL) { + base->event_running_priority = i; + activeq = &base->activequeues[i]; + if (i < limit_after_prio) + c = event_process_active_single_queue(base, activeq, + INT_MAX, NULL); + else + c = event_process_active_single_queue(base, activeq, + maxcb, endtime); + if (c < 0) { + goto done; + } else if (c > 0) + break; /* Processed a real event; do not + * consider lower-priority events */ + /* If we get here, all of the events we processed + * were internal. Continue. */ + } + } + +done: + base->event_running_priority = -1; + + return c; +} + +/* + * Wait continuously for events. We exit only if no events are left. + */ + +int +event_dispatch(void) +{ + return (event_loop(0)); +} + +int +event_base_dispatch(struct event_base *event_base) +{ + return (event_base_loop(event_base, 0)); +} + +const char * +event_base_get_method(const struct event_base *base) +{ + EVUTIL_ASSERT(base); + return (base->evsel->name); +} + +/** Callback: used to implement event_base_loopexit by telling the event_base + * that it's time to exit its loop. */ +static void +event_loopexit_cb(evutil_socket_t fd, short what, void *arg) +{ + struct event_base *base = arg; + base->event_gotterm = 1; +} + +int +event_loopexit(const struct timeval *tv) +{ + return (event_once(-1, EV_TIMEOUT, event_loopexit_cb, + current_base, tv)); +} + +int +event_base_loopexit(struct event_base *event_base, const struct timeval *tv) +{ + return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb, + event_base, tv)); +} + +int +event_loopbreak(void) +{ + return (event_base_loopbreak(current_base)); +} + +int +event_base_loopbreak(struct event_base *event_base) +{ + int r = 0; + if (event_base == NULL) + return (-1); + + EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); + event_base->event_break = 1; + + if (EVBASE_NEED_NOTIFY(event_base)) { + r = evthread_notify_base(event_base); + } else { + r = (0); + } + EVBASE_RELEASE_LOCK(event_base, th_base_lock); + return r; +} + +int +event_base_loopcontinue(struct event_base *event_base) +{ + int r = 0; + if (event_base == NULL) + return (-1); + + EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); + event_base->event_continue = 1; + + if (EVBASE_NEED_NOTIFY(event_base)) { + r = evthread_notify_base(event_base); + } else { + r = (0); + } + EVBASE_RELEASE_LOCK(event_base, th_base_lock); + return r; +} + +int +event_base_got_break(struct event_base *event_base) +{ + int res; + EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); + res = event_base->event_break; + EVBASE_RELEASE_LOCK(event_base, th_base_lock); + return res; +} + +int +event_base_got_exit(struct event_base *event_base) +{ + int res; + EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); + res = event_base->event_gotterm; + EVBASE_RELEASE_LOCK(event_base, th_base_lock); + return res; +} + +/* not thread safe */ + +int +event_loop(int flags) +{ + return event_base_loop(current_base, flags); +} + +int +event_base_loop(struct 
event_base *base, int flags) +{ + const struct eventop *evsel = base->evsel; + struct timeval tv; + struct timeval *tv_p; + int res, done, retval = 0; + + /* Grab the lock. We will release it inside evsel.dispatch, and again + * as we invoke user callbacks. */ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (base->running_loop) { + event_warnx("%s: reentrant invocation. Only one event_base_loop" + " can run on each event_base at once.", __func__); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return -1; + } + + base->running_loop = 1; + + clear_time_cache(base); + + if (base->sig.ev_signal_added && base->sig.ev_n_signals_added) + evsig_set_base_(base); + + done = 0; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + base->th_owner_id = EVTHREAD_GET_ID(); +#endif + + base->event_gotterm = base->event_break = 0; + + while (!done) { + base->event_continue = 0; + base->n_deferreds_queued = 0; + + /* Terminate the loop if we have been asked to */ + if (base->event_gotterm) { + break; + } + + if (base->event_break) { + break; + } + + tv_p = &tv; + if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) { + timeout_next(base, &tv_p); + } else { + /* + * if we have active events, we just poll new events + * without waiting. + */ + evutil_timerclear(&tv); + } + + /* If we have no events, we just exit */ + if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) && + !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) { + event_debug(("%s: no events registered.", __func__)); + retval = 1; + goto done; + } + + event_queue_make_later_events_active(base); + + clear_time_cache(base); + + res = evsel->dispatch(base, tv_p); + + if (res == -1) { + event_debug(("%s: dispatch returned unsuccessfully.", + __func__)); + retval = -1; + goto done; + } + + update_time_cache(base); + + timeout_process(base); + + if (N_ACTIVE_CALLBACKS(base)) { + int n = event_process_active(base); + if ((flags & EVLOOP_ONCE) + && N_ACTIVE_CALLBACKS(base) == 0 + && n != 0) + done = 1; + } else if (flags & EVLOOP_NONBLOCK) + done = 1; + } + event_debug(("%s: asked to terminate loop.", __func__)); + +done: + clear_time_cache(base); + base->running_loop = 0; + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + return (retval); +} + +/* One-time callback to implement event_base_once: invokes the user callback, + * then deletes the allocated storage */ +static void +event_once_cb(evutil_socket_t fd, short events, void *arg) +{ + struct event_once *eonce = arg; + + (*eonce->cb)(fd, events, eonce->arg); + EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock); + LIST_REMOVE(eonce, next_once); + EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock); + event_debug_unassign(&eonce->ev); + mm_free(eonce); +} + +/* not threadsafe, event scheduled once. */ +int +event_once(evutil_socket_t fd, short events, + void (*callback)(evutil_socket_t, short, void *), + void *arg, const struct timeval *tv) +{ + return event_base_once(current_base, fd, events, callback, arg, tv); +} + +/* Schedules an event once */ +int +event_base_once(struct event_base *base, evutil_socket_t fd, short events, + void (*callback)(evutil_socket_t, short, void *), + void *arg, const struct timeval *tv) +{ + struct event_once *eonce; + int res = 0; + int activate = 0; + + /* We cannot support signals that just fire once, or persistent + * events. 
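+ *
+ * One-shot timeouts and one-shot I/O are the intended uses; a sketch
+ * (assuming an existing base, fd, callback cb and argument arg) is:
+ *
+ *     struct timeval two_sec = { 2, 0 };
+ *     event_base_once(base, -1, EV_TIMEOUT, cb, arg, &two_sec);
+ *     event_base_once(base, fd, EV_READ, cb, arg, NULL);
+ *
+ * The internal event_once storage is freed after the callback has run,
+ * so the caller has nothing to clean up once the event fires.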
*/ + if (events & (EV_SIGNAL|EV_PERSIST)) + return (-1); + + if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL) + return (-1); + + eonce->cb = callback; + eonce->arg = arg; + + if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) { + evtimer_assign(&eonce->ev, base, event_once_cb, eonce); + + if (tv == NULL || ! evutil_timerisset(tv)) { + /* If the event is going to become active immediately, + * don't put it on the timeout queue. This is one + * idiom for scheduling a callback, so let's make + * it fast (and order-preserving). */ + activate = 1; + } + } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) { + events &= EV_READ|EV_WRITE|EV_CLOSED; + + event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce); + } else { + /* Bad event combination */ + mm_free(eonce); + return (-1); + } + + if (res == 0) { + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (activate) + event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1); + else + res = event_add_nolock_(&eonce->ev, tv, 0); + + if (res != 0) { + mm_free(eonce); + return (res); + } else { + LIST_INSERT_HEAD(&base->once_events, eonce, next_once); + } + EVBASE_RELEASE_LOCK(base, th_base_lock); + } + + return (0); +} + +int +event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg) +{ + if (!base) + base = current_base; + if (arg == &event_self_cbarg_ptr_) + arg = ev; + + if (!(events & EV_SIGNAL)) + event_debug_assert_socket_nonblocking_(fd); + event_debug_assert_not_added_(ev); + + ev->ev_base = base; + + ev->ev_callback = callback; + ev->ev_arg = arg; + ev->ev_fd = fd; + ev->ev_events = events; + ev->ev_res = 0; + ev->ev_flags = EVLIST_INIT; + ev->ev_ncalls = 0; + ev->ev_pncalls = NULL; + + if (events & EV_SIGNAL) { + if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) { + event_warnx("%s: EV_SIGNAL is not compatible with " + "EV_READ, EV_WRITE or EV_CLOSED", __func__); + return -1; + } + ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL; + } else { + if (events & EV_PERSIST) { + evutil_timerclear(&ev->ev_io_timeout); + ev->ev_closure = EV_CLOSURE_EVENT_PERSIST; + } else { + ev->ev_closure = EV_CLOSURE_EVENT; + } + } + + min_heap_elem_init_(ev); + + if (base != NULL) { + /* by default, we put new events into the middle priority */ + ev->ev_pri = base->nactivequeues / 2; + } + + event_debug_note_setup_(ev); + + return 0; +} + +int +event_base_set(struct event_base *base, struct event *ev) +{ + /* Only innocent events may be assigned to a different base */ + if (ev->ev_flags != EVLIST_INIT) + return (-1); + + event_debug_assert_is_setup_(ev); + + ev->ev_base = base; + ev->ev_pri = base->nactivequeues/2; + + return (0); +} + +void +event_set(struct event *ev, evutil_socket_t fd, short events, + void (*callback)(evutil_socket_t, short, void *), void *arg) +{ + int r; + r = event_assign(ev, current_base, fd, events, callback, arg); + EVUTIL_ASSERT(r == 0); +} + +void * +event_self_cbarg(void) +{ + return &event_self_cbarg_ptr_; +} + +struct event * +event_base_get_running_event(struct event_base *base) +{ + struct event *ev = NULL; + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (EVBASE_IN_THREAD(base)) { + struct event_callback *evcb = base->current_event; + if (evcb->evcb_flags & EVLIST_INIT) + ev = event_callback_to_event(evcb); + } + EVBASE_RELEASE_LOCK(base, th_base_lock); + return ev; +} + +struct event * +event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg) 
+{ + struct event *ev; + ev = mm_malloc(sizeof(struct event)); + if (ev == NULL) + return (NULL); + if (event_assign(ev, base, fd, events, cb, arg) < 0) { + mm_free(ev); + return (NULL); + } + + return (ev); +} + +void +event_free(struct event *ev) +{ + /* This is disabled, so that events which have been finalized be a + * valid target for event_free(). That's */ + // event_debug_assert_is_setup_(ev); + + /* make sure that this event won't be coming back to haunt us. */ + event_del(ev); + event_debug_note_teardown_(ev); + mm_free(ev); + +} + +void +event_debug_unassign(struct event *ev) +{ + event_debug_assert_not_added_(ev); + event_debug_note_teardown_(ev); + + ev->ev_flags &= ~EVLIST_INIT; +} + +#define EVENT_FINALIZE_FREE_ 0x10000 +static int +event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb) +{ + ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ? + EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE; + + event_del_nolock_(ev, EVENT_DEL_NOBLOCK); + ev->ev_closure = closure; + ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb; + event_active_nolock_(ev, EV_FINALIZE, 1); + ev->ev_flags |= EVLIST_FINALIZING; + return 0; +} + +static int +event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb) +{ + int r; + struct event_base *base = ev->ev_base; + if (EVUTIL_FAILURE_CHECK(!base)) { + event_warnx("%s: event has no event_base set.", __func__); + return -1; + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + r = event_finalize_nolock_(base, flags, ev, cb); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +int +event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) +{ + return event_finalize_impl_(flags, ev, cb); +} + +int +event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) +{ + return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb); +} + +void +event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) +{ + struct event *ev = NULL; + if (evcb->evcb_flags & EVLIST_INIT) { + ev = event_callback_to_event(evcb); + event_del_nolock_(ev, EVENT_DEL_NOBLOCK); + } else { + event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/ + } + + evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE; + evcb->evcb_cb_union.evcb_cbfinalize = cb; + event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/ + evcb->evcb_flags |= EVLIST_FINALIZING; +} + +void +event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + event_callback_finalize_nolock_(base, flags, evcb, cb); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided + * callback will be invoked on *one of them*, after they have *all* been + * finalized. */ +int +event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *)) +{ + int n_pending = 0, i; + + if (base == NULL) + base = current_base; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + event_debug(("%s: %d events finalizing", __func__, n_cbs)); + + /* At most one can be currently executing; the rest we just + * cancel... But we always make sure that the finalize callback + * runs. 
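+ * That is what the n_pending counter below tracks: if none of the
+ * callbacks turned out to be the one currently executing, we explicitly
+ * finalize evcbs[0] so that 'cb' is still invoked exactly once.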
*/ + for (i = 0; i < n_cbs; ++i) { + struct event_callback *evcb = evcbs[i]; + if (evcb == base->current_event) { + event_callback_finalize_nolock_(base, 0, evcb, cb); + ++n_pending; + } else { + event_callback_cancel_nolock_(base, evcb, 0); + } + } + + if (n_pending == 0) { + /* Just do the first one. */ + event_callback_finalize_nolock_(base, 0, evcbs[0], cb); + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); + return 0; +} + +/* + * Set's the priority of an event - if an event is already scheduled + * changing the priority is going to fail. + */ + +int +event_priority_set(struct event *ev, int pri) +{ + event_debug_assert_is_setup_(ev); + + if (ev->ev_flags & EVLIST_ACTIVE) + return (-1); + if (pri < 0 || pri >= ev->ev_base->nactivequeues) + return (-1); + + ev->ev_pri = pri; + + return (0); +} + +/* + * Checks if a specific event is pending or scheduled. + */ + +int +event_pending(const struct event *ev, short event, struct timeval *tv) +{ + int flags = 0; + + if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) { + event_warnx("%s: event has no event_base set.", __func__); + return 0; + } + + EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); + event_debug_assert_is_setup_(ev); + + if (ev->ev_flags & EVLIST_INSERTED) + flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)); + if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) + flags |= ev->ev_res; + if (ev->ev_flags & EVLIST_TIMEOUT) + flags |= EV_TIMEOUT; + + event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL); + + /* See if there is a timeout that we should report */ + if (tv != NULL && (flags & event & EV_TIMEOUT)) { + struct timeval tmp = ev->ev_timeout; + tmp.tv_usec &= MICROSECONDS_MASK; + /* correctly remamp to real time */ + evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv); + } + + EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); + + return (flags & event); +} + +int +event_initialized(const struct event *ev) +{ + if (!(ev->ev_flags & EVLIST_INIT)) + return 0; + + return 1; +} + +void +event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out) +{ + event_debug_assert_is_setup_(event); + + if (base_out) + *base_out = event->ev_base; + if (fd_out) + *fd_out = event->ev_fd; + if (events_out) + *events_out = event->ev_events; + if (callback_out) + *callback_out = event->ev_callback; + if (arg_out) + *arg_out = event->ev_arg; +} + +size_t +event_get_struct_event_size(void) +{ + return sizeof(struct event); +} + +evutil_socket_t +event_get_fd(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_fd; +} + +struct event_base * +event_get_base(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_base; +} + +short +event_get_events(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_events; +} + +event_callback_fn +event_get_callback(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_callback; +} + +void * +event_get_callback_arg(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_arg; +} + +int +event_get_priority(const struct event *ev) +{ + event_debug_assert_is_setup_(ev); + return ev->ev_pri; +} + +int +event_add(struct event *ev, const struct timeval *tv) +{ + int res; + + if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { + event_warnx("%s: event has no event_base set.", __func__); + return -1; + } + + EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); + + res = 
event_add_nolock_(ev, tv, 0); + + EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); + + return (res); +} + +/* Helper callback: wake an event_base from another thread. This version + * works by writing a byte to one end of a socketpair, so that the event_base + * listening on the other end will wake up as the corresponding event + * triggers */ +static int +evthread_notify_base_default(struct event_base *base) +{ + char buf[1]; + int r; + buf[0] = (char) 0; +#ifdef _WIN32 + r = send(base->th_notify_fd[1], buf, 1, 0); +#else + r = write(base->th_notify_fd[1], buf, 1); +#endif + return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0; +} + +#ifdef EVENT__HAVE_EVENTFD +/* Helper callback: wake an event_base from another thread. This version + * assumes that you have a working eventfd() implementation. */ +static int +evthread_notify_base_eventfd(struct event_base *base) +{ + ev_uint64_t msg = 1; + int r; + do { + r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg)); + } while (r < 0 && errno == EAGAIN); + + return (r < 0) ? -1 : 0; +} +#endif + + +/** Tell the thread currently running the event_loop for base (if any) that it + * needs to stop waiting in its dispatch function (if it is) and process all + * active callbacks. */ +static int +evthread_notify_base(struct event_base *base) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (!base->th_notify_fn) + return -1; + if (base->is_notify_pending) + return 0; + base->is_notify_pending = 1; + return base->th_notify_fn(base); +} + +/* Implementation function to remove a timeout on a currently pending event. + */ +int +event_remove_timer_nolock_(struct event *ev) +{ + struct event_base *base = ev->ev_base; + + EVENT_BASE_ASSERT_LOCKED(base); + event_debug_assert_is_setup_(ev); + + event_debug(("event_remove_timer_nolock: event: %p", ev)); + + /* If it's not pending on a timeout, we don't need to do anything. */ + if (ev->ev_flags & EVLIST_TIMEOUT) { + event_queue_remove_timeout(base, ev); + evutil_timerclear(&ev->ev_.ev_io.ev_timeout); + } + + return (0); +} + +int +event_remove_timer(struct event *ev) +{ + int res; + + if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { + event_warnx("%s: event has no event_base set.", __func__); + return -1; + } + + EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); + + res = event_remove_timer_nolock_(ev); + + EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); + + return (res); +} + +/* Implementation function to add an event. Works just like event_add, + * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set, + * we treat tv as an absolute time, not as an interval to add to the current + * time */ +int +event_add_nolock_(struct event *ev, const struct timeval *tv, + int tv_is_absolute) +{ + struct event_base *base = ev->ev_base; + int res = 0; + int notify = 0; + + EVENT_BASE_ASSERT_LOCKED(base); + event_debug_assert_is_setup_(ev); + + event_debug(( + "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p", + ev, + EV_SOCK_ARG(ev->ev_fd), + ev->ev_events & EV_READ ? "EV_READ " : " ", + ev->ev_events & EV_WRITE ? "EV_WRITE " : " ", + ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ", + tv ? "EV_TIMEOUT " : " ", + ev->ev_callback)); + + EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); + + if (ev->ev_flags & EVLIST_FINALIZING) { + /* XXXX debug */ + return (-1); + } + + /* + * prepare for timeout insertion further below, if we get a + * failure on any step, we should not change any state. 
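+ * Reserving the extra heap slot up front means the later insertion into
+ * the timeout minheap cannot fail for lack of memory after the I/O or
+ * signal maps have already been updated.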
+ */ + if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) { + if (min_heap_reserve_(&base->timeheap, + 1 + min_heap_size_(&base->timeheap)) == -1) + return (-1); /* ENOMEM == errno */ + } + + /* If the main thread is currently executing a signal event's + * callback, and we are not the main thread, then we want to wait + * until the callback is done before we mess with the event, or else + * we can race on ev_ncalls and ev_pncalls below. */ +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (base->current_event == event_to_event_callback(ev) && + (ev->ev_events & EV_SIGNAL) + && !EVBASE_IN_THREAD(base)) { + ++base->current_event_waiters; + EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); + } +#endif + + if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) && + !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { + if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) + res = evmap_io_add_(base, ev->ev_fd, ev); + else if (ev->ev_events & EV_SIGNAL) + res = evmap_signal_add_(base, (int)ev->ev_fd, ev); + if (res != -1) + event_queue_insert_inserted(base, ev); + if (res == 1) { + /* evmap says we need to notify the main thread. */ + notify = 1; + res = 0; + } + } + + /* + * we should change the timeout state only if the previous event + * addition succeeded. + */ + if (res != -1 && tv != NULL) { + struct timeval now; + int common_timeout; +#ifdef USE_REINSERT_TIMEOUT + int was_common; + int old_timeout_idx; +#endif + + /* + * for persistent timeout events, we remember the + * timeout value and re-add the event. + * + * If tv_is_absolute, this was already set. + */ + if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute) + ev->ev_io_timeout = *tv; + +#ifndef USE_REINSERT_TIMEOUT + if (ev->ev_flags & EVLIST_TIMEOUT) { + event_queue_remove_timeout(base, ev); + } +#endif + + /* Check if it is active due to a timeout. Rescheduling + * this timeout before the callback can be executed + * removes it from the active list. 
*/ + if ((ev->ev_flags & EVLIST_ACTIVE) && + (ev->ev_res & EV_TIMEOUT)) { + if (ev->ev_events & EV_SIGNAL) { + /* See if we are just active executing + * this event in a loop + */ + if (ev->ev_ncalls && ev->ev_pncalls) { + /* Abort loop */ + *ev->ev_pncalls = 0; + } + } + + event_queue_remove_active(base, event_to_event_callback(ev)); + } + + gettime(base, &now); + + common_timeout = is_common_timeout(tv, base); +#ifdef USE_REINSERT_TIMEOUT + was_common = is_common_timeout(&ev->ev_timeout, base); + old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout); +#endif + + if (tv_is_absolute) { + ev->ev_timeout = *tv; + } else if (common_timeout) { + struct timeval tmp = *tv; + tmp.tv_usec &= MICROSECONDS_MASK; + evutil_timeradd(&now, &tmp, &ev->ev_timeout); + ev->ev_timeout.tv_usec |= + (tv->tv_usec & ~MICROSECONDS_MASK); + } else { + evutil_timeradd(&now, tv, &ev->ev_timeout); + } + + event_debug(( + "event_add: event %p, timeout in %d seconds %d useconds, call %p", + ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback)); + +#ifdef USE_REINSERT_TIMEOUT + event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx); +#else + event_queue_insert_timeout(base, ev); +#endif + + if (common_timeout) { + struct common_timeout_list *ctl = + get_common_timeout_list(base, &ev->ev_timeout); + if (ev == TAILQ_FIRST(&ctl->events)) { + common_timeout_schedule(ctl, &now, ev); + } + } else { + struct event* top = NULL; + /* See if the earliest timeout is now earlier than it + * was before: if so, we will need to tell the main + * thread to wake up earlier than it would otherwise. + * We double check the timeout of the top element to + * handle time distortions due to system suspension. + */ + if (min_heap_elt_is_top_(ev)) + notify = 1; + else if ((top = min_heap_top_(&base->timeheap)) != NULL && + evutil_timercmp(&top->ev_timeout, &now, <)) + notify = 1; + } + } + + /* if we are not in the right thread, we need to wake up the loop */ + if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) + evthread_notify_base(base); + + event_debug_note_add_(ev); + + return (res); +} + +static int +event_del_(struct event *ev, int blocking) +{ + int res; + struct event_base *base = ev->ev_base; + + if (EVUTIL_FAILURE_CHECK(!base)) { + event_warnx("%s: event has no event_base set.", __func__); + return -1; + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + res = event_del_nolock_(ev, blocking); + EVBASE_RELEASE_LOCK(base, th_base_lock); + + return (res); +} + +int +event_del(struct event *ev) +{ + return event_del_(ev, EVENT_DEL_AUTOBLOCK); +} + +int +event_del_block(struct event *ev) +{ + return event_del_(ev, EVENT_DEL_BLOCK); +} + +int +event_del_noblock(struct event *ev) +{ + return event_del_(ev, EVENT_DEL_NOBLOCK); +} + +/** Helper for event_del: always called with th_base_lock held. + * + * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK, + * EVEN_IF_FINALIZING} values. See those for more information. 
+ */ +int +event_del_nolock_(struct event *ev, int blocking) +{ + struct event_base *base; + int res = 0, notify = 0; + + event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p", + ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback)); + + /* An event without a base has not been added */ + if (ev->ev_base == NULL) + return (-1); + + EVENT_BASE_ASSERT_LOCKED(ev->ev_base); + + if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) { + if (ev->ev_flags & EVLIST_FINALIZING) { + /* XXXX Debug */ + return 0; + } + } + + base = ev->ev_base; + + EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); + + /* See if we are just active executing this event in a loop */ + if (ev->ev_events & EV_SIGNAL) { + if (ev->ev_ncalls && ev->ev_pncalls) { + /* Abort loop */ + *ev->ev_pncalls = 0; + } + } + + if (ev->ev_flags & EVLIST_TIMEOUT) { + /* NOTE: We never need to notify the main thread because of a + * deleted timeout event: all that could happen if we don't is + * that the dispatch loop might wake up too early. But the + * point of notifying the main thread _is_ to wake up the + * dispatch loop early anyway, so we wouldn't gain anything by + * doing it. + */ + event_queue_remove_timeout(base, ev); + } + + if (ev->ev_flags & EVLIST_ACTIVE) + event_queue_remove_active(base, event_to_event_callback(ev)); + else if (ev->ev_flags & EVLIST_ACTIVE_LATER) + event_queue_remove_active_later(base, event_to_event_callback(ev)); + + if (ev->ev_flags & EVLIST_INSERTED) { + event_queue_remove_inserted(base, ev); + if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) + res = evmap_io_del_(base, ev->ev_fd, ev); + else + res = evmap_signal_del_(base, (int)ev->ev_fd, ev); + if (res == 1) { + /* evmap says we need to notify the main thread. */ + notify = 1; + res = 0; + } + /* If we do not have events, let's notify event base so it can + * exit without waiting */ + if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) + notify = 1; + } + + /* if we are not in the right thread, we need to wake up the loop */ + if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) + evthread_notify_base(base); + + event_debug_note_del_(ev); + + /* If the main thread is currently executing this event's callback, + * and we are not the main thread, then we want to wait until the + * callback is done before returning. That way, when this function + * returns, it will be safe to free the user-supplied argument. 
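+ * (This is what makes the common pattern of calling event_del() from
+ * another thread and then freeing the callback argument safe, unless the
+ * event was added with EV_FINALIZE.)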
+ */ +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (blocking != EVENT_DEL_NOBLOCK && + base->current_event == event_to_event_callback(ev) && + !EVBASE_IN_THREAD(base) && + (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) { + ++base->current_event_waiters; + EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); + } +#endif + + return (res); +} + +void +event_active(struct event *ev, int res, short ncalls) +{ + if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { + event_warnx("%s: event has no event_base set.", __func__); + return; + } + + EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); + + event_debug_assert_is_setup_(ev); + + event_active_nolock_(ev, res, ncalls); + + EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); +} + + +void +event_active_nolock_(struct event *ev, int res, short ncalls) +{ + struct event_base *base; + + event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p", + ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback)); + + base = ev->ev_base; + EVENT_BASE_ASSERT_LOCKED(base); + + if (ev->ev_flags & EVLIST_FINALIZING) { + /* XXXX debug */ + return; + } + + switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { + default: + case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER: + EVUTIL_ASSERT(0); + break; + case EVLIST_ACTIVE: + /* We get different kinds of events, add them together */ + ev->ev_res |= res; + return; + case EVLIST_ACTIVE_LATER: + ev->ev_res |= res; + break; + case 0: + ev->ev_res = res; + break; + } + + if (ev->ev_pri < base->event_running_priority) + base->event_continue = 1; + + if (ev->ev_events & EV_SIGNAL) { +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (base->current_event == event_to_event_callback(ev) && + !EVBASE_IN_THREAD(base)) { + ++base->current_event_waiters; + EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); + } +#endif + ev->ev_ncalls = ncalls; + ev->ev_pncalls = NULL; + } + + event_callback_activate_nolock_(base, event_to_event_callback(ev)); +} + +void +event_active_later_(struct event *ev, int res) +{ + EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); + event_active_later_nolock_(ev, res); + EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); +} + +void +event_active_later_nolock_(struct event *ev, int res) +{ + struct event_base *base = ev->ev_base; + EVENT_BASE_ASSERT_LOCKED(base); + + if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { + /* We get different kinds of events, add them together */ + ev->ev_res |= res; + return; + } + + ev->ev_res = res; + + event_callback_activate_later_nolock_(base, event_to_event_callback(ev)); +} + +int +event_callback_activate_(struct event_base *base, + struct event_callback *evcb) +{ + int r; + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + r = event_callback_activate_nolock_(base, evcb); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +int +event_callback_activate_nolock_(struct event_base *base, + struct event_callback *evcb) +{ + int r = 1; + + if (evcb->evcb_flags & EVLIST_FINALIZING) + return 0; + + switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { + default: + EVUTIL_ASSERT(0); + EVUTIL_FALLTHROUGH; + case EVLIST_ACTIVE_LATER: + event_queue_remove_active_later(base, evcb); + r = 0; + break; + case EVLIST_ACTIVE: + return 0; + case 0: + break; + } + + event_queue_insert_active(base, evcb); + + if (EVBASE_NEED_NOTIFY(base)) + evthread_notify_base(base); + + return r; +} + +int +event_callback_activate_later_nolock_(struct event_base *base, + struct event_callback *evcb) +{ + if (evcb->evcb_flags & 
(EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) + return 0; + + event_queue_insert_active_later(base, evcb); + if (EVBASE_NEED_NOTIFY(base)) + evthread_notify_base(base); + return 1; +} + +void +event_callback_init_(struct event_base *base, + struct event_callback *cb) +{ + memset(cb, 0, sizeof(*cb)); + cb->evcb_pri = base->nactivequeues - 1; +} + +int +event_callback_cancel_(struct event_base *base, + struct event_callback *evcb) +{ + int r; + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + r = event_callback_cancel_nolock_(base, evcb, 0); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +int +event_callback_cancel_nolock_(struct event_base *base, + struct event_callback *evcb, int even_if_finalizing) +{ + if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing) + return 0; + + if (evcb->evcb_flags & EVLIST_INIT) + return event_del_nolock_(event_callback_to_event(evcb), + even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK); + + switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { + default: + case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER: + EVUTIL_ASSERT(0); + break; + case EVLIST_ACTIVE: + /* We get different kinds of events, add them together */ + event_queue_remove_active(base, evcb); + return 0; + case EVLIST_ACTIVE_LATER: + event_queue_remove_active_later(base, evcb); + break; + case 0: + break; + } + + return 0; +} + +void +event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg) +{ + memset(cb, 0, sizeof(*cb)); + cb->evcb_cb_union.evcb_selfcb = fn; + cb->evcb_arg = arg; + cb->evcb_pri = priority; + cb->evcb_closure = EV_CLOSURE_CB_SELF; +} + +void +event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority) +{ + cb->evcb_pri = priority; +} + +void +event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb) +{ + if (!base) + base = current_base; + event_callback_cancel_(base, cb); +} + +#define MAX_DEFERREDS_QUEUED 32 +int +event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb) +{ + int r = 1; + if (!base) + base = current_base; + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) { + r = event_callback_activate_later_nolock_(base, cb); + } else { + r = event_callback_activate_nolock_(base, cb); + if (r) { + ++base->n_deferreds_queued; + } + } + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +static int +timeout_next(struct event_base *base, struct timeval **tv_p) +{ + /* Caller must hold th_base_lock */ + struct timeval now; + struct event *ev; + struct timeval *tv = *tv_p; + int res = 0; + + ev = min_heap_top_(&base->timeheap); + + if (ev == NULL) { + /* if no time-based events are active wait for I/O */ + *tv_p = NULL; + goto out; + } + + if (gettime(base, &now) == -1) { + res = -1; + goto out; + } + + if (evutil_timercmp(&ev->ev_timeout, &now, <=)) { + evutil_timerclear(tv); + goto out; + } + + evutil_timersub(&ev->ev_timeout, &now, tv); + + EVUTIL_ASSERT(tv->tv_sec >= 0); + EVUTIL_ASSERT(tv->tv_usec >= 0); + event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec)); + +out: + return (res); +} + +/* Activate every event whose timeout has elapsed. */ +static void +timeout_process(struct event_base *base) +{ + /* Caller must hold lock. 
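Aside (not part of the patch above): timeout_next() derives how long the backend may sleep from the earliest deadline in the min-heap, and event_deferred_cb_schedule_() caps how many deferred callbacks are queued per pass (MAX_DEFERREDS_QUEUED). From the application side this machinery is driven by ordinary timer events; a small sketch using the standard libevent timer macros, with invented names.

    #include <event2/event.h>
    #include <stdio.h>

    static void on_timeout(evutil_socket_t fd, short what, void *arg)
    {
        (void)fd; (void)what; (void)arg;
        printf("timer fired\n");
    }

    int main(void)
    {
        struct event_base *base = event_base_new();
        struct event *tev = evtimer_new(base, on_timeout, NULL);
        struct timeval tv = { 1, 500000 };  /* 1.5 seconds */

        /* The timeout lands in base->timeheap; timeout_next() keeps the poll
         * below 1.5s and timeout_process() activates the event afterwards. */
        evtimer_add(tev, &tv);
        event_base_dispatch(base);

        event_free(tev);
        event_base_free(base);
        return 0;
    }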
*/ + struct timeval now; + struct event *ev; + + if (min_heap_empty_(&base->timeheap)) { + return; + } + + gettime(base, &now); + + while ((ev = min_heap_top_(&base->timeheap))) { + if (evutil_timercmp(&ev->ev_timeout, &now, >)) + break; + + /* delete this event from the I/O queues */ + event_del_nolock_(ev, EVENT_DEL_NOBLOCK); + + event_debug(("timeout_process: event: %p, call %p", + ev, ev->ev_callback)); + event_active_nolock_(ev, EV_TIMEOUT, 1); + } +} + +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define MAX_EVENT_COUNT(var, v) var = MAX(var, v) + +/* These are a fancy way to spell + if (~flags & EVLIST_INTERNAL) + base->event_count--/++; +*/ +#define DECR_EVENT_COUNT(base,flags) \ + ((base)->event_count -= !((flags) & EVLIST_INTERNAL)) +#define INCR_EVENT_COUNT(base,flags) do { \ + ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \ + MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \ +} while (0) + +static void +event_queue_remove_inserted(struct event_base *base, struct event *ev) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) { + event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__, + ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED); + return; + } + DECR_EVENT_COUNT(base, ev->ev_flags); + ev->ev_flags &= ~EVLIST_INSERTED; +} +static void +event_queue_remove_active(struct event_base *base, struct event_callback *evcb) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) { + event_errx(1, "%s: %p not on queue %x", __func__, + evcb, EVLIST_ACTIVE); + return; + } + DECR_EVENT_COUNT(base, evcb->evcb_flags); + evcb->evcb_flags &= ~EVLIST_ACTIVE; + base->event_count_active--; + + TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri], + evcb, evcb_active_next); +} +static void +event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) { + event_errx(1, "%s: %p not on queue %x", __func__, + evcb, EVLIST_ACTIVE_LATER); + return; + } + DECR_EVENT_COUNT(base, evcb->evcb_flags); + evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER; + base->event_count_active--; + + TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); +} +static void +event_queue_remove_timeout(struct event_base *base, struct event *ev) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) { + event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__, + ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT); + return; + } + DECR_EVENT_COUNT(base, ev->ev_flags); + ev->ev_flags &= ~EVLIST_TIMEOUT; + + if (is_common_timeout(&ev->ev_timeout, base)) { + struct common_timeout_list *ctl = + get_common_timeout_list(base, &ev->ev_timeout); + TAILQ_REMOVE(&ctl->events, ev, + ev_timeout_pos.ev_next_with_common_timeout); + } else { + min_heap_erase_(&base->timeheap, ev); + } +} + +#ifdef USE_REINSERT_TIMEOUT +/* Remove and reinsert 'ev' into the timeout queue. 
*/ +static void +event_queue_reinsert_timeout(struct event_base *base, struct event *ev, + int was_common, int is_common, int old_timeout_idx) +{ + struct common_timeout_list *ctl; + if (!(ev->ev_flags & EVLIST_TIMEOUT)) { + event_queue_insert_timeout(base, ev); + return; + } + + switch ((was_common<<1) | is_common) { + case 3: /* Changing from one common timeout to another */ + ctl = base->common_timeout_queues[old_timeout_idx]; + TAILQ_REMOVE(&ctl->events, ev, + ev_timeout_pos.ev_next_with_common_timeout); + ctl = get_common_timeout_list(base, &ev->ev_timeout); + insert_common_timeout_inorder(ctl, ev); + break; + case 2: /* Was common; is no longer common */ + ctl = base->common_timeout_queues[old_timeout_idx]; + TAILQ_REMOVE(&ctl->events, ev, + ev_timeout_pos.ev_next_with_common_timeout); + min_heap_push_(&base->timeheap, ev); + break; + case 1: /* Wasn't common; has become common. */ + min_heap_erase_(&base->timeheap, ev); + ctl = get_common_timeout_list(base, &ev->ev_timeout); + insert_common_timeout_inorder(ctl, ev); + break; + case 0: /* was in heap; is still on heap. */ + min_heap_adjust_(&base->timeheap, ev); + break; + default: + EVUTIL_ASSERT(0); /* unreachable */ + break; + } +} +#endif + +/* Add 'ev' to the common timeout list in 'ev'. */ +static void +insert_common_timeout_inorder(struct common_timeout_list *ctl, + struct event *ev) +{ + struct event *e; + /* By all logic, we should just be able to append 'ev' to the end of + * ctl->events, since the timeout on each 'ev' is set to {the common + * timeout} + {the time when we add the event}, and so the events + * should arrive in order of their timeeouts. But just in case + * there's some wacky threading issue going on, we do a search from + * the end of 'ev' to find the right insertion point. + */ + TAILQ_FOREACH_REVERSE(e, &ctl->events, + event_list, ev_timeout_pos.ev_next_with_common_timeout) { + /* This timercmp is a little sneaky, since both ev and e have + * magic values in tv_usec. Fortunately, they ought to have + * the _same_ magic values in tv_usec. Let's assert for that. 
+ */ + EVUTIL_ASSERT( + is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout)); + if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) { + TAILQ_INSERT_AFTER(&ctl->events, e, ev, + ev_timeout_pos.ev_next_with_common_timeout); + return; + } + } + TAILQ_INSERT_HEAD(&ctl->events, ev, + ev_timeout_pos.ev_next_with_common_timeout); +} + +static void +event_queue_insert_inserted(struct event_base *base, struct event *ev) +{ + EVENT_BASE_ASSERT_LOCKED(base); + + if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) { + event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__, + ev, EV_SOCK_ARG(ev->ev_fd)); + return; + } + + INCR_EVENT_COUNT(base, ev->ev_flags); + + ev->ev_flags |= EVLIST_INSERTED; +} + +static void +event_queue_insert_active(struct event_base *base, struct event_callback *evcb) +{ + EVENT_BASE_ASSERT_LOCKED(base); + + if (evcb->evcb_flags & EVLIST_ACTIVE) { + /* Double insertion is possible for active events */ + return; + } + + INCR_EVENT_COUNT(base, evcb->evcb_flags); + + evcb->evcb_flags |= EVLIST_ACTIVE; + + base->event_count_active++; + MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); + EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); + TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], + evcb, evcb_active_next); +} + +static void +event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb) +{ + EVENT_BASE_ASSERT_LOCKED(base); + if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) { + /* Double insertion is possible */ + return; + } + + INCR_EVENT_COUNT(base, evcb->evcb_flags); + evcb->evcb_flags |= EVLIST_ACTIVE_LATER; + base->event_count_active++; + MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); + EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); + TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next); +} + +static void +event_queue_insert_timeout(struct event_base *base, struct event *ev) +{ + EVENT_BASE_ASSERT_LOCKED(base); + + if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) { + event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__, + ev, EV_SOCK_ARG(ev->ev_fd)); + return; + } + + INCR_EVENT_COUNT(base, ev->ev_flags); + + ev->ev_flags |= EVLIST_TIMEOUT; + + if (is_common_timeout(&ev->ev_timeout, base)) { + struct common_timeout_list *ctl = + get_common_timeout_list(base, &ev->ev_timeout); + insert_common_timeout_inorder(ctl, ev); + } else { + min_heap_push_(&base->timeheap, ev); + } +} + +static void +event_queue_make_later_events_active(struct event_base *base) +{ + struct event_callback *evcb; + EVENT_BASE_ASSERT_LOCKED(base); + + while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { + TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); + evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE; + EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); + TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next); + base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF); + } +} + +/* Functions for debugging */ + +const char * +event_get_version(void) +{ + return (EVENT__VERSION); +} + +ev_uint32_t +event_get_version_number(void) +{ + return (EVENT__NUMERIC_VERSION); +} + +/* + * No thread-safe interface needed - the information should be the same + * for all threads. 
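Aside (not part of the patch above): insert_common_timeout_inorder() keeps events that share one "common timeout" in an ordered list instead of the min-heap, which is cheaper when many events use the identical duration. The public entry point is event_base_init_common_timeout(); a hedged sketch, with the helper name add_with_common_timeout invented.

    #include <event2/event.h>

    /* Register n already-created events with a shared 5-second timeout. */
    void add_with_common_timeout(struct event_base *base,
        struct event **evs, int n)
    {
        struct timeval five_seconds = { 5, 0 };
        const struct timeval *common =
            event_base_init_common_timeout(base, &five_seconds);
        for (int i = 0; i < n; i++)
            event_add(evs[i], common);
    }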
+ */ + +const char * +event_get_method(void) +{ + return (current_base->evsel->name); +} + +#ifndef EVENT__DISABLE_MM_REPLACEMENT +static void *(*mm_malloc_fn_)(size_t sz) = NULL; +static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL; +static void (*mm_free_fn_)(void *p) = NULL; + +void * +event_mm_malloc_(size_t sz) +{ + if (sz == 0) + return NULL; + + if (mm_malloc_fn_) + return mm_malloc_fn_(sz); + else + return malloc(sz); +} + +void * +event_mm_calloc_(size_t count, size_t size) +{ + if (count == 0 || size == 0) + return NULL; + + if (mm_malloc_fn_) { + size_t sz = count * size; + void *p = NULL; + if (count > EV_SIZE_MAX / size) + goto error; + p = mm_malloc_fn_(sz); + if (p) + return memset(p, 0, sz); + } else { + void *p = calloc(count, size); +#ifdef _WIN32 + /* Windows calloc doesn't reliably set ENOMEM */ + if (p == NULL) + goto error; +#endif + return p; + } + +error: + errno = ENOMEM; + return NULL; +} + +char * +event_mm_strdup_(const char *str) +{ + if (!str) { + errno = EINVAL; + return NULL; + } + + if (mm_malloc_fn_) { + size_t ln = strlen(str); + void *p = NULL; + if (ln == EV_SIZE_MAX) + goto error; + p = mm_malloc_fn_(ln+1); + if (p) + return memcpy(p, str, ln+1); + } else +#ifdef _WIN32 + return _strdup(str); +#else + return strdup(str); +#endif + +error: + errno = ENOMEM; + return NULL; +} + +void * +event_mm_realloc_(void *ptr, size_t sz) +{ + if (mm_realloc_fn_) + return mm_realloc_fn_(ptr, sz); + else + return realloc(ptr, sz); +} + +void +event_mm_free_(void *ptr) +{ + if (mm_free_fn_) + mm_free_fn_(ptr); + else + free(ptr); +} + +void +event_set_mem_functions(void *(*malloc_fn)(size_t sz), + void *(*realloc_fn)(void *ptr, size_t sz), + void (*free_fn)(void *ptr)) +{ + mm_malloc_fn_ = malloc_fn; + mm_realloc_fn_ = realloc_fn; + mm_free_fn_ = free_fn; +} +#endif + +#ifdef EVENT__HAVE_EVENTFD +static void +evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg) +{ + ev_uint64_t msg; + ev_ssize_t r; + struct event_base *base = arg; + + r = read(fd, (void*) &msg, sizeof(msg)); + if (r<0 && errno != EAGAIN) { + event_sock_warn(fd, "Error reading from eventfd"); + } + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + base->is_notify_pending = 0; + EVBASE_RELEASE_LOCK(base, th_base_lock); +} +#endif + +static void +evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg) +{ + unsigned char buf[1024]; + struct event_base *base = arg; +#ifdef _WIN32 + while (recv(fd, (char*)buf, sizeof(buf), 0) > 0) + ; +#else + while (read(fd, (char*)buf, sizeof(buf)) > 0) + ; +#endif + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + base->is_notify_pending = 0; + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +int +evthread_make_base_notifiable(struct event_base *base) +{ + int r; + if (!base) + return -1; + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + r = evthread_make_base_notifiable_nolock_(base); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + +static int +evthread_make_base_notifiable_nolock_(struct event_base *base) +{ + void (*cb)(evutil_socket_t, short, void *); + int (*notify)(struct event_base *); + + if (base->th_notify_fn != NULL) { + /* The base is already notifiable: we're doing fine. */ + return 0; + } + +#if defined(EVENT__HAVE_WORKING_KQUEUE) + if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) { + base->th_notify_fn = event_kq_notify_base_; + /* No need to add an event here; the backend can wake + * itself up just fine. 
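Aside (not part of the patch above): the event_mm_* wrappers route every libevent allocation through user-replaceable hooks. A sketch of installing counting allocators via event_set_mem_functions(); this only works when the library was built without EVENT__DISABLE_MM_REPLACEMENT, must run before any other libevent call, and the counter here is illustrative and not thread-safe.

    #include <event2/event.h>
    #include <stdlib.h>
    #include <stdio.h>

    static size_t total_allocated;

    static void *counting_malloc(size_t sz)
    {
        total_allocated += sz;   /* illustrative bookkeeping only */
        return malloc(sz);
    }

    static void *counting_realloc(void *p, size_t sz)
    {
        return realloc(p, sz);
    }

    static void counting_free(void *p)
    {
        free(p);
    }

    int main(void)
    {
        event_set_mem_functions(counting_malloc, counting_realloc, counting_free);

        struct event_base *base = event_base_new();
        event_base_free(base);
        printf("allocated roughly %zu bytes\n", total_allocated);
        return 0;
    }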
*/ + return 0; + } +#endif + +#ifdef EVENT__HAVE_EVENTFD + base->th_notify_fd[0] = evutil_eventfd_(0, + EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK); + if (base->th_notify_fd[0] >= 0) { + base->th_notify_fd[1] = -1; + notify = evthread_notify_base_eventfd; + cb = evthread_notify_drain_eventfd; + } else +#endif + if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) { + notify = evthread_notify_base_default; + cb = evthread_notify_drain_default; + } else { + return -1; + } + + base->th_notify_fn = notify; + + /* prepare an event that we can use for wakeup */ + event_assign(&base->th_notify, base, base->th_notify_fd[0], + EV_READ|EV_PERSIST, cb, base); + + /* we need to mark this as internal event */ + base->th_notify.ev_flags |= EVLIST_INTERNAL; + event_priority_set(&base->th_notify, 0); + + return event_add_nolock_(&base->th_notify, NULL, 0); +} + +int +event_base_foreach_event_nolock_(struct event_base *base, + event_base_foreach_event_cb fn, void *arg) +{ + int r, i; + unsigned u; + struct event *ev; + + /* Start out with all the EVLIST_INSERTED events. */ + if ((r = evmap_foreach_event_(base, fn, arg))) + return r; + + /* Okay, now we deal with those events that have timeouts and are in + * the min-heap. */ + for (u = 0; u < base->timeheap.n; ++u) { + ev = base->timeheap.p[u]; + if (ev->ev_flags & EVLIST_INSERTED) { + /* we already processed this one */ + continue; + } + if ((r = fn(base, ev, arg))) + return r; + } + + /* Now for the events in one of the timeout queues. + * the min-heap. */ + for (i = 0; i < base->n_common_timeouts; ++i) { + struct common_timeout_list *ctl = + base->common_timeout_queues[i]; + TAILQ_FOREACH(ev, &ctl->events, + ev_timeout_pos.ev_next_with_common_timeout) { + if (ev->ev_flags & EVLIST_INSERTED) { + /* we already processed this one */ + continue; + } + if ((r = fn(base, ev, arg))) + return r; + } + } + + /* Finally, we deal wit all the active events that we haven't touched + * yet. */ + for (i = 0; i < base->nactivequeues; ++i) { + struct event_callback *evcb; + TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) { + if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) { + /* This isn't an event (evlist_init clear), or + * we already processed it. (inserted or + * timeout set */ + continue; + } + ev = event_callback_to_event(evcb); + if ((r = fn(base, ev, arg))) + return r; + } + } + + return 0; +} + +/* Helper for event_base_dump_events: called on each event in the event base; + * dumps only the inserted events. */ +static int +dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg) +{ + FILE *output = arg; + const char *gloss = (e->ev_events & EV_SIGNAL) ? + "sig" : "fd "; + + if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT))) + return 0; + + fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s", + (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), + (e->ev_events&EV_READ)?" Read":"", + (e->ev_events&EV_WRITE)?" Write":"", + (e->ev_events&EV_CLOSED)?" EOF":"", + (e->ev_events&EV_SIGNAL)?" Signal":"", + (e->ev_events&EV_PERSIST)?" Persist":"", + (e->ev_events&EV_ET)?" ET":"", + (e->ev_flags&EVLIST_INTERNAL)?" 
Internal":""); + if (e->ev_flags & EVLIST_TIMEOUT) { + struct timeval tv; + tv.tv_sec = e->ev_timeout.tv_sec; + tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK; + evutil_timeradd(&tv, &base->tv_clock_diff, &tv); + fprintf(output, " Timeout=%ld.%06d", + (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK)); + } + fputc('\n', output); + + return 0; +} + +/* Helper for event_base_dump_events: called on each event in the event base; + * dumps only the active events. */ +static int +dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg) +{ + FILE *output = arg; + const char *gloss = (e->ev_events & EV_SIGNAL) ? + "sig" : "fd "; + + if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) + return 0; + + fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n", + (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri, + (e->ev_res&EV_READ)?" Read":"", + (e->ev_res&EV_WRITE)?" Write":"", + (e->ev_res&EV_CLOSED)?" EOF":"", + (e->ev_res&EV_SIGNAL)?" Signal":"", + (e->ev_res&EV_TIMEOUT)?" Timeout":"", + (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"", + (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":""); + + return 0; +} + +int +event_base_foreach_event(struct event_base *base, + event_base_foreach_event_cb fn, void *arg) +{ + int r; + if ((!fn) || (!base)) { + return -1; + } + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + r = event_base_foreach_event_nolock_(base, fn, arg); + EVBASE_RELEASE_LOCK(base, th_base_lock); + return r; +} + + +void +event_base_dump_events(struct event_base *base, FILE *output) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + fprintf(output, "Inserted events:\n"); + event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output); + + fprintf(output, "Active events:\n"); + event_base_foreach_event_nolock_(base, dump_active_event_fn, output); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +void +event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + /* Activate any non timer events */ + if (!(events & EV_TIMEOUT)) { + evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED)); + } else { + /* If we want to activate timer events, loop and activate each event with + * the same fd in both the timeheap and common timeouts list */ + int i; + unsigned u; + struct event *ev; + + for (u = 0; u < base->timeheap.n; ++u) { + ev = base->timeheap.p[u]; + if (ev->ev_fd == fd) { + event_active_nolock_(ev, EV_TIMEOUT, 1); + } + } + + for (i = 0; i < base->n_common_timeouts; ++i) { + struct common_timeout_list *ctl = base->common_timeout_queues[i]; + TAILQ_FOREACH(ev, &ctl->events, + ev_timeout_pos.ev_next_with_common_timeout) { + if (ev->ev_fd == fd) { + event_active_nolock_(ev, EV_TIMEOUT, 1); + } + } + } + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +void +event_base_active_by_signal(struct event_base *base, int sig) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + evmap_signal_active_(base, sig, 1); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + + +void +event_base_add_virtual_(struct event_base *base) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + base->virtual_event_count++; + MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +void +event_base_del_virtual_(struct event_base *base) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + EVUTIL_ASSERT(base->virtual_event_count > 0); + base->virtual_event_count--; + if (base->virtual_event_count == 0 && 
EVBASE_NEED_NOTIFY(base)) + evthread_notify_base(base); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +static void +event_free_debug_globals_locks(void) +{ +#ifndef EVENT__DISABLE_THREAD_SUPPORT +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_map_lock_ != NULL) { + EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0); + event_debug_map_lock_ = NULL; + evthreadimpl_disable_lock_debugging_(); + } +#endif /* EVENT__DISABLE_DEBUG_MODE */ +#endif /* EVENT__DISABLE_THREAD_SUPPORT */ + return; +} + +static void +event_free_debug_globals(void) +{ + event_free_debug_globals_locks(); +} + +static void +event_free_evsig_globals(void) +{ + evsig_free_globals_(); +} + +static void +event_free_evutil_globals(void) +{ + evutil_free_globals_(); +} + +static void +event_free_globals(void) +{ + event_free_debug_globals(); + event_free_evsig_globals(); + event_free_evutil_globals(); +} + +void +libevent_global_shutdown(void) +{ + event_disable_debug_mode(); + event_free_globals(); +} + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +event_global_setup_locks_(const int enable_locks) +{ +#ifndef EVENT__DISABLE_DEBUG_MODE + EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0); +#endif + if (evsig_global_setup_locks_(enable_locks) < 0) + return -1; + if (evutil_global_setup_locks_(enable_locks) < 0) + return -1; + if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0) + return -1; + return 0; +} +#endif + +void +event_base_assert_ok_(struct event_base *base) +{ + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + event_base_assert_ok_nolock_(base); + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +void +event_base_assert_ok_nolock_(struct event_base *base) +{ + int i; + int count; + + /* First do checks on the per-fd and per-signal lists */ + evmap_check_integrity_(base); + + /* Check the heap property */ + for (i = 1; i < (int)base->timeheap.n; ++i) { + int parent = (i - 1) / 2; + struct event *ev, *p_ev; + ev = base->timeheap.p[i]; + p_ev = base->timeheap.p[parent]; + EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT); + EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=)); + EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i); + } + + /* Check that the common timeouts are fine */ + for (i = 0; i < base->n_common_timeouts; ++i) { + struct common_timeout_list *ctl = base->common_timeout_queues[i]; + struct event *last=NULL, *ev; + + EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout); + + TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) { + if (last) + EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=)); + EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT); + EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base)); + EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i); + last = ev; + } + } + + /* Check the active queues. 
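Aside (not part of the patch above): event_global_setup_locks_() and libevent_global_shutdown() manage process-wide state; the former allocates the debug-map, evsig and evutil locks once a threading implementation is installed, and the latter releases global state at exit (mainly useful for leak checkers). A sketched lifecycle, assuming the pthreads adapter from event2/thread.h.

    #include <event2/event.h>
    #include <event2/thread.h>

    int main(void)
    {
        /* Installs lock/condition callbacks; this is what triggers the
         * global lock setup shown above. */
        evthread_use_pthreads();

        struct event_base *base = event_base_new();
        /* ... add events, run event_base_dispatch(base) ... */
        event_base_free(base);

        /* Optional: release process-wide state at exit. */
        libevent_global_shutdown();
        return 0;
    }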
*/ + count = 0; + for (i = 0; i < base->nactivequeues; ++i) { + struct event_callback *evcb; + EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next); + TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) { + EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE); + EVUTIL_ASSERT(evcb->evcb_pri == i); + ++count; + } + } + + { + struct event_callback *evcb; + TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) { + EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER); + ++count; + } + } + EVUTIL_ASSERT(count == base->event_count_active); +} diff --git a/probe-busybox/libevent-2.1.11-stable/event_iocp.c b/probe-busybox/libevent-2.1.11-stable/event_iocp.c new file mode 100644 index 00000000..6b2a2e15 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event_iocp.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+#include "evconfig-private.h"
+
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#include <windows.h>
+#include <process.h>
+#include <stdio.h>
+#include <mswsock.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "iocp-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "event-internal.h"
+#include "evthread-internal.h"
+
+#define NOTIFICATION_KEY ((ULONG_PTR)-1)
+
+void
+event_overlapped_init_(struct event_overlapped *o, iocp_callback cb)
+{
+	memset(o, 0, sizeof(struct event_overlapped));
+	o->cb = cb;
+}
+
+static void
+handle_entry(OVERLAPPED *o, ULONG_PTR completion_key, DWORD nBytes, int ok)
+{
+	struct event_overlapped *eo =
+	    EVUTIL_UPCAST(o, struct event_overlapped, overlapped);
+	eo->cb(eo, completion_key, nBytes, ok);
+}
+
+static void
+loop(void *port_)
+{
+	struct event_iocp_port *port = port_;
+	long ms = port->ms;
+	HANDLE p = port->port;
+
+	if (ms <= 0)
+		ms = INFINITE;
+
+	while (1) {
+		OVERLAPPED *overlapped=NULL;
+		ULONG_PTR key=0;
+		DWORD bytes=0;
+		int ok = GetQueuedCompletionStatus(p, &bytes, &key,
+			&overlapped, ms);
+		EnterCriticalSection(&port->lock);
+		if (port->shutdown) {
+			if (--port->n_live_threads == 0)
+				ReleaseSemaphore(port->shutdownSemaphore, 1,
+						NULL);
+			LeaveCriticalSection(&port->lock);
+			return;
+		}
+		LeaveCriticalSection(&port->lock);
+
+		if (key != NOTIFICATION_KEY && overlapped)
+			handle_entry(overlapped, key, bytes, ok);
+		else if (!overlapped)
+			break;
+	}
+	event_warnx("GetQueuedCompletionStatus exited with no event.");
+	EnterCriticalSection(&port->lock);
+	if (--port->n_live_threads == 0)
+		ReleaseSemaphore(port->shutdownSemaphore, 1, NULL);
+	LeaveCriticalSection(&port->lock);
+}
+
+int
+event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd,
+    ev_uintptr_t key)
+{
+	HANDLE h;
+	h = CreateIoCompletionPort((HANDLE)fd, port->port, key, port->n_threads);
+	if (!h)
+		return -1;
+	return 0;
+}
+
+static void *
+get_extension_function(SOCKET s, const GUID *which_fn)
+{
+	void *ptr = NULL;
+	DWORD bytes=0;
+	WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
+	    (GUID*)which_fn, sizeof(*which_fn),
+	    &ptr, sizeof(ptr),
+	    &bytes, NULL, NULL);
+
+	/* No need to detect errors here: if ptr is set, then we have a good
+	   function pointer.  Otherwise, we should behave as if we had no
+	   function pointer.
+	*/
+	return ptr;
+}
+
+/* Mingw doesn't have these in its mswsock.h.  The values are copied from
+   wine.h.   Perhaps if we copy them exactly, the cargo will come again.
+*/
+#ifndef WSAID_ACCEPTEX
+#define WSAID_ACCEPTEX \
+	{0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+#ifndef WSAID_CONNECTEX
+#define WSAID_CONNECTEX \
+	{0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
+#endif
+#ifndef WSAID_GETACCEPTEXSOCKADDRS
+#define WSAID_GETACCEPTEXSOCKADDRS \
+	{0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+
+static int extension_fns_initialized = 0;
+
+static void
+init_extension_functions(struct win32_extension_fns *ext)
+{
+	const GUID acceptex = WSAID_ACCEPTEX;
+	const GUID connectex = WSAID_CONNECTEX;
+	const GUID getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
+	SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s == EVUTIL_INVALID_SOCKET)
+		return;
+	ext->AcceptEx = get_extension_function(s, &acceptex);
+	ext->ConnectEx = get_extension_function(s, &connectex);
+	ext->GetAcceptExSockaddrs = get_extension_function(s,
+	    &getacceptexsockaddrs);
+	closesocket(s);
+
+	extension_fns_initialized = 1;
+}
+
+static struct win32_extension_fns the_extension_fns;
+
+const struct win32_extension_fns *
+event_get_win32_extension_fns_(void)
+{
+	return &the_extension_fns;
+}
+
+#define N_CPUS_DEFAULT 2
+
+struct event_iocp_port *
+event_iocp_port_launch_(int n_cpus)
+{
+	struct event_iocp_port *port;
+	int i;
+
+	if (!extension_fns_initialized)
+		init_extension_functions(&the_extension_fns);
+
+	if (!(port = mm_calloc(1, sizeof(struct event_iocp_port))))
+		return NULL;
+
+	if (n_cpus <= 0)
+		n_cpus = N_CPUS_DEFAULT;
+	port->n_threads = n_cpus * 2;
+	port->threads = mm_calloc(port->n_threads, sizeof(HANDLE));
+	if (!port->threads)
+		goto err;
+
+	port->port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
+			n_cpus);
+	port->ms = -1;
+	if (!port->port)
+		goto err;
+
+	port->shutdownSemaphore = CreateSemaphore(NULL, 0, 1, NULL);
+	if (!port->shutdownSemaphore)
+		goto err;
+
+	for (i=0; i<port->n_threads; ++i) {
+		ev_uintptr_t th = _beginthread(loop, 0, port);
+		if (th == (ev_uintptr_t)-1)
+			goto err;
+		port->threads[i] = (HANDLE)th;
+		++port->n_live_threads;
+	}
+
+	InitializeCriticalSectionAndSpinCount(&port->lock, 1000);
+
+	return port;
+err:
+	if (port->port)
+		CloseHandle(port->port);
+	if (port->threads)
+		mm_free(port->threads);
+	if (port->shutdownSemaphore)
+		CloseHandle(port->shutdownSemaphore);
+	mm_free(port);
+	return NULL;
+}
+
+static void
+event_iocp_port_unlock_and_free_(struct event_iocp_port *port)
+{
+	DeleteCriticalSection(&port->lock);
+	CloseHandle(port->port);
+	CloseHandle(port->shutdownSemaphore);
+	mm_free(port->threads);
+	mm_free(port);
+}
+
+static int
+event_iocp_notify_all(struct event_iocp_port *port)
+{
+	int i, r, ok=1;
+	for (i=0; i<port->n_threads; ++i) {
+		r = PostQueuedCompletionStatus(port->port, 0, NOTIFICATION_KEY,
+		    NULL);
+		if (!r)
+			ok = 0;
+	}
+	return ok ?
0 : -1; +} + +int +event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec) +{ + DWORD ms = INFINITE; + int n; + + EnterCriticalSection(&port->lock); + port->shutdown = 1; + LeaveCriticalSection(&port->lock); + event_iocp_notify_all(port); + + if (waitMsec >= 0) + ms = waitMsec; + + WaitForSingleObject(port->shutdownSemaphore, ms); + EnterCriticalSection(&port->lock); + n = port->n_live_threads; + LeaveCriticalSection(&port->lock); + if (n == 0) { + event_iocp_port_unlock_and_free_(port); + return 0; + } else { + return -1; + } +} + +int +event_iocp_activate_overlapped_( + struct event_iocp_port *port, struct event_overlapped *o, + ev_uintptr_t key, ev_uint32_t n) +{ + BOOL r; + + r = PostQueuedCompletionStatus(port->port, n, key, &o->overlapped); + return (r==0) ? -1 : 0; +} + +struct event_iocp_port * +event_base_get_iocp_(struct event_base *base) +{ +#ifdef _WIN32 + return base->iocp; +#else + return NULL; +#endif +} diff --git a/probe-busybox/libevent-2.1.11-stable/event_rpcgen.py b/probe-busybox/libevent-2.1.11-stable/event_rpcgen.py new file mode 100755 index 00000000..0911ca25 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event_rpcgen.py @@ -0,0 +1,1731 @@ +#!/usr/bin/env python +# +# Copyright (c) 2005-2007 Niels Provos +# Copyright (c) 2007-2012 Niels Provos and Nick Mathewson +# All rights reserved. +# +# Generates marshaling code based on libevent. + +# TODO: +# 1) use optparse to allow the strategy shell to parse options, and +# to allow the instantiated factory (for the specific output language) +# to parse remaining options +# 2) move the globals into a class that manages execution (including the +# progress outputs that space stderr at the moment) +# 3) emit other languages + +import sys +import re + +_NAME = "event_rpcgen.py" +_VERSION = "0.1" + +# Globals +line_count = 0 + +white = re.compile(r'\s+') +cppcomment = re.compile(r'\/\/.*$') +nonident = re.compile(r'[^a-zA-Z0-9_]') +structref = re.compile(r'^struct\[([a-zA-Z_][a-zA-Z0-9_]*)\]$') +structdef = re.compile(r'^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$') + +headerdirect = [] +cppdirect = [] + +QUIETLY = 0 + +def declare(s): + if not QUIETLY: + print(s) + +def TranslateList(mylist, mydict): + return [x % mydict for x in mylist] + +# Exception class for parse errors +class RpcGenError(Exception): + def __init__(self, why): + self.why = why + def __str__(self): + return str(self.why) + +# Holds everything that makes a struct +class Struct: + def __init__(self, name): + self._name = name + self._entries = [] + self._tags = {} + declare(' Created struct: %s' % name) + + def AddEntry(self, entry): + if entry.Tag() in self._tags: + raise RpcGenError( + 'Entry "%s" duplicates tag number %d from "%s" ' + 'around line %d' % (entry.Name(), entry.Tag(), + self._tags[entry.Tag()], line_count)) + self._entries.append(entry) + self._tags[entry.Tag()] = entry.Name() + declare(' Added entry: %s' % entry.Name()) + + def Name(self): + return self._name + + def EntryTagName(self, entry): + """Creates the name inside an enumeration for distinguishing data + types.""" + name = "%s_%s" % (self._name, entry.Name()) + return name.upper() + + def PrintIndented(self, file, ident, code): + """Takes an array, add indentation to each entry and prints it.""" + for entry in code: + file.write('%s%s\n' % (ident, entry)) + +class StructCCode(Struct): + """ Knows how to generate C code for a struct """ + + def __init__(self, name): + Struct.__init__(self, name) + + def PrintTags(self, file): + """Prints the tag definitions for a 
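Aside (not part of the patch above): the event_iocp_* functions that close out event_iocp.c are libevent's internal Windows completion-port layer; applications normally reach it indirectly (for example through overlapped bufferevents) rather than calling it directly. A hedged sketch of that internal API, with on_complete and iocp_sketch invented and the callback shaped after handle_entry()'s invocation.

    /* Windows-only; uses internal headers from this patch. */
    #include "iocp-internal.h"

    static void on_complete(struct event_overlapped *eo, ev_uintptr_t key,
        ev_ssize_t nbytes, int ok)
    {
        (void)eo; (void)key; (void)nbytes; (void)ok;
        /* runs on one of the port's worker threads via handle_entry() */
    }

    void iocp_sketch(evutil_socket_t sock)
    {
        struct event_iocp_port *port = event_iocp_port_launch_(0); /* 0 -> N_CPUS_DEFAULT */
        struct event_overlapped ol;

        event_overlapped_init_(&ol, on_complete);
        event_iocp_port_associate_(port, sock, /*key=*/1);
        /* ... start an overlapped WSARecv/WSASend using &ol.overlapped ... */

        event_iocp_shutdown_(port, 2000);  /* wait up to 2s for worker threads */
    }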
structure.""" + file.write('/* Tag definition for %s */\n' % self._name) + file.write('enum %s_ {\n' % self._name.lower()) + for entry in self._entries: + file.write(' %s=%d,\n' % (self.EntryTagName(entry), entry.Tag())) + file.write(' %s_MAX_TAGS\n' % (self._name.upper())) + file.write('};\n\n') + + def PrintForwardDeclaration(self, file): + file.write('struct %s;\n' % self._name) + + def PrintDeclaration(self, file): + file.write('/* Structure declaration for %s */\n' % self._name) + file.write('struct %s_access_ {\n' % self._name) + for entry in self._entries: + dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name()) + dcl.extend( + entry.GetDeclaration('(*%s_get)' % entry.Name())) + if entry.Array(): + dcl.extend( + entry.AddDeclaration('(*%s_add)' % entry.Name())) + self.PrintIndented(file, ' ', dcl) + file.write('};\n\n') + + file.write('struct %s {\n' % self._name) + file.write(' struct %s_access_ *base;\n\n' % self._name) + for entry in self._entries: + dcl = entry.Declaration() + self.PrintIndented(file, ' ', dcl) + file.write('\n') + for entry in self._entries: + file.write(' ev_uint8_t %s_set;\n' % entry.Name()) + file.write('};\n\n') + + file.write("""struct %(name)s *%(name)s_new(void); +struct %(name)s *%(name)s_new_with_arg(void *); +void %(name)s_free(struct %(name)s *); +void %(name)s_clear(struct %(name)s *); +void %(name)s_marshal(struct evbuffer *, const struct %(name)s *); +int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *); +int %(name)s_complete(struct %(name)s *); +void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t, + const struct %(name)s *); +int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t, + struct %(name)s *);\n""" % { 'name' : self._name }) + + + # Write a setting function of every variable + for entry in self._entries: + self.PrintIndented(file, '', entry.AssignDeclaration( + entry.AssignFuncName())) + self.PrintIndented(file, '', entry.GetDeclaration( + entry.GetFuncName())) + if entry.Array(): + self.PrintIndented(file, '', entry.AddDeclaration( + entry.AddFuncName())) + + file.write('/* --- %s done --- */\n\n' % self._name) + + def PrintCode(self, file): + file.write(('/*\n' + ' * Implementation of %s\n' + ' */\n\n') % self._name) + + file.write('static struct %(name)s_access_ %(name)s_base__ = {\n' % \ + { 'name' : self._name }) + for entry in self._entries: + self.PrintIndented(file, ' ', entry.CodeBase()) + file.write('};\n\n') + + # Creation + file.write(( + 'struct %(name)s *\n' + '%(name)s_new(void)\n' + '{\n' + ' return %(name)s_new_with_arg(NULL);\n' + '}\n' + '\n' + 'struct %(name)s *\n' + '%(name)s_new_with_arg(void *unused)\n' + '{\n' + ' struct %(name)s *tmp;\n' + ' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n' + ' event_warn("%%s: malloc", __func__);\n' + ' return (NULL);\n' + ' }\n' + ' tmp->base = &%(name)s_base__;\n\n') % { 'name' : self._name }) + + for entry in self._entries: + self.PrintIndented(file, ' ', entry.CodeInitialize('tmp')) + file.write(' tmp->%s_set = 0;\n\n' % entry.Name()) + + file.write(( + ' return (tmp);\n' + '}\n\n')) + + # Adding + for entry in self._entries: + if entry.Array(): + self.PrintIndented(file, '', entry.CodeAdd()) + file.write('\n') + + # Assigning + for entry in self._entries: + self.PrintIndented(file, '', entry.CodeAssign()) + file.write('\n') + + # Getting + for entry in self._entries: + self.PrintIndented(file, '', entry.CodeGet()) + file.write('\n') + + # Clearing + file.write(( 'void\n' + '%(name)s_clear(struct %(name)s *tmp)\n' + '{' + '\n') % { 'name' : 
self._name }) + for entry in self._entries: + self.PrintIndented(file, ' ', entry.CodeClear('tmp')) + + file.write('}\n\n') + + # Freeing + file.write(( 'void\n' + '%(name)s_free(struct %(name)s *tmp)\n' + '{' + '\n') % { 'name' : self._name }) + + for entry in self._entries: + self.PrintIndented(file, ' ', entry.CodeFree('tmp')) + + file.write((' free(tmp);\n' + '}\n\n')) + + # Marshaling + file.write(('void\n' + '%(name)s_marshal(struct evbuffer *evbuf, ' + 'const struct %(name)s *tmp)' + '{\n') % { 'name' : self._name }) + for entry in self._entries: + indent = ' ' + # Optional entries do not have to be set + if entry.Optional(): + indent += ' ' + file.write(' if (tmp->%s_set) {\n' % entry.Name()) + self.PrintIndented( + file, indent, + entry.CodeMarshal('evbuf', self.EntryTagName(entry), + entry.GetVarName('tmp'), + entry.GetVarLen('tmp'))) + if entry.Optional(): + file.write(' }\n') + + file.write('}\n\n') + + # Unmarshaling + file.write(('int\n' + '%(name)s_unmarshal(struct %(name)s *tmp, ' + ' struct evbuffer *evbuf)\n' + '{\n' + ' ev_uint32_t tag;\n' + ' while (evbuffer_get_length(evbuf) > 0) {\n' + ' if (evtag_peek(evbuf, &tag) == -1)\n' + ' return (-1);\n' + ' switch (tag) {\n' + '\n') % { 'name' : self._name }) + for entry in self._entries: + file.write(' case %s:\n' % self.EntryTagName(entry)) + if not entry.Array(): + file.write(( + ' if (tmp->%s_set)\n' + ' return (-1);' + '\n') % (entry.Name())) + + self.PrintIndented( + file, ' ', + entry.CodeUnmarshal('evbuf', + self.EntryTagName(entry), + entry.GetVarName('tmp'), + entry.GetVarLen('tmp'))) + + file.write(( ' tmp->%s_set = 1;\n' % entry.Name() + + ' break;\n' )) + file.write(( ' default:\n' + ' return -1;\n' + ' }\n' + ' }\n\n' )) + # Check if it was decoded completely + file.write(( ' if (%(name)s_complete(tmp) == -1)\n' + ' return (-1);' + '\n') % { 'name' : self._name }) + + # Successfully decoded + file.write(( ' return (0);\n' + '}\n\n')) + + # Checking if a structure has all the required data + file.write(( + 'int\n' + '%(name)s_complete(struct %(name)s *msg)\n' + '{\n' ) % { 'name' : self._name }) + for entry in self._entries: + if not entry.Optional(): + code = [ + 'if (!msg->%(name)s_set)', + ' return (-1);' ] + code = TranslateList(code, entry.GetTranslation()) + self.PrintIndented( + file, ' ', code) + + self.PrintIndented( + file, ' ', + entry.CodeComplete('msg', entry.GetVarName('msg'))) + file.write(( + ' return (0);\n' + '}\n\n' )) + + # Complete message unmarshaling + file.write(( + 'int\n' + 'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, ' + 'ev_uint32_t need_tag, struct %(name)s *msg)\n' + '{\n' + ' ev_uint32_t tag;\n' + ' int res = -1;\n' + '\n' + ' struct evbuffer *tmp = evbuffer_new();\n' + '\n' + ' if (evtag_unmarshal(evbuf, &tag, tmp) == -1' + ' || tag != need_tag)\n' + ' goto error;\n' + '\n' + ' if (%(name)s_unmarshal(msg, tmp) == -1)\n' + ' goto error;\n' + '\n' + ' res = 0;\n' + '\n' + ' error:\n' + ' evbuffer_free(tmp);\n' + ' return (res);\n' + '}\n\n' ) % { 'name' : self._name }) + + # Complete message marshaling + file.write(( + 'void\n' + 'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, ' + 'const struct %(name)s *msg)\n' + '{\n' + ' struct evbuffer *buf_ = evbuffer_new();\n' + ' assert(buf_ != NULL);\n' + ' %(name)s_marshal(buf_, msg);\n' + ' evtag_marshal_buffer(evbuf, tag, buf_);\n ' + ' evbuffer_free(buf_);\n' + '}\n\n' ) % { 'name' : self._name }) + +class Entry: + def __init__(self, type, name, tag): + self._type = type + self._name = name + self._tag = int(tag) + 
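Aside (not part of the patch above): PrintCode() emits, for every struct in the C-like input description (e.g. a `struct msg { string from_name = 1; }` block), a fixed C API: <name>_new/_free/_clear, <name>_marshal/_unmarshal/_complete, evtag_(un)marshal_<name>, and per-field _assign/_get accessors. Assuming an input struct named msg with a string field from_name (both invented for illustration, and the generated header assumed to be included), the generated code would be used roughly like this:

    #include <event2/buffer.h>
    #include <stdio.h>

    void roundtrip(void)
    {
        struct evbuffer *wire = evbuffer_new();
        struct msg *out = msg_new(), *in = msg_new();
        char *name;

        msg_from_name_assign(out, "alice");
        evtag_marshal_msg(wire, /*tag=*/0x10, out);   /* tagged, length-prefixed */

        if (evtag_unmarshal_msg(wire, 0x10, in) == 0 &&
            msg_from_name_get(in, &name) == 0)
            printf("from = %s\n", name);

        msg_free(out);
        msg_free(in);
        evbuffer_free(wire);
    }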
self._ctype = type + self._optional = 0 + self._can_be_array = 0 + self._array = 0 + self._line_count = -1 + self._struct = None + self._refname = None + + self._optpointer = True + self._optaddarg = True + + def GetInitializer(self): + assert 0, "Entry does not provide initializer" + + def SetStruct(self, struct): + self._struct = struct + + def LineCount(self): + assert self._line_count != -1 + return self._line_count + + def SetLineCount(self, number): + self._line_count = number + + def Array(self): + return self._array + + def Optional(self): + return self._optional + + def Tag(self): + return self._tag + + def Name(self): + return self._name + + def Type(self): + return self._type + + def MakeArray(self, yes=1): + self._array = yes + + def MakeOptional(self): + self._optional = 1 + + def Verify(self): + if self.Array() and not self._can_be_array: + raise RpcGenError( + 'Entry "%s" cannot be created as an array ' + 'around line %d' % (self._name, self.LineCount())) + if not self._struct: + raise RpcGenError( + 'Entry "%s" does not know which struct it belongs to ' + 'around line %d' % (self._name, self.LineCount())) + if self._optional and self._array: + raise RpcGenError( + 'Entry "%s" has illegal combination of optional and array ' + 'around line %d' % (self._name, self.LineCount())) + + def GetTranslation(self, extradict = {}): + mapping = { + "parent_name" : self._struct.Name(), + "name" : self._name, + "ctype" : self._ctype, + "refname" : self._refname, + "optpointer" : self._optpointer and "*" or "", + "optreference" : self._optpointer and "&" or "", + "optaddarg" : + self._optaddarg and ", const %s value" % self._ctype or "" + } + for (k, v) in list(extradict.items()): + mapping[k] = v + + return mapping + + def GetVarName(self, var): + return '%(var)s->%(name)s_data' % self.GetTranslation({ 'var' : var }) + + def GetVarLen(self, var): + return 'sizeof(%s)' % self._ctype + + def GetFuncName(self): + return '%s_%s_get' % (self._struct.Name(), self._name) + + def GetDeclaration(self, funcname): + code = [ 'int %s(struct %s *, %s *);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def CodeGet(self): + code = ( + 'int', + '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, ' + '%(ctype)s *value)', + '{', + ' if (msg->%(name)s_set != 1)', + ' return (-1);', + ' *value = msg->%(name)s_data;', + ' return (0);', + '}' ) + code = '\n'.join(code) + code = code % self.GetTranslation() + return code.split('\n') + + def AssignFuncName(self): + return '%s_%s_assign' % (self._struct.Name(), self._name) + + def AddFuncName(self): + return '%s_%s_add' % (self._struct.Name(), self._name) + + def AssignDeclaration(self, funcname): + code = [ 'int %s(struct %s *, const %s);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def CodeAssign(self): + code = [ 'int', + '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,' + ' const %(ctype)s value)', + '{', + ' msg->%(name)s_set = 1;', + ' msg->%(name)s_data = value;', + ' return (0);', + '}' ] + code = '\n'.join(code) + code = code % self.GetTranslation() + return code.split('\n') + + def CodeClear(self, structname): + code = [ '%s->%s_set = 0;' % (structname, self.Name()) ] + + return code + + def CodeComplete(self, structname, var_name): + return [] + + def CodeFree(self, name): + return [] + + def CodeBase(self): + code = [ + '%(parent_name)s_%(name)s_assign,', + '%(parent_name)s_%(name)s_get,' + ] + if self.Array(): + code.append('%(parent_name)s_%(name)s_add,') + + code = 
'\n'.join(code) + code = code % self.GetTranslation() + return code.split('\n') + +class EntryBytes(Entry): + def __init__(self, type, name, tag, length): + # Init base class + Entry.__init__(self, type, name, tag) + + self._length = length + self._ctype = 'ev_uint8_t' + + def GetInitializer(self): + return "NULL" + + def GetVarLen(self, var): + return '(%s)' % self._length + + def CodeArrayAdd(self, varname, value): + # XXX: copy here + return [ '%(varname)s = NULL;' % { 'varname' : varname } ] + + def GetDeclaration(self, funcname): + code = [ 'int %s(struct %s *, %s **);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def AssignDeclaration(self, funcname): + code = [ 'int %s(struct %s *, const %s *);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def Declaration(self): + dcl = ['ev_uint8_t %s_data[%s];' % (self._name, self._length)] + + return dcl + + def CodeGet(self): + name = self._name + code = [ 'int', + '%s_%s_get(struct %s *msg, %s **value)' % ( + self._struct.Name(), name, + self._struct.Name(), self._ctype), + '{', + ' if (msg->%s_set != 1)' % name, + ' return (-1);', + ' *value = msg->%s_data;' % name, + ' return (0);', + '}' ] + return code + + def CodeAssign(self): + name = self._name + code = [ 'int', + '%s_%s_assign(struct %s *msg, const %s *value)' % ( + self._struct.Name(), name, + self._struct.Name(), self._ctype), + '{', + ' msg->%s_set = 1;' % name, + ' memcpy(msg->%s_data, value, %s);' % ( + name, self._length), + ' return (0);', + '}' ] + return code + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + code = [ 'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, ' + '%(var)s, %(varlen)s) == -1) {', + ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);', + ' return (-1);', + '}' + ] + return TranslateList(code, + self.GetTranslation({ + 'var' : var_name, + 'varlen' : var_len, + 'buf' : buf, + 'tag' : tag_name })) + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = ['evtag_marshal(%s, %s, %s, %s);' % ( + buf, tag_name, var_name, var_len)] + return code + + def CodeClear(self, structname): + code = [ '%s->%s_set = 0;' % (structname, self.Name()), + 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % ( + structname, self._name, structname, self._name)] + + return code + + def CodeInitialize(self, name): + code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % ( + name, self._name, name, self._name)] + return code + + def Verify(self): + if not self._length: + raise RpcGenError( + 'Entry "%s" needs a length ' + 'around line %d' % (self._name, self.LineCount())) + + Entry.Verify(self) + +class EntryInt(Entry): + def __init__(self, type, name, tag, bits=32): + # Init base class + Entry.__init__(self, type, name, tag) + + self._can_be_array = 1 + if bits == 32: + self._ctype = 'ev_uint32_t' + self._marshal_type = 'int' + if bits == 64: + self._ctype = 'ev_uint64_t' + self._marshal_type = 'int64' + + def GetInitializer(self): + return "0" + + def CodeArrayFree(self, var): + return [] + + def CodeArrayAssign(self, varname, srcvar): + return [ '%(varname)s = %(srcvar)s;' % { 'varname' : varname, + 'srcvar' : srcvar } ] + + def CodeArrayAdd(self, varname, value): + """Returns a new entry of this type.""" + return [ '%(varname)s = %(value)s;' % { 'varname' : varname, + 'value' : value } ] + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + code = [ + 'if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {', + ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);', + ' 
return (-1);', + '}' ] + code = '\n'.join(code) % self.GetTranslation({ + 'ma' : self._marshal_type, + 'buf' : buf, + 'tag' : tag_name, + 'var' : var_name }) + return code.split('\n') + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = [ + 'evtag_marshal_%s(%s, %s, %s);' % ( + self._marshal_type, buf, tag_name, var_name)] + return code + + def Declaration(self): + dcl = ['%s %s_data;' % (self._ctype, self._name)] + + return dcl + + def CodeInitialize(self, name): + code = ['%s->%s_data = 0;' % (name, self._name)] + return code + +class EntryString(Entry): + def __init__(self, type, name, tag): + # Init base class + Entry.__init__(self, type, name, tag) + + self._can_be_array = 1 + self._ctype = 'char *' + + def GetInitializer(self): + return "NULL" + + def CodeArrayFree(self, varname): + code = [ + 'if (%(var)s != NULL) free(%(var)s);' ] + + return TranslateList(code, { 'var' : varname }) + + def CodeArrayAssign(self, varname, srcvar): + code = [ + 'if (%(var)s != NULL)', + ' free(%(var)s);', + '%(var)s = strdup(%(srcvar)s);', + 'if (%(var)s == NULL) {', + ' event_warnx("%%s: strdup", __func__);', + ' return (-1);', + '}' ] + + return TranslateList(code, { 'var' : varname, + 'srcvar' : srcvar }) + + def CodeArrayAdd(self, varname, value): + code = [ + 'if (%(value)s != NULL) {', + ' %(var)s = strdup(%(value)s);', + ' if (%(var)s == NULL) {', + ' goto error;', + ' }', + '} else {', + ' %(var)s = NULL;', + '}' ] + + return TranslateList(code, { 'var' : varname, + 'value' : value }) + + def GetVarLen(self, var): + return 'strlen(%s)' % self.GetVarName(var) + + def CodeMakeInitalize(self, varname): + return '%(varname)s = NULL;' % { 'varname' : varname } + + def CodeAssign(self): + name = self._name + code = """int +%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, + const %(ctype)s value) +{ + if (msg->%(name)s_data != NULL) + free(msg->%(name)s_data); + if ((msg->%(name)s_data = strdup(value)) == NULL) + return (-1); + msg->%(name)s_set = 1; + return (0); +}""" % self.GetTranslation() + + return code.split('\n') + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + code = ['if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {', + ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);', + ' return (-1);', + '}' + ] + code = '\n'.join(code) % self.GetTranslation({ + 'buf' : buf, + 'tag' : tag_name, + 'var' : var_name }) + return code.split('\n') + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = ['evtag_marshal_string(%s, %s, %s);' % ( + buf, tag_name, var_name)] + return code + + def CodeClear(self, structname): + code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()), + ' free(%s->%s_data);' % (structname, self.Name()), + ' %s->%s_data = NULL;' % (structname, self.Name()), + ' %s->%s_set = 0;' % (structname, self.Name()), + '}' + ] + + return code + + def CodeInitialize(self, name): + code = ['%s->%s_data = NULL;' % (name, self._name)] + return code + + def CodeFree(self, name): + code = ['if (%s->%s_data != NULL)' % (name, self._name), + ' free (%s->%s_data);' % (name, self._name)] + + return code + + def Declaration(self): + dcl = ['char *%s_data;' % self._name] + + return dcl + +class EntryStruct(Entry): + def __init__(self, type, name, tag, refname): + # Init base class + Entry.__init__(self, type, name, tag) + + self._optpointer = False + self._can_be_array = 1 + self._refname = refname + self._ctype = 'struct %s*' % refname + self._optaddarg = False + + def GetInitializer(self): + return "NULL" + + def 
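Aside (not part of the patch above): the CodeMarshal/CodeUnmarshal templates lean on libevent's evtag layer (event2/tag.h) for the tagged wire format. The same primitives can also be used directly; a small sketch in which the tag values 1 and 2 are arbitrary.

    #include <event2/buffer.h>
    #include <event2/tag.h>
    #include <event2/util.h>
    #include <stdio.h>
    #include <stdlib.h>

    void tag_demo(void)
    {
        struct evbuffer *buf = evbuffer_new();
        ev_uint32_t number;
        char *text;

        evtag_marshal_int(buf, 1, 42);
        evtag_marshal_string(buf, 2, "hello");

        if (evtag_unmarshal_int(buf, 1, &number) != -1 &&
            evtag_unmarshal_string(buf, 2, &text) != -1) {
            printf("%u %s\n", number, text);
            free(text);   /* evtag_unmarshal_string allocates the copy */
        }
        evbuffer_free(buf);
    }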
GetVarLen(self, var): + return '-1' + + def CodeArrayAdd(self, varname, value): + code = [ + '%(varname)s = %(refname)s_new();', + 'if (%(varname)s == NULL)', + ' goto error;' ] + + return TranslateList(code, self.GetTranslation({ 'varname' : varname })) + + def CodeArrayFree(self, var): + code = [ '%(refname)s_free(%(var)s);' % self.GetTranslation( + { 'var' : var }) ] + return code + + def CodeArrayAssign(self, var, srcvar): + code = [ + 'int had_error = 0;', + 'struct evbuffer *tmp = NULL;', + '%(refname)s_clear(%(var)s);', + 'if ((tmp = evbuffer_new()) == NULL) {', + ' event_warn("%%s: evbuffer_new()", __func__);', + ' had_error = 1;', + ' goto done;', + '}', + '%(refname)s_marshal(tmp, %(srcvar)s);', + 'if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {', + ' event_warnx("%%s: %(refname)s_unmarshal", __func__);', + ' had_error = 1;', + ' goto done;', + '}', + 'done:' + 'if (tmp != NULL)', + ' evbuffer_free(tmp);', + 'if (had_error) {', + ' %(refname)s_clear(%(var)s);', + ' return (-1);', + '}' ] + + return TranslateList(code, self.GetTranslation({ + 'var' : var, + 'srcvar' : srcvar})) + + def CodeGet(self): + name = self._name + code = [ 'int', + '%s_%s_get(struct %s *msg, %s *value)' % ( + self._struct.Name(), name, + self._struct.Name(), self._ctype), + '{', + ' if (msg->%s_set != 1) {' % name, + ' msg->%s_data = %s_new();' % (name, self._refname), + ' if (msg->%s_data == NULL)' % name, + ' return (-1);', + ' msg->%s_set = 1;' % name, + ' }', + ' *value = msg->%s_data;' % name, + ' return (0);', + '}' ] + return code + + def CodeAssign(self): + name = self._name + code = """int +%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, + const %(ctype)s value) +{ + struct evbuffer *tmp = NULL; + if (msg->%(name)s_set) { + %(refname)s_clear(msg->%(name)s_data); + msg->%(name)s_set = 0; + } else { + msg->%(name)s_data = %(refname)s_new(); + if (msg->%(name)s_data == NULL) { + event_warn("%%s: %(refname)s_new()", __func__); + goto error; + } + } + if ((tmp = evbuffer_new()) == NULL) { + event_warn("%%s: evbuffer_new()", __func__); + goto error; + } + %(refname)s_marshal(tmp, value); + if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) { + event_warnx("%%s: %(refname)s_unmarshal", __func__); + goto error; + } + msg->%(name)s_set = 1; + evbuffer_free(tmp); + return (0); + error: + if (tmp != NULL) + evbuffer_free(tmp); + if (msg->%(name)s_data != NULL) { + %(refname)s_free(msg->%(name)s_data); + msg->%(name)s_data = NULL; + } + return (-1); +}""" % self.GetTranslation() + return code.split('\n') + + def CodeComplete(self, structname, var_name): + code = [ 'if (%(structname)s->%(name)s_set && ' + '%(refname)s_complete(%(var)s) == -1)', + ' return (-1);' ] + + return TranslateList(code, self.GetTranslation({ + 'structname' : structname, + 'var' : var_name })) + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + code = ['%(var)s = %(refname)s_new();', + 'if (%(var)s == NULL)', + ' return (-1);', + 'if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, ' + '%(var)s) == -1) {', + ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);', + ' return (-1);', + '}' + ] + code = '\n'.join(code) % self.GetTranslation({ + 'buf' : buf, + 'tag' : tag_name, + 'var' : var_name }) + return code.split('\n') + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = ['evtag_marshal_%s(%s, %s, %s);' % ( + self._refname, buf, tag_name, var_name)] + return code + + def CodeClear(self, structname): + code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()), + ' 
%s_free(%s->%s_data);' % ( + self._refname, structname, self.Name()), + ' %s->%s_data = NULL;' % (structname, self.Name()), + ' %s->%s_set = 0;' % (structname, self.Name()), + '}' + ] + + return code + + def CodeInitialize(self, name): + code = ['%s->%s_data = NULL;' % (name, self._name)] + return code + + def CodeFree(self, name): + code = ['if (%s->%s_data != NULL)' % (name, self._name), + ' %s_free(%s->%s_data);' % ( + self._refname, name, self._name)] + + return code + + def Declaration(self): + dcl = ['%s %s_data;' % (self._ctype, self._name)] + + return dcl + +class EntryVarBytes(Entry): + def __init__(self, type, name, tag): + # Init base class + Entry.__init__(self, type, name, tag) + + self._ctype = 'ev_uint8_t *' + + def GetInitializer(self): + return "NULL" + + def GetVarLen(self, var): + return '%(var)s->%(name)s_length' % self.GetTranslation({ 'var' : var }) + + def CodeArrayAdd(self, varname, value): + # xxx: copy + return [ '%(varname)s = NULL;' % { 'varname' : varname } ] + + def GetDeclaration(self, funcname): + code = [ 'int %s(struct %s *, %s *, ev_uint32_t *);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def AssignDeclaration(self, funcname): + code = [ 'int %s(struct %s *, const %s, ev_uint32_t);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def CodeAssign(self): + name = self._name + code = [ 'int', + '%s_%s_assign(struct %s *msg, ' + 'const %s value, ev_uint32_t len)' % ( + self._struct.Name(), name, + self._struct.Name(), self._ctype), + '{', + ' if (msg->%s_data != NULL)' % name, + ' free (msg->%s_data);' % name, + ' msg->%s_data = malloc(len);' % name, + ' if (msg->%s_data == NULL)' % name, + ' return (-1);', + ' msg->%s_set = 1;' % name, + ' msg->%s_length = len;' % name, + ' memcpy(msg->%s_data, value, len);' % name, + ' return (0);', + '}' ] + return code + + def CodeGet(self): + name = self._name + code = [ 'int', + '%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)' % ( + self._struct.Name(), name, + self._struct.Name(), self._ctype), + '{', + ' if (msg->%s_set != 1)' % name, + ' return (-1);', + ' *value = msg->%s_data;' % name, + ' *plen = msg->%s_length;' % name, + ' return (0);', + '}' ] + return code + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + code = ['if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)', + ' return (-1);', + # We do not want DoS opportunities + 'if (%(varlen)s > evbuffer_get_length(%(buf)s))', + ' return (-1);', + 'if ((%(var)s = malloc(%(varlen)s)) == NULL)', + ' return (-1);', + 'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, ' + '%(varlen)s) == -1) {', + ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);', + ' return (-1);', + '}' + ] + code = '\n'.join(code) % self.GetTranslation({ + 'buf' : buf, + 'tag' : tag_name, + 'var' : var_name, + 'varlen' : var_len }) + return code.split('\n') + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = ['evtag_marshal(%s, %s, %s, %s);' % ( + buf, tag_name, var_name, var_len)] + return code + + def CodeClear(self, structname): + code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()), + ' free (%s->%s_data);' % (structname, self.Name()), + ' %s->%s_data = NULL;' % (structname, self.Name()), + ' %s->%s_length = 0;' % (structname, self.Name()), + ' %s->%s_set = 0;' % (structname, self.Name()), + '}' + ] + + return code + + def CodeInitialize(self, name): + code = ['%s->%s_data = NULL;' % (name, self._name), + '%s->%s_length = 0;' % (name, self._name) ] + return code + + def 
CodeFree(self, name): + code = ['if (%s->%s_data != NULL)' % (name, self._name), + ' free(%s->%s_data);' % (name, self._name)] + + return code + + def Declaration(self): + dcl = ['ev_uint8_t *%s_data;' % self._name, + 'ev_uint32_t %s_length;' % self._name] + + return dcl + +class EntryArray(Entry): + def __init__(self, entry): + # Init base class + Entry.__init__(self, entry._type, entry._name, entry._tag) + + self._entry = entry + self._refname = entry._refname + self._ctype = self._entry._ctype + self._optional = True + self._optpointer = self._entry._optpointer + self._optaddarg = self._entry._optaddarg + + # provide a new function for accessing the variable name + def GetVarName(var_name): + return '%(var)s->%(name)s_data[%(index)s]' % \ + self._entry.GetTranslation({'var' : var_name, + 'index' : self._index}) + self._entry.GetVarName = GetVarName + + def GetInitializer(self): + return "NULL" + + def GetVarName(self, var_name): + return var_name + + def GetVarLen(self, var_name): + return '-1' + + def GetDeclaration(self, funcname): + """Allows direct access to elements of the array.""" + code = [ + 'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' % + self.GetTranslation({ 'funcname' : funcname }) ] + return code + + def AssignDeclaration(self, funcname): + code = [ 'int %s(struct %s *, int, const %s);' % ( + funcname, self._struct.Name(), self._ctype ) ] + return code + + def AddDeclaration(self, funcname): + code = [ + '%(ctype)s %(optpointer)s ' + '%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);' % \ + self.GetTranslation({ 'funcname' : funcname }) ] + return code + + def CodeGet(self): + code = """int +%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset, + %(ctype)s *value) +{ + if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length) + return (-1); + *value = msg->%(name)s_data[offset]; + return (0); +}""" % self.GetTranslation() + + return code.split('\n') + + def CodeAssign(self): + code = [ + 'int', + '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,', + ' const %(ctype)s value)', + '{', + ' if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)', + ' return (-1);\n', + ' {' ] + code = TranslateList(code, self.GetTranslation()) + + codearrayassign = self._entry.CodeArrayAssign( + 'msg->%(name)s_data[off]' % self.GetTranslation(), 'value') + code += [' ' + x for x in codearrayassign] + + code += TranslateList([ + ' }', + ' return (0);', + '}' ], self.GetTranslation()) + + return code + + def CodeAdd(self): + codearrayadd = self._entry.CodeArrayAdd( + 'msg->%(name)s_data[msg->%(name)s_length - 1]' % self.GetTranslation(), + 'value') + code = [ + 'static int', + '%(parent_name)s_%(name)s_expand_to_hold_more(' + 'struct %(parent_name)s *msg)', + '{', + ' int tobe_allocated = msg->%(name)s_num_allocated;', + ' %(ctype)s* new_data = NULL;', + ' tobe_allocated = !tobe_allocated ? 
1 : tobe_allocated << 1;', + ' new_data = (%(ctype)s*) realloc(msg->%(name)s_data,', + ' tobe_allocated * sizeof(%(ctype)s));', + ' if (new_data == NULL)', + ' return -1;', + ' msg->%(name)s_data = new_data;', + ' msg->%(name)s_num_allocated = tobe_allocated;', + ' return 0;' + '}', + '', + '%(ctype)s %(optpointer)s', + '%(parent_name)s_%(name)s_add(' + 'struct %(parent_name)s *msg%(optaddarg)s)', + '{', + ' if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {', + ' if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)', + ' goto error;', + ' }' ] + + code = TranslateList(code, self.GetTranslation()) + + code += [' ' + x for x in codearrayadd] + + code += TranslateList([ + ' msg->%(name)s_set = 1;', + ' return %(optreference)s(msg->%(name)s_data[' + 'msg->%(name)s_length - 1]);', + 'error:', + ' --msg->%(name)s_length;', + ' return (NULL);', + '}' ], self.GetTranslation()) + + return code + + def CodeComplete(self, structname, var_name): + self._index = 'i' + tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name)) + # skip the whole loop if there is nothing to check + if not tmp: + return [] + + translate = self.GetTranslation({ 'structname' : structname }) + code = [ + '{', + ' int i;', + ' for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ] + + code = TranslateList(code, translate) + + code += [' ' + x for x in tmp] + + code += [ + ' }', + '}' ] + + return code + + def CodeUnmarshal(self, buf, tag_name, var_name, var_len): + translate = self.GetTranslation({ 'var' : var_name, + 'buf' : buf, + 'tag' : tag_name, + 'init' : self._entry.GetInitializer()}) + code = [ + 'if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&', + ' %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {', + ' puts("HEY NOW");', + ' return (-1);', + '}'] + + # the unmarshal code directly returns + code = TranslateList(code, translate) + + self._index = '%(var)s->%(name)s_length' % translate + code += self._entry.CodeUnmarshal(buf, tag_name, + self._entry.GetVarName(var_name), + self._entry.GetVarLen(var_name)) + + code += [ '++%(var)s->%(name)s_length;' % translate ] + + return code + + def CodeMarshal(self, buf, tag_name, var_name, var_len): + code = ['{', + ' int i;', + ' for (i = 0; i < %(var)s->%(name)s_length; ++i) {' ] + + self._index = 'i' + code += self._entry.CodeMarshal(buf, tag_name, + self._entry.GetVarName(var_name), + self._entry.GetVarLen(var_name)) + code += [' }', + '}' + ] + + code = "\n".join(code) % self.GetTranslation({ 'var' : var_name }) + + return code.split('\n') + + def CodeClear(self, structname): + translate = self.GetTranslation({ 'structname' : structname }) + codearrayfree = self._entry.CodeArrayFree( + '%(structname)s->%(name)s_data[i]' % self.GetTranslation( + { 'structname' : structname } )) + + code = [ 'if (%(structname)s->%(name)s_set == 1) {' ] + + if codearrayfree: + code += [ + ' int i;', + ' for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ] + + code = TranslateList(code, translate) + + if codearrayfree: + code += [' ' + x for x in codearrayfree] + code += [ + ' }' ] + + code += TranslateList([ + ' free(%(structname)s->%(name)s_data);', + ' %(structname)s->%(name)s_data = NULL;', + ' %(structname)s->%(name)s_set = 0;', + ' %(structname)s->%(name)s_length = 0;', + ' %(structname)s->%(name)s_num_allocated = 0;', + '}' + ], translate) + + return code + + def CodeInitialize(self, name): + code = ['%s->%s_data = NULL;' % (name, self._name), + '%s->%s_length = 0;' % (name, self._name), + '%s->%s_num_allocated = 0;' 
% (name, self._name)] + return code + + def CodeFree(self, structname): + code = self.CodeClear(structname); + + code += TranslateList([ + 'free(%(structname)s->%(name)s_data);' ], + self.GetTranslation({'structname' : structname })) + + return code + + def Declaration(self): + dcl = ['%s *%s_data;' % (self._ctype, self._name), + 'int %s_length;' % self._name, + 'int %s_num_allocated;' % self._name ] + + return dcl + +def NormalizeLine(line): + global white + global cppcomment + + line = cppcomment.sub('', line) + line = line.strip() + line = white.sub(' ', line) + + return line + +def ProcessOneEntry(factory, newstruct, entry): + optional = 0 + array = 0 + entry_type = '' + name = '' + tag = '' + tag_set = None + separator = '' + fixed_length = '' + + tokens = entry.split(' ') + while tokens: + token = tokens[0] + tokens = tokens[1:] + + if not entry_type: + if not optional and token == 'optional': + optional = 1 + continue + + if not array and token == 'array': + array = 1 + continue + + if not entry_type: + entry_type = token + continue + + if not name: + res = re.match(r'^([^\[\]]+)(\[.*\])?$', token) + if not res: + raise RpcGenError( + 'Cannot parse name: \"%s\" ' + 'around line %d' % (entry, line_count)) + name = res.group(1) + fixed_length = res.group(2) + if fixed_length: + fixed_length = fixed_length[1:-1] + continue + + if not separator: + separator = token + if separator != '=': + raise RpcGenError('Expected "=" after name \"%s\" got %s' + % (name, token)) + continue + + if not tag_set: + tag_set = 1 + if not re.match(r'^(0x)?[0-9]+$', token): + raise RpcGenError('Expected tag number: \"%s\"' % entry) + tag = int(token, 0) + continue + + raise RpcGenError('Cannot parse \"%s\"' % entry) + + if not tag_set: + raise RpcGenError('Need tag number: \"%s\"' % entry) + + # Create the right entry + if entry_type == 'bytes': + if fixed_length: + newentry = factory.EntryBytes(entry_type, name, tag, fixed_length) + else: + newentry = factory.EntryVarBytes(entry_type, name, tag) + elif entry_type == 'int' and not fixed_length: + newentry = factory.EntryInt(entry_type, name, tag) + elif entry_type == 'int64' and not fixed_length: + newentry = factory.EntryInt(entry_type, name, tag, bits=64) + elif entry_type == 'string' and not fixed_length: + newentry = factory.EntryString(entry_type, name, tag) + else: + res = structref.match(entry_type) + if res: + # References another struct defined in our file + newentry = factory.EntryStruct(entry_type, name, tag, res.group(1)) + else: + raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry)) + + structs = [] + + if optional: + newentry.MakeOptional() + if array: + newentry.MakeArray() + + newentry.SetStruct(newstruct) + newentry.SetLineCount(line_count) + newentry.Verify() + + if array: + # We need to encapsulate this entry into a struct + newname = newentry.Name()+ '_array' + + # Now borgify the new entry. 
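For reference, the entry classes and the ProcessOneEntry()/ProcessStruct() parser in this file consume struct descriptions written in libevent's .rpc format. The following input is purely illustrative (hypothetical struct and field names, arbitrary tags); it exercises the string, optional int, variable-length bytes and array paths handled here:

    struct person {
        string name = 1;
        optional int age = 2;
        bytes picture = 3;
        array string nicknames = 4;
    }

Feeding such a file to event_rpcgen.py emits a C implementation file and a matching header (person.gen.c and person.gen.h with the default naming in CodeFilename()/HeaderFilename() further down), containing the per-field *_assign/*_get accessors expanded from the CodeAssign()/CodeGet() templates above.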
+ newentry = factory.EntryArray(newentry) + newentry.SetStruct(newstruct) + newentry.SetLineCount(line_count) + newentry.MakeArray() + + newstruct.AddEntry(newentry) + + return structs + +def ProcessStruct(factory, data): + tokens = data.split(' ') + + # First three tokens are: 'struct' 'name' '{' + newstruct = factory.Struct(tokens[1]) + + inside = ' '.join(tokens[3:-1]) + + tokens = inside.split(';') + + structs = [] + + for entry in tokens: + entry = NormalizeLine(entry) + if not entry: + continue + + # It's possible that new structs get defined in here + structs.extend(ProcessOneEntry(factory, newstruct, entry)) + + structs.append(newstruct) + return structs + +def GetNextStruct(file): + global line_count + global cppdirect + + got_struct = 0 + + processed_lines = [] + + have_c_comment = 0 + data = '' + while 1: + line = file.readline() + if not line: + break + + line_count += 1 + line = line[:-1] + + if not have_c_comment and re.search(r'/\*', line): + if re.search(r'/\*.*?\*/', line): + line = re.sub(r'/\*.*?\*/', '', line) + else: + line = re.sub(r'/\*.*$', '', line) + have_c_comment = 1 + + if have_c_comment: + if not re.search(r'\*/', line): + continue + have_c_comment = 0 + line = re.sub(r'^.*\*/', '', line) + + line = NormalizeLine(line) + + if not line: + continue + + if not got_struct: + if re.match(r'#include ["<].*[>"]', line): + cppdirect.append(line) + continue + + if re.match(r'^#(if( |def)|endif)', line): + cppdirect.append(line) + continue + + if re.match(r'^#define', line): + headerdirect.append(line) + continue + + if not structdef.match(line): + raise RpcGenError('Missing struct on line %d: %s' + % (line_count, line)) + else: + got_struct = 1 + data += line + continue + + # We are inside the struct + tokens = line.split('}') + if len(tokens) == 1: + data += ' ' + line + continue + + if len(tokens[1]): + raise RpcGenError('Trailing garbage after struct on line %d' + % line_count) + + # We found the end of the struct + data += ' %s}' % tokens[0] + break + + # Remove any comments, that might be in there + data = re.sub(r'/\*.*\*/', '', data) + + return data + + +def Parse(factory, file): + """ + Parses the input file and returns C code and corresponding header file. + """ + + entities = [] + + while 1: + # Just gets the whole struct nicely formatted + data = GetNextStruct(file) + + if not data: + break + + entities.extend(ProcessStruct(factory, data)) + + return entities + +class CCodeGenerator: + def __init__(self): + pass + + def GuardName(self, name): + # Use the complete provided path to the input file, with all + # non-identifier characters replaced with underscores, to + # reduce the chance of a collision between guard macros. + return 'EVENT_RPCOUT_' + nonident.sub('_', name).upper() + '_' + + def HeaderPreamble(self, name): + guard = self.GuardName(name) + pre = ( + '/*\n' + ' * Automatically generated from %s\n' + ' */\n\n' + '#ifndef %s\n' + '#define %s\n\n' ) % ( + name, guard, guard) + + for statement in headerdirect: + pre += '%s\n' % statement + if headerdirect: + pre += '\n' + + pre += ( + '#include /* for ev_uint*_t */\n' + '#include \n' + ) + + return pre + + def HeaderPostamble(self, name): + guard = self.GuardName(name) + return '#endif /* %s */' % guard + + def BodyPreamble(self, name, header_file): + global _NAME + global _VERSION + + slash = header_file.rfind('/') + if slash != -1: + header_file = header_file[slash+1:] + + pre = ( '/*\n' + ' * Automatically generated from %s\n' + ' * by %s/%s. 
DO NOT EDIT THIS FILE.\n' + ' */\n\n' ) % (name, _NAME, _VERSION) + pre += ( '#include \n' + '#include \n' + '#include \n' + '#include \n' + '#include \n' + '#include \n' + '#include \n\n' + '#if defined(EVENT__HAVE___func__)\n' + '# ifndef __func__\n' + '# define __func__ __func__\n' + '# endif\n' + '#elif defined(EVENT__HAVE___FUNCTION__)\n' + '# define __func__ __FUNCTION__\n' + '#else\n' + '# define __func__ __FILE__\n' + '#endif\n\n' + ) + + for statement in cppdirect: + pre += '%s\n' % statement + + pre += '\n#include "%s"\n\n' % header_file + + pre += 'void event_warn(const char *fmt, ...);\n' + pre += 'void event_warnx(const char *fmt, ...);\n\n' + + return pre + + def HeaderFilename(self, filename): + return '.'.join(filename.split('.')[:-1]) + '.h' + + def CodeFilename(self, filename): + return '.'.join(filename.split('.')[:-1]) + '.gen.c' + + def Struct(self, name): + return StructCCode(name) + + def EntryBytes(self, entry_type, name, tag, fixed_length): + return EntryBytes(entry_type, name, tag, fixed_length) + + def EntryVarBytes(self, entry_type, name, tag): + return EntryVarBytes(entry_type, name, tag) + + def EntryInt(self, entry_type, name, tag, bits=32): + return EntryInt(entry_type, name, tag, bits) + + def EntryString(self, entry_type, name, tag): + return EntryString(entry_type, name, tag) + + def EntryStruct(self, entry_type, name, tag, struct_name): + return EntryStruct(entry_type, name, tag, struct_name) + + def EntryArray(self, entry): + return EntryArray(entry) + +class Usage(RpcGenError): + def __init__(self, argv0): + RpcGenError.__init__("usage: %s input.rpc [[output.h] output.c]" + % argv0) + +class CommandLine: + def __init__(self, argv): + """Initialize a command-line to launch event_rpcgen, as if + from a command-line with CommandLine(sys.argv). If you're + calling this directly, remember to provide a dummy value + for sys.argv[0] + """ + self.filename = None + self.header_file = None + self.impl_file = None + self.factory = CCodeGenerator() + + if len(argv) >= 2 and argv[1] == '--quiet': + global QUIETLY + QUIETLY = 1 + del argv[1] + + if len(argv) < 2 or len(argv) > 4: + raise Usage(argv[0]) + + self.filename = argv[1].replace('\\', '/') + if len(argv) == 3: + self.impl_file = argv[2].replace('\\', '/') + if len(argv) == 4: + self.header_file = argv[2].replace('\\', '/') + self.impl_file = argv[3].replace('\\', '/') + + if not self.filename: + raise Usage(argv[0]) + + if not self.impl_file: + self.impl_file = self.factory.CodeFilename(self.filename) + + if not self.header_file: + self.header_file = self.factory.HeaderFilename(self.impl_file) + + if not self.impl_file.endswith('.c'): + raise RpcGenError("can only generate C implementation files") + if not self.header_file.endswith('.h'): + raise RpcGenError("can only generate C header files") + + def run(self): + filename = self.filename + header_file = self.header_file + impl_file = self.impl_file + factory = self.factory + + declare('Reading \"%s\"' % filename) + + fp = open(filename, 'r') + entities = Parse(factory, fp) + fp.close() + + declare('... 
creating "%s"' % header_file) + header_fp = open(header_file, 'w') + header_fp.write(factory.HeaderPreamble(filename)) + + # Create forward declarations: allows other structs to reference + # each other + for entry in entities: + entry.PrintForwardDeclaration(header_fp) + header_fp.write('\n') + + for entry in entities: + entry.PrintTags(header_fp) + entry.PrintDeclaration(header_fp) + header_fp.write(factory.HeaderPostamble(filename)) + header_fp.close() + + declare('... creating "%s"' % impl_file) + impl_fp = open(impl_file, 'w') + impl_fp.write(factory.BodyPreamble(filename, header_file)) + for entry in entities: + entry.PrintCode(impl_fp) + impl_fp.close() + +if __name__ == '__main__': + try: + CommandLine(sys.argv).run() + sys.exit(0) + + except RpcGenError as e: + sys.stderr.write(e) + sys.exit(1) + + except EnvironmentError as e: + if e.filename and e.strerror: + sys.stderr.write("%s: %s" % (e.filename, e.strerror)) + sys.exit(1) + elif e.strerror: + sys.stderr.write(e.strerror) + sys.exit(1) + else: + raise diff --git a/probe-busybox/libevent-2.1.11-stable/event_tagging.c b/probe-busybox/libevent-2.1.11-stable/event_tagging.c new file mode 100644 index 00000000..b021e8c8 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/event_tagging.c @@ -0,0 +1,609 @@ +/* + * Copyright (c) 2003-2009 Niels Provos + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_PARAM_H +#include +#endif + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#include +#undef WIN32_LEAN_AND_MEAN +#endif + +#ifdef EVENT__HAVE_SYS_IOCTL_H +#include +#endif +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include + +#include "event2/event.h" +#include "event2/tag.h" +#include "event2/buffer.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "util-internal.h" + +/* + Here's our wire format: + + Stream = TaggedData* + + TaggedData = Tag Length Data + where the integer value of 'Length' is the length of 'data'. + + Tag = HByte* LByte + where HByte is a byte with the high bit set, and LByte is a byte + with the high bit clear. The integer value of the tag is taken + by concatenating the lower 7 bits from all the tags. So for example, + the tag 0x66 is encoded as [66], whereas the tag 0x166 is encoded as + [82 66] + + Length = Integer + + Integer = NNibbles Nibble* Padding? + where NNibbles is a 4-bit value encoding the number of nibbles-1, + and each Nibble is 4 bits worth of encoded integer, in big-endian + order. If the total encoded integer size is an odd number of nibbles, + a final padding nibble with value 0 is appended. +*/ + +EVENT2_EXPORT_SYMBOL +int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf); +EVENT2_EXPORT_SYMBOL +int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf); +EVENT2_EXPORT_SYMBOL +int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag); +EVENT2_EXPORT_SYMBOL +int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf); + +void +evtag_init(void) +{ +} + +/* + * We encode integers by nibbles; the first nibble contains the number + * of significant nibbles - 1; this allows us to encode up to 64-bit + * integers. This function is byte-order independent. + * + * @param number a 32-bit unsigned integer to encode + * @param data a pointer to where the data should be written. Must + * have at least 5 bytes free. + * @return the number of bytes written into data. + */ + +#define ENCODE_INT_INTERNAL(data, number) do { \ + int off = 1, nibbles = 0; \ + \ + memset(data, 0, sizeof(number)+1); \ + while (number) { \ + if (off & 0x1) \ + data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f); \ + else \ + data[off/2] = (data[off/2] & 0x0f) | \ + ((number & 0x0f) << 4); \ + number >>= 4; \ + off++; \ + } \ + \ + if (off > 2) \ + nibbles = off - 2; \ + \ + /* Off - 1 is the number of encoded nibbles */ \ + data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4); \ + \ + return ((off + 1) / 2); \ +} while (0) + +static inline int +encode_int_internal(ev_uint8_t *data, ev_uint32_t number) +{ + ENCODE_INT_INTERNAL(data, number); +} + +static inline int +encode_int64_internal(ev_uint8_t *data, ev_uint64_t number) +{ + ENCODE_INT_INTERNAL(data, number); +} + +void +evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number) +{ + ev_uint8_t data[5]; + int len = encode_int_internal(data, number); + evbuffer_add(evbuf, data, len); +} + +void +evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number) +{ + ev_uint8_t data[9]; + int len = encode_int64_internal(data, number); + evbuffer_add(evbuf, data, len); +} + +/* + * Support variable length encoding of tags; we use the high bit in each + * octet as a continuation signal. 
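As a concrete illustration of the tag/length/payload format described in this comment, the round trip below uses only the public helpers that this file implements (evtag_marshal_int(), evtag_peek(), evtag_payload_length(), evtag_unmarshal_int()). It is a standalone sketch, not part of the patched source, and assumes an ordinary libevent 2.1 installation to compile and link against:

    #include <stdio.h>
    #include <event2/util.h>
    #include <event2/buffer.h>
    #include <event2/tag.h>

    int
    main(void)
    {
        struct evbuffer *buf = evbuffer_new();
        ev_uint32_t tag, value, payload_len;

        /* Marshal the integer 42 under tag 0x166; per the wire-format
         * comment above, the tag itself occupies two bytes ([82 66]). */
        evtag_marshal_int(buf, 0x166, 42);

        /* Peek at the tag and payload length without draining the buffer. */
        if (evtag_peek(buf, &tag) == -1 ||
            evtag_payload_length(buf, &payload_len) == -1)
            return 1;
        printf("tag=0x%x, payload=%u bytes\n",
            (unsigned)tag, (unsigned)payload_len);

        /* Unmarshal, insisting that the tag matches the one we wrote. */
        if (evtag_unmarshal_int(buf, 0x166, &value) == -1)
            return 1;
        printf("value=%u\n", (unsigned)value);

        evbuffer_free(buf);
        return 0;
    }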
+ */ + +int +evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag) +{ + int bytes = 0; + ev_uint8_t data[5]; + + memset(data, 0, sizeof(data)); + do { + ev_uint8_t lower = tag & 0x7f; + tag >>= 7; + + if (tag) + lower |= 0x80; + + data[bytes++] = lower; + } while (tag); + + if (evbuf != NULL) + evbuffer_add(evbuf, data, bytes); + + return (bytes); +} + +static int +decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain) +{ + ev_uint32_t number = 0; + size_t len = evbuffer_get_length(evbuf); + ev_uint8_t *data; + size_t count = 0; + int shift = 0, done = 0; + + /* + * the encoding of a number is at most one byte more than its + * storage size. however, it may also be much smaller. + */ + data = evbuffer_pullup( + evbuf, len < sizeof(number) + 1 ? len : sizeof(number) + 1); + if (!data) + return (-1); + + while (count++ < len) { + ev_uint8_t lower = *data++; + if (shift >= 28) { + /* Make sure it fits into 32 bits */ + if (shift > 28) + return (-1); + if ((lower & 0x7f) > 15) + return (-1); + } + number |= (lower & (unsigned)0x7f) << shift; + shift += 7; + + if (!(lower & 0x80)) { + done = 1; + break; + } + } + + if (!done) + return (-1); + + if (dodrain) + evbuffer_drain(evbuf, count); + + if (ptag != NULL) + *ptag = number; + + return count > INT_MAX ? INT_MAX : (int)(count); +} + +int +evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf) +{ + return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */)); +} + +/* + * Marshal a data type, the general format is as follows: + * + * tag number: one byte; length: var bytes; payload: var bytes + */ + +void +evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, + const void *data, ev_uint32_t len) +{ + evtag_encode_tag(evbuf, tag); + evtag_encode_int(evbuf, len); + evbuffer_add(evbuf, (void *)data, len); +} + +void +evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag, + struct evbuffer *data) +{ + evtag_encode_tag(evbuf, tag); + /* XXX support more than UINT32_MAX data */ + evtag_encode_int(evbuf, (ev_uint32_t)evbuffer_get_length(data)); + evbuffer_add_buffer(evbuf, data); +} + +/* Marshaling for integers */ +void +evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer) +{ + ev_uint8_t data[5]; + int len = encode_int_internal(data, integer); + + evtag_encode_tag(evbuf, tag); + evtag_encode_int(evbuf, len); + evbuffer_add(evbuf, data, len); +} + +void +evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag, + ev_uint64_t integer) +{ + ev_uint8_t data[9]; + int len = encode_int64_internal(data, integer); + + evtag_encode_tag(evbuf, tag); + evtag_encode_int(evbuf, len); + evbuffer_add(evbuf, data, len); +} + +void +evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string) +{ + /* TODO support strings longer than UINT32_MAX ? */ + evtag_marshal(buf, tag, string, (ev_uint32_t)strlen(string)); +} + +void +evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv) +{ + ev_uint8_t data[10]; + int len = encode_int_internal(data, tv->tv_sec); + len += encode_int_internal(data + len, tv->tv_usec); + evtag_marshal(evbuf, tag, data, len); +} + +#define DECODE_INT_INTERNAL(number, maxnibbles, pnumber, evbuf, offset) \ +do { \ + ev_uint8_t *data; \ + ev_ssize_t len = evbuffer_get_length(evbuf) - offset; \ + int nibbles = 0; \ + \ + if (len <= 0) \ + return (-1); \ + \ + /* XXX(niels): faster? 
*/ \ + data = evbuffer_pullup(evbuf, offset + 1) + offset; \ + if (!data) \ + return (-1); \ + \ + nibbles = ((data[0] & 0xf0) >> 4) + 1; \ + if (nibbles > maxnibbles || (nibbles >> 1) + 1 > len) \ + return (-1); \ + len = (nibbles >> 1) + 1; \ + \ + data = evbuffer_pullup(evbuf, offset + len) + offset; \ + if (!data) \ + return (-1); \ + \ + while (nibbles > 0) { \ + number <<= 4; \ + if (nibbles & 0x1) \ + number |= data[nibbles >> 1] & 0x0f; \ + else \ + number |= (data[nibbles >> 1] & 0xf0) >> 4; \ + nibbles--; \ + } \ + \ + *pnumber = number; \ + \ + return (int)(len); \ +} while (0) + +/* Internal: decode an integer from an evbuffer, without draining it. + * Only integers up to 32-bits are supported. + * + * @param evbuf the buffer to read from + * @param offset an index into the buffer at which we should start reading. + * @param pnumber a pointer to receive the integer. + * @return The length of the number as encoded, or -1 on error. + */ + +static int +decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int offset) +{ + ev_uint32_t number = 0; + DECODE_INT_INTERNAL(number, 8, pnumber, evbuf, offset); +} + +static int +decode_int64_internal(ev_uint64_t *pnumber, struct evbuffer *evbuf, int offset) +{ + ev_uint64_t number = 0; + DECODE_INT_INTERNAL(number, 16, pnumber, evbuf, offset); +} + +int +evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf) +{ + int res = decode_int_internal(pnumber, evbuf, 0); + if (res != -1) + evbuffer_drain(evbuf, res); + + return (res == -1 ? -1 : 0); +} + +int +evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf) +{ + int res = decode_int64_internal(pnumber, evbuf, 0); + if (res != -1) + evbuffer_drain(evbuf, res); + + return (res == -1 ? -1 : 0); +} + +int +evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag) +{ + return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */)); +} + +int +evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength) +{ + int res, len; + + len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */); + if (len == -1) + return (-1); + + res = decode_int_internal(plength, evbuf, len); + if (res == -1) + return (-1); + + *plength += res + len; + + return (0); +} + +int +evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength) +{ + int res, len; + + len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */); + if (len == -1) + return (-1); + + res = decode_int_internal(plength, evbuf, len); + if (res == -1) + return (-1); + + return (0); +} + +/* just unmarshals the header and returns the length of the remaining data */ + +int +evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag) +{ + ev_uint32_t len; + + if (decode_tag_internal(ptag, evbuf, 1 /* dodrain */) == -1) + return (-1); + if (evtag_decode_int(&len, evbuf) == -1) + return (-1); + + if (evbuffer_get_length(evbuf) < len) + return (-1); + + return (len); +} + +int +evtag_consume(struct evbuffer *evbuf) +{ + int len; + if ((len = evtag_unmarshal_header(evbuf, NULL)) == -1) + return (-1); + evbuffer_drain(evbuf, len); + + return (0); +} + +/* Reads the data type from an event buffer */ + +int +evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst) +{ + int len; + + if ((len = evtag_unmarshal_header(src, ptag)) == -1) + return (-1); + + if (evbuffer_add(dst, evbuffer_pullup(src, len), len) == -1) + return (-1); + + evbuffer_drain(src, len); + + return (len); +} + +/* Marshaling for integers */ + +int +evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag, + ev_uint32_t *pinteger) +{ + 
ev_uint32_t tag; + ev_uint32_t len; + int result; + + if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1) + return (-1); + if (need_tag != tag) + return (-1); + if (evtag_decode_int(&len, evbuf) == -1) + return (-1); + + if (evbuffer_get_length(evbuf) < len) + return (-1); + + result = decode_int_internal(pinteger, evbuf, 0); + evbuffer_drain(evbuf, len); + if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/ + return (-1); + else + return result; +} + +int +evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag, + ev_uint64_t *pinteger) +{ + ev_uint32_t tag; + ev_uint32_t len; + int result; + + if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1) + return (-1); + if (need_tag != tag) + return (-1); + if (evtag_decode_int(&len, evbuf) == -1) + return (-1); + + if (evbuffer_get_length(evbuf) < len) + return (-1); + + result = decode_int64_internal(pinteger, evbuf, 0); + evbuffer_drain(evbuf, len); + if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/ + return (-1); + else + return result; +} + +/* Unmarshal a fixed length tag */ + +int +evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data, + size_t len) +{ + ev_uint32_t tag; + int tag_len; + + /* Now unmarshal a tag and check that it matches the tag we want */ + if ((tag_len = evtag_unmarshal_header(src, &tag)) < 0 || + tag != need_tag) + return (-1); + + if ((size_t)tag_len != len) + return (-1); + + evbuffer_remove(src, data, len); + return (0); +} + +int +evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag, + char **pstring) +{ + ev_uint32_t tag; + int tag_len; + + if ((tag_len = evtag_unmarshal_header(evbuf, &tag)) == -1 || + tag != need_tag) + return (-1); + + *pstring = mm_malloc(tag_len + 1); + if (*pstring == NULL) { + event_warn("%s: malloc", __func__); + return -1; + } + evbuffer_remove(evbuf, *pstring, tag_len); + (*pstring)[tag_len] = '\0'; + + return (0); +} + +int +evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag, + struct timeval *ptv) +{ + ev_uint32_t tag; + ev_uint32_t integer; + int len, offset, offset2; + int result = -1; + + if ((len = evtag_unmarshal_header(evbuf, &tag)) == -1) + return (-1); + if (tag != need_tag) + goto done; + if ((offset = decode_int_internal(&integer, evbuf, 0)) == -1) + goto done; + ptv->tv_sec = integer; + if ((offset2 = decode_int_internal(&integer, evbuf, offset)) == -1) + goto done; + ptv->tv_usec = integer; + if (offset + offset2 > len) /* XXX Should this be != instead of > ? */ + goto done; + + result = 0; + done: + evbuffer_drain(evbuf, len); + return result; +} diff --git a/probe-busybox/libevent-2.1.11-stable/evmap-internal.h b/probe-busybox/libevent-2.1.11-stable/evmap-internal.h new file mode 100644 index 00000000..dfc81d50 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evmap-internal.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVMAP_INTERNAL_H_INCLUDED_ +#define EVMAP_INTERNAL_H_INCLUDED_ + +/** @file evmap-internal.h + * + * An event_map is a utility structure to map each fd or signal to zero or + * more events. Functions to manipulate event_maps should only be used from + * inside libevent. They generally need to hold the lock on the corresponding + * event_base. + **/ + +struct event_base; +struct event; + +/** Initialize an event_map for use. + */ +void evmap_io_initmap_(struct event_io_map* ctx); +void evmap_signal_initmap_(struct event_signal_map* ctx); + +/** Remove all entries from an event_map. + + @param ctx the map to clear. + */ +void evmap_io_clear_(struct event_io_map* ctx); +void evmap_signal_clear_(struct event_signal_map* ctx); + +/** Add an IO event (some combination of EV_READ or EV_WRITE) to an + event_base's list of events on a given file descriptor, and tell the + underlying eventops about the fd if its state has changed. + + Requires that ev is not already added. + + @param base the event_base to operate on. + @param fd the file descriptor corresponding to ev. + @param ev the event to add. +*/ +int evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev); +/** Remove an IO event (some combination of EV_READ or EV_WRITE) to an + event_base's list of events on a given file descriptor, and tell the + underlying eventops about the fd if its state has changed. + + @param base the event_base to operate on. + @param fd the file descriptor corresponding to ev. + @param ev the event to remove. + */ +int evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev); +/** Active the set of events waiting on an event_base for a given fd. + + @param base the event_base to operate on. + @param fd the file descriptor that has become active. + @param events a bitmask of EV_READ|EV_WRITE|EV_ET. +*/ +void evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events); + + +/* These functions behave in the same way as evmap_io_*, except they work on + * signals rather than fds. signals use a linear map everywhere; fds use + * either a linear map or a hashtable. */ +int evmap_signal_add_(struct event_base *base, int signum, struct event *ev); +int evmap_signal_del_(struct event_base *base, int signum, struct event *ev); +void evmap_signal_active_(struct event_base *base, evutil_socket_t signum, int ncalls); + +/* Return the fdinfo object associated with a given fd. If the fd has no + * events associated with it, the result may be NULL. 
+ */ +void *evmap_io_get_fdinfo_(struct event_io_map *ctx, evutil_socket_t fd); + +/* Helper for event_reinit(): Tell the backend to re-add every fd and signal + * for which we have a pending event. + */ +int evmap_reinit_(struct event_base *base); + +/* Helper for event_base_free(): Call event_del() on every pending fd and + * signal event. + */ +void evmap_delete_all_(struct event_base *base); + +/* Helper for event_base_assert_ok_(): Check referential integrity of the + * evmaps. + */ +void evmap_check_integrity_(struct event_base *base); + +/* Helper: Call fn on every fd or signal event, passing as its arguments the + * provided event_base, the event, and arg. If fn returns 0, process the next + * event. If it returns any other value, return that value and process no + * more events. + */ +int evmap_foreach_event_(struct event_base *base, + event_base_foreach_event_cb fn, + void *arg); + +#endif /* EVMAP_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/evmap.c b/probe-busybox/libevent-2.1.11-stable/evmap.c new file mode 100644 index 00000000..ffc991f5 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evmap.c @@ -0,0 +1,1062 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#endif +#include +#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H) +#include +#endif +#include +#include +#include +#ifndef _WIN32 +#include +#endif +#include +#include +#include +#include +#include + +#include "event-internal.h" +#include "evmap-internal.h" +#include "mm-internal.h" +#include "changelist-internal.h" + +/** An entry for an evmap_io list: notes all the events that want to read or + write on a given fd, and the number of each. + */ +struct evmap_io { + struct event_dlist events; + ev_uint16_t nread; + ev_uint16_t nwrite; + ev_uint16_t nclose; +}; + +/* An entry for an evmap_signal list: notes all the events that want to know + when a signal triggers. 
*/ +struct evmap_signal { + struct event_dlist events; +}; + +/* On some platforms, fds start at 0 and increment by 1 as they are + allocated, and old numbers get used. For these platforms, we + implement io maps just like signal maps: as an array of pointers to + struct evmap_io. But on other platforms (windows), sockets are not + 0-indexed, not necessarily consecutive, and not necessarily reused. + There, we use a hashtable to implement evmap_io. +*/ +#ifdef EVMAP_USE_HT +struct event_map_entry { + HT_ENTRY(event_map_entry) map_node; + evutil_socket_t fd; + union { /* This is a union in case we need to make more things that can + be in the hashtable. */ + struct evmap_io evmap_io; + } ent; +}; + +/* Helper used by the event_io_map hashtable code; tries to return a good hash + * of the fd in e->fd. */ +static inline unsigned +hashsocket(struct event_map_entry *e) +{ + /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to + * matter. Our hashtable implementation really likes low-order bits, + * though, so let's do the rotate-and-add trick. */ + unsigned h = (unsigned) e->fd; + h += (h >> 2) | (h << 30); + return h; +} + +/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2 + * have the same e->fd. */ +static inline int +eqsocket(struct event_map_entry *e1, struct event_map_entry *e2) +{ + return e1->fd == e2->fd; +} + +HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket) +HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket, + 0.5, mm_malloc, mm_realloc, mm_free) + +#define GET_IO_SLOT(x, map, slot, type) \ + do { \ + struct event_map_entry key_, *ent_; \ + key_.fd = slot; \ + ent_ = HT_FIND(event_io_map, map, &key_); \ + (x) = ent_ ? &ent_->ent.type : NULL; \ + } while (0); + +#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \ + do { \ + struct event_map_entry key_, *ent_; \ + key_.fd = slot; \ + HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \ + event_map_entry, &key_, ptr, \ + { \ + ent_ = *ptr; \ + }, \ + { \ + ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \ + if (EVUTIL_UNLIKELY(ent_ == NULL)) \ + return (-1); \ + ent_->fd = slot; \ + (ctor)(&ent_->ent.type); \ + HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \ + }); \ + (x) = &ent_->ent.type; \ + } while (0) + +void evmap_io_initmap_(struct event_io_map *ctx) +{ + HT_INIT(event_io_map, ctx); +} + +void evmap_io_clear_(struct event_io_map *ctx) +{ + struct event_map_entry **ent, **next, *this; + for (ent = HT_START(event_io_map, ctx); ent; ent = next) { + this = *ent; + next = HT_NEXT_RMV(event_io_map, ctx, ent); + mm_free(this); + } + HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */ +} +#endif + +/* Set the variable 'x' to the field in event_map 'map' with fields of type + 'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL + if there are no entries for 'slot'. Does no bounds-checking. */ +#define GET_SIGNAL_SLOT(x, map, slot, type) \ + (x) = (struct type *)((map)->entries[slot]) +/* As GET_SLOT, but construct the entry for 'slot' if it is not present, + by allocating enough memory for a 'struct type', and initializing the new + value by calling the function 'ctor' on it. Makes the function + return -1 on allocation failure. 
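Both GET_IO_SLOT_AND_CTOR and GET_SIGNAL_SLOT_AND_CTOR rely on a single allocation that places the backend's per-fd data ("fdinfo") directly behind the evmap entry, which is how evmap_io_get_fdinfo_() later finds it. A minimal standalone sketch of that layout (io_entry and the fdinfo_len value are stand-ins, not libevent types):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for struct evmap_io; the backend's private per-fd data is
     * carved out of the same allocation, right behind the struct. */
    struct io_entry {
        unsigned short nread;
        unsigned short nwrite;
    };

    int
    main(void)
    {
        size_t fdinfo_len = 8;   /* plays the role of evsel->fdinfo_len */
        struct io_entry *ctx;
        void *extra;

        /* Mirrors mm_calloc(1, sizeof(struct type) + fdinfo_len) above. */
        ctx = calloc(1, sizeof(*ctx) + fdinfo_len);
        if (ctx == NULL)
            return 1;

        /* Mirrors evmap_io_get_fdinfo_(): the extra area starts right
         * after the entry itself. */
        extra = (char *)ctx + sizeof(*ctx);
        memset(extra, 0, fdinfo_len);

        printf("entry at %p, fdinfo at %p\n", (void *)ctx, extra);
        free(ctx);
        return 0;
    }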
+ */ +#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \ + do { \ + if ((map)->entries[slot] == NULL) { \ + (map)->entries[slot] = \ + mm_calloc(1,sizeof(struct type)+fdinfo_len); \ + if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \ + return (-1); \ + (ctor)((struct type *)(map)->entries[slot]); \ + } \ + (x) = (struct type *)((map)->entries[slot]); \ + } while (0) + +/* If we aren't using hashtables, then define the IO_SLOT macros and functions + as thin aliases over the SIGNAL_SLOT versions. */ +#ifndef EVMAP_USE_HT +#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type) +#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \ + GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) +#define FDINFO_OFFSET sizeof(struct evmap_io) +void +evmap_io_initmap_(struct event_io_map* ctx) +{ + evmap_signal_initmap_(ctx); +} +void +evmap_io_clear_(struct event_io_map* ctx) +{ + evmap_signal_clear_(ctx); +} +#endif + + +/** Expand 'map' with new entries of width 'msize' until it is big enough + to store a value in 'slot'. + */ +static int +evmap_make_space(struct event_signal_map *map, int slot, int msize) +{ + if (map->nentries <= slot) { + int nentries = map->nentries ? map->nentries : 32; + void **tmp; + + if (slot > INT_MAX / 2) + return (-1); + + while (nentries <= slot) + nentries <<= 1; + + if (nentries > INT_MAX / msize) + return (-1); + + tmp = (void **)mm_realloc(map->entries, nentries * msize); + if (tmp == NULL) + return (-1); + + memset(&tmp[map->nentries], 0, + (nentries - map->nentries) * msize); + + map->nentries = nentries; + map->entries = tmp; + } + + return (0); +} + +void +evmap_signal_initmap_(struct event_signal_map *ctx) +{ + ctx->nentries = 0; + ctx->entries = NULL; +} + +void +evmap_signal_clear_(struct event_signal_map *ctx) +{ + if (ctx->entries != NULL) { + int i; + for (i = 0; i < ctx->nentries; ++i) { + if (ctx->entries[i] != NULL) + mm_free(ctx->entries[i]); + } + mm_free(ctx->entries); + ctx->entries = NULL; + } + ctx->nentries = 0; +} + + +/* code specific to file descriptors */ + +/** Constructor for struct evmap_io */ +static void +evmap_io_init(struct evmap_io *entry) +{ + LIST_INIT(&entry->events); + entry->nread = 0; + entry->nwrite = 0; + entry->nclose = 0; +} + + +/* return -1 on error, 0 on success if nothing changed in the event backend, + * and 1 on success if something did. 
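The reference counting below (nread/nwrite/nclose) means several events can watch the same fd while the backend is told about the fd only when the first watcher is added and the last one removed. A small sketch using only the public API (stdin as the fd purely for illustration; assumes a normal libevent 2.1 build):

    #include <stdio.h>
    #include <event2/event.h>

    static void
    read_cb(evutil_socket_t fd, short what, void *arg)
    {
        printf("callback %s: fd %d readable\n", (const char *)arg, (int)fd);
    }

    int
    main(void)
    {
        struct event_base *base = event_base_new();

        /* Two events on the same fd: evmap_io_add_() bumps nread to 1 and
         * then 2, but only the first transition triggers a backend add. */
        struct event *ev1 = event_new(base, 0, EV_READ | EV_PERSIST,
            read_cb, (void *)"one");
        struct event *ev2 = event_new(base, 0, EV_READ | EV_PERSIST,
            read_cb, (void *)"two");

        event_add(ev1, NULL);
        event_add(ev2, NULL);

        /* Run one iteration; if stdin becomes readable, both callbacks fire. */
        event_base_loop(base, EVLOOP_ONCE);

        event_free(ev1);
        event_free(ev2);
        event_base_free(base);
        return 0;
    }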
*/ +int +evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev) +{ + const struct eventop *evsel = base->evsel; + struct event_io_map *io = &base->io; + struct evmap_io *ctx = NULL; + int nread, nwrite, nclose, retval = 0; + short res = 0, old = 0; + struct event *old_ev; + + EVUTIL_ASSERT(fd == ev->ev_fd); + + if (fd < 0) + return 0; + +#ifndef EVMAP_USE_HT + if (fd >= io->nentries) { + if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1) + return (-1); + } +#endif + GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init, + evsel->fdinfo_len); + + nread = ctx->nread; + nwrite = ctx->nwrite; + nclose = ctx->nclose; + + if (nread) + old |= EV_READ; + if (nwrite) + old |= EV_WRITE; + if (nclose) + old |= EV_CLOSED; + + if (ev->ev_events & EV_READ) { + if (++nread == 1) + res |= EV_READ; + } + if (ev->ev_events & EV_WRITE) { + if (++nwrite == 1) + res |= EV_WRITE; + } + if (ev->ev_events & EV_CLOSED) { + if (++nclose == 1) + res |= EV_CLOSED; + } + if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) { + event_warnx("Too many events reading or writing on fd %d", + (int)fd); + return -1; + } + if (EVENT_DEBUG_MODE_IS_ON() && + (old_ev = LIST_FIRST(&ctx->events)) && + (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) { + event_warnx("Tried to mix edge-triggered and non-edge-triggered" + " events on fd %d", (int)fd); + return -1; + } + + if (res) { + void *extra = ((char*)ctx) + sizeof(struct evmap_io); + /* XXX(niels): we cannot mix edge-triggered and + * level-triggered, we should probably assert on + * this. */ + if (evsel->add(base, ev->ev_fd, + old, (ev->ev_events & EV_ET) | res, extra) == -1) + return (-1); + retval = 1; + } + + ctx->nread = (ev_uint16_t) nread; + ctx->nwrite = (ev_uint16_t) nwrite; + ctx->nclose = (ev_uint16_t) nclose; + LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next); + + return (retval); +} + +/* return -1 on error, 0 on success if nothing changed in the event backend, + * and 1 on success if something did. 
*/ +int +evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev) +{ + const struct eventop *evsel = base->evsel; + struct event_io_map *io = &base->io; + struct evmap_io *ctx; + int nread, nwrite, nclose, retval = 0; + short res = 0, old = 0; + + if (fd < 0) + return 0; + + EVUTIL_ASSERT(fd == ev->ev_fd); + +#ifndef EVMAP_USE_HT + if (fd >= io->nentries) + return (-1); +#endif + + GET_IO_SLOT(ctx, io, fd, evmap_io); + + nread = ctx->nread; + nwrite = ctx->nwrite; + nclose = ctx->nclose; + + if (nread) + old |= EV_READ; + if (nwrite) + old |= EV_WRITE; + if (nclose) + old |= EV_CLOSED; + + if (ev->ev_events & EV_READ) { + if (--nread == 0) + res |= EV_READ; + EVUTIL_ASSERT(nread >= 0); + } + if (ev->ev_events & EV_WRITE) { + if (--nwrite == 0) + res |= EV_WRITE; + EVUTIL_ASSERT(nwrite >= 0); + } + if (ev->ev_events & EV_CLOSED) { + if (--nclose == 0) + res |= EV_CLOSED; + EVUTIL_ASSERT(nclose >= 0); + } + + if (res) { + void *extra = ((char*)ctx) + sizeof(struct evmap_io); + if (evsel->del(base, ev->ev_fd, + old, (ev->ev_events & EV_ET) | res, extra) == -1) { + retval = -1; + } else { + retval = 1; + } + } + + ctx->nread = nread; + ctx->nwrite = nwrite; + ctx->nclose = nclose; + LIST_REMOVE(ev, ev_io_next); + + return (retval); +} + +void +evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events) +{ + struct event_io_map *io = &base->io; + struct evmap_io *ctx; + struct event *ev; + +#ifndef EVMAP_USE_HT + if (fd < 0 || fd >= io->nentries) + return; +#endif + GET_IO_SLOT(ctx, io, fd, evmap_io); + + if (NULL == ctx) + return; + LIST_FOREACH(ev, &ctx->events, ev_io_next) { + if (ev->ev_events & events) + event_active_nolock_(ev, ev->ev_events & events, 1); + } +} + +/* code specific to signals */ + +static void +evmap_signal_init(struct evmap_signal *entry) +{ + LIST_INIT(&entry->events); +} + + +int +evmap_signal_add_(struct event_base *base, int sig, struct event *ev) +{ + const struct eventop *evsel = base->evsigsel; + struct event_signal_map *map = &base->sigmap; + struct evmap_signal *ctx = NULL; + + if (sig < 0 || sig >= NSIG) + return (-1); + + if (sig >= map->nentries) { + if (evmap_make_space( + map, sig, sizeof(struct evmap_signal *)) == -1) + return (-1); + } + GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init, + base->evsigsel->fdinfo_len); + + if (LIST_EMPTY(&ctx->events)) { + if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL) + == -1) + return (-1); + } + + LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next); + + return (1); +} + +int +evmap_signal_del_(struct event_base *base, int sig, struct event *ev) +{ + const struct eventop *evsel = base->evsigsel; + struct event_signal_map *map = &base->sigmap; + struct evmap_signal *ctx; + + if (sig < 0 || sig >= map->nentries) + return (-1); + + GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); + + LIST_REMOVE(ev, ev_signal_next); + + if (LIST_FIRST(&ctx->events) == NULL) { + if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1) + return (-1); + } + + return (1); +} + +void +evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls) +{ + struct event_signal_map *map = &base->sigmap; + struct evmap_signal *ctx; + struct event *ev; + + if (sig < 0 || sig >= map->nentries) + return; + GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); + + if (!ctx) + return; + LIST_FOREACH(ev, &ctx->events, ev_signal_next) + event_active_nolock_(ev, EV_SIGNAL, ncalls); +} + +void * +evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd) +{ + struct evmap_io *ctx; + 
GET_IO_SLOT(ctx, map, fd, evmap_io); + if (ctx) + return ((char*)ctx) + sizeof(struct evmap_io); + else + return NULL; +} + +/* Callback type for evmap_io_foreach_fd */ +typedef int (*evmap_io_foreach_fd_cb)( + struct event_base *, evutil_socket_t, struct evmap_io *, void *); + +/* Multipurpose helper function: Iterate over every file descriptor event_base + * for which we could have EV_READ or EV_WRITE events. For each such fd, call + * fn(base, signum, evmap_io, arg), where fn is the user-provided + * function, base is the event_base, signum is the signal number, evmap_io + * is an evmap_io structure containing a list of events pending on the + * file descriptor, and arg is the user-supplied argument. + * + * If fn returns 0, continue on to the next signal. Otherwise, return the same + * value that fn returned. + * + * Note that there is no guarantee that the file descriptors will be processed + * in any particular order. + */ +static int +evmap_io_foreach_fd(struct event_base *base, + evmap_io_foreach_fd_cb fn, + void *arg) +{ + evutil_socket_t fd; + struct event_io_map *iomap = &base->io; + int r = 0; +#ifdef EVMAP_USE_HT + struct event_map_entry **mapent; + HT_FOREACH(mapent, event_io_map, iomap) { + struct evmap_io *ctx = &(*mapent)->ent.evmap_io; + fd = (*mapent)->fd; +#else + for (fd = 0; fd < iomap->nentries; ++fd) { + struct evmap_io *ctx = iomap->entries[fd]; + if (!ctx) + continue; +#endif + if ((r = fn(base, fd, ctx, arg))) + break; + } + return r; +} + +/* Callback type for evmap_signal_foreach_signal */ +typedef int (*evmap_signal_foreach_signal_cb)( + struct event_base *, int, struct evmap_signal *, void *); + +/* Multipurpose helper function: Iterate over every signal number in the + * event_base for which we could have signal events. For each such signal, + * call fn(base, signum, evmap_signal, arg), where fn is the user-provided + * function, base is the event_base, signum is the signal number, evmap_signal + * is an evmap_signal structure containing a list of events pending on the + * signal, and arg is the user-supplied argument. + * + * If fn returns 0, continue on to the next signal. Otherwise, return the same + * value that fn returned. + */ +static int +evmap_signal_foreach_signal(struct event_base *base, + evmap_signal_foreach_signal_cb fn, + void *arg) +{ + struct event_signal_map *sigmap = &base->sigmap; + int r = 0; + int signum; + + for (signum = 0; signum < sigmap->nentries; ++signum) { + struct evmap_signal *ctx = sigmap->entries[signum]; + if (!ctx) + continue; + if ((r = fn(base, signum, ctx, arg))) + break; + } + return r; +} + +/* Helper for evmap_reinit_: tell the backend to add every fd for which we have + * pending events, with the appropriate combination of EV_READ, EV_WRITE, and + * EV_ET. 
*/ +static int +evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd, + struct evmap_io *ctx, void *arg) +{ + const struct eventop *evsel = base->evsel; + void *extra; + int *result = arg; + short events = 0; + struct event *ev; + EVUTIL_ASSERT(ctx); + + extra = ((char*)ctx) + sizeof(struct evmap_io); + if (ctx->nread) + events |= EV_READ; + if (ctx->nwrite) + events |= EV_WRITE; + if (ctx->nclose) + events |= EV_CLOSED; + if (evsel->fdinfo_len) + memset(extra, 0, evsel->fdinfo_len); + if (events && + (ev = LIST_FIRST(&ctx->events)) && + (ev->ev_events & EV_ET)) + events |= EV_ET; + if (evsel->add(base, fd, 0, events, extra) == -1) + *result = -1; + + return 0; +} + +/* Helper for evmap_reinit_: tell the backend to add every signal for which we + * have pending events. */ +static int +evmap_signal_reinit_iter_fn(struct event_base *base, + int signum, struct evmap_signal *ctx, void *arg) +{ + const struct eventop *evsel = base->evsigsel; + int *result = arg; + + if (!LIST_EMPTY(&ctx->events)) { + if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1) + *result = -1; + } + return 0; +} + +int +evmap_reinit_(struct event_base *base) +{ + int result = 0; + + evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result); + if (result < 0) + return -1; + evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result); + if (result < 0) + return -1; + return 0; +} + +/* Helper for evmap_delete_all_: delete every event in an event_dlist. */ +static int +delete_all_in_dlist(struct event_dlist *dlist) +{ + struct event *ev; + while ((ev = LIST_FIRST(dlist))) + event_del(ev); + return 0; +} + +/* Helper for evmap_delete_all_: delete every event pending on an fd. */ +static int +evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd, + struct evmap_io *io_info, void *arg) +{ + return delete_all_in_dlist(&io_info->events); +} + +/* Helper for evmap_delete_all_: delete every event pending on a signal. */ +static int +evmap_signal_delete_all_iter_fn(struct event_base *base, int signum, + struct evmap_signal *sig_info, void *arg) +{ + return delete_all_in_dlist(&sig_info->events); +} + +void +evmap_delete_all_(struct event_base *base) +{ + evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL); + evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL); +} + +/** Per-fd structure for use with changelists. It keeps track, for each fd or + * signal using the changelist, of where its entry in the changelist is. + */ +struct event_changelist_fdinfo { + int idxplus1; /* this is the index +1, so that memset(0) will make it + * a no-such-element */ +}; + +void +event_changelist_init_(struct event_changelist *changelist) +{ + changelist->changes = NULL; + changelist->changes_size = 0; + changelist->n_changes = 0; +} + +/** Helper: return the changelist_fdinfo corresponding to a given change. 
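+ * (The fdinfo is kept in the extra space allocated just past the evmap_io or
+ * evmap_signal entry for the changed fd or signal.)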
*/ +static inline struct event_changelist_fdinfo * +event_change_get_fdinfo(struct event_base *base, + const struct event_change *change) +{ + char *ptr; + if (change->read_change & EV_CHANGE_SIGNAL) { + struct evmap_signal *ctx; + GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal); + ptr = ((char*)ctx) + sizeof(struct evmap_signal); + } else { + struct evmap_io *ctx; + GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io); + ptr = ((char*)ctx) + sizeof(struct evmap_io); + } + return (void*)ptr; +} + +/** Callback helper for event_changelist_assert_ok */ +static int +event_changelist_assert_ok_foreach_iter_fn( + struct event_base *base, + evutil_socket_t fd, struct evmap_io *io, void *arg) +{ + struct event_changelist *changelist = &base->changelist; + struct event_changelist_fdinfo *f; + f = (void*) + ( ((char*)io) + sizeof(struct evmap_io) ); + if (f->idxplus1) { + struct event_change *c = &changelist->changes[f->idxplus1 - 1]; + EVUTIL_ASSERT(c->fd == fd); + } + return 0; +} + +/** Make sure that the changelist is consistent with the evmap structures. */ +static void +event_changelist_assert_ok(struct event_base *base) +{ + int i; + struct event_changelist *changelist = &base->changelist; + + EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes); + for (i = 0; i < changelist->n_changes; ++i) { + struct event_change *c = &changelist->changes[i]; + struct event_changelist_fdinfo *f; + EVUTIL_ASSERT(c->fd >= 0); + f = event_change_get_fdinfo(base, c); + EVUTIL_ASSERT(f); + EVUTIL_ASSERT(f->idxplus1 == i + 1); + } + + evmap_io_foreach_fd(base, + event_changelist_assert_ok_foreach_iter_fn, + NULL); +} + +#ifdef DEBUG_CHANGELIST +#define event_changelist_check(base) event_changelist_assert_ok((base)) +#else +#define event_changelist_check(base) ((void)0) +#endif + +void +event_changelist_remove_all_(struct event_changelist *changelist, + struct event_base *base) +{ + int i; + + event_changelist_check(base); + + for (i = 0; i < changelist->n_changes; ++i) { + struct event_change *ch = &changelist->changes[i]; + struct event_changelist_fdinfo *fdinfo = + event_change_get_fdinfo(base, ch); + EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1); + fdinfo->idxplus1 = 0; + } + + changelist->n_changes = 0; + + event_changelist_check(base); +} + +void +event_changelist_freemem_(struct event_changelist *changelist) +{ + if (changelist->changes) + mm_free(changelist->changes); + event_changelist_init_(changelist); /* zero it all out. */ +} + +/** Increase the size of 'changelist' to hold more changes. */ +static int +event_changelist_grow(struct event_changelist *changelist) +{ + int new_size; + struct event_change *new_changes; + if (changelist->changes_size < 64) + new_size = 64; + else + new_size = changelist->changes_size * 2; + + new_changes = mm_realloc(changelist->changes, + new_size * sizeof(struct event_change)); + + if (EVUTIL_UNLIKELY(new_changes == NULL)) + return (-1); + + changelist->changes = new_changes; + changelist->changes_size = new_size; + + return (0); +} + +/** Return a pointer to the changelist entry for the file descriptor or signal + * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its + * old_events field to old_events. 
+ */ +static struct event_change * +event_changelist_get_or_construct(struct event_changelist *changelist, + evutil_socket_t fd, + short old_events, + struct event_changelist_fdinfo *fdinfo) +{ + struct event_change *change; + + if (fdinfo->idxplus1 == 0) { + int idx; + EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size); + + if (changelist->n_changes == changelist->changes_size) { + if (event_changelist_grow(changelist) < 0) + return NULL; + } + + idx = changelist->n_changes++; + change = &changelist->changes[idx]; + fdinfo->idxplus1 = idx + 1; + + memset(change, 0, sizeof(struct event_change)); + change->fd = fd; + change->old_events = old_events; + } else { + change = &changelist->changes[fdinfo->idxplus1 - 1]; + EVUTIL_ASSERT(change->fd == fd); + } + return change; +} + +int +event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events, + void *p) +{ + struct event_changelist *changelist = &base->changelist; + struct event_changelist_fdinfo *fdinfo = p; + struct event_change *change; + ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL)); + + event_changelist_check(base); + + change = event_changelist_get_or_construct(changelist, fd, old, fdinfo); + if (!change) + return -1; + + /* An add replaces any previous delete, but doesn't result in a no-op, + * since the delete might fail (because the fd had been closed since + * the last add, for instance. */ + + if (events & (EV_READ|EV_SIGNAL)) + change->read_change = evchange; + if (events & EV_WRITE) + change->write_change = evchange; + if (events & EV_CLOSED) + change->close_change = evchange; + + event_changelist_check(base); + return (0); +} + +int +event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events, + void *p) +{ + struct event_changelist *changelist = &base->changelist; + struct event_changelist_fdinfo *fdinfo = p; + struct event_change *change; + ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET); + + event_changelist_check(base); + change = event_changelist_get_or_construct(changelist, fd, old, fdinfo); + event_changelist_check(base); + if (!change) + return -1; + + /* A delete on an event set that doesn't contain the event to be + deleted produces a no-op. This effectively emoves any previous + uncommitted add, rather than replacing it: on those platforms where + "add, delete, dispatch" is not the same as "no-op, dispatch", we + want the no-op behavior. + + If we have a no-op item, we could remove it it from the list + entirely, but really there's not much point: skipping the no-op + change when we do the dispatch later is far cheaper than rejuggling + the array now. + + As this stands, it also lets through deletions of events that are + not currently set. + */ + + if (events & (EV_READ|EV_SIGNAL)) { + if (!(change->old_events & (EV_READ | EV_SIGNAL))) + change->read_change = 0; + else + change->read_change = del; + } + if (events & EV_WRITE) { + if (!(change->old_events & EV_WRITE)) + change->write_change = 0; + else + change->write_change = del; + } + if (events & EV_CLOSED) { + if (!(change->old_events & EV_CLOSED)) + change->close_change = 0; + else + change->close_change = del; + } + + event_changelist_check(base); + return (0); +} + +/* Helper for evmap_check_integrity_: verify that all of the events pending on + * given fd are set up correctly, and that the nread and nwrite counts on that + * fd are correct. 
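+ * (Each of nread, nwrite and nclose must equal the number of EV_READ,
+ * EV_WRITE and EV_CLOSED events found in the fd's event list.)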
*/ +static int +evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd, + struct evmap_io *io_info, void *arg) +{ + struct event *ev; + int n_read = 0, n_write = 0, n_close = 0; + + /* First, make sure the list itself isn't corrupt. Otherwise, + * running LIST_FOREACH could be an exciting adventure. */ + EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next); + + LIST_FOREACH(ev, &io_info->events, ev_io_next) { + EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); + EVUTIL_ASSERT(ev->ev_fd == fd); + EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL)); + EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); + if (ev->ev_events & EV_READ) + ++n_read; + if (ev->ev_events & EV_WRITE) + ++n_write; + if (ev->ev_events & EV_CLOSED) + ++n_close; + } + + EVUTIL_ASSERT(n_read == io_info->nread); + EVUTIL_ASSERT(n_write == io_info->nwrite); + EVUTIL_ASSERT(n_close == io_info->nclose); + + return 0; +} + +/* Helper for evmap_check_integrity_: verify that all of the events pending + * on given signal are set up correctly. */ +static int +evmap_signal_check_integrity_fn(struct event_base *base, + int signum, struct evmap_signal *sig_info, void *arg) +{ + struct event *ev; + /* First, make sure the list itself isn't corrupt. */ + EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next); + + LIST_FOREACH(ev, &sig_info->events, ev_io_next) { + EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); + EVUTIL_ASSERT(ev->ev_fd == signum); + EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL)); + EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); + } + return 0; +} + +void +evmap_check_integrity_(struct event_base *base) +{ + evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL); + evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL); + + if (base->evsel->add == event_changelist_add_) + event_changelist_assert_ok(base); +} + +/* Helper type for evmap_foreach_event_: Bundles a function to call on every + * event, and the user-provided void* to use as its third argument. */ +struct evmap_foreach_event_helper { + event_base_foreach_event_cb fn; + void *arg; +}; + +/* Helper for evmap_foreach_event_: calls a provided function on every event + * pending on a given fd. */ +static int +evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd, + struct evmap_io *io_info, void *arg) +{ + struct evmap_foreach_event_helper *h = arg; + struct event *ev; + int r; + LIST_FOREACH(ev, &io_info->events, ev_io_next) { + if ((r = h->fn(base, ev, h->arg))) + return r; + } + return 0; +} + +/* Helper for evmap_foreach_event_: calls a provided function on every event + * pending on a given signal. 
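+ * (Iteration stops early if the callback returns non-zero; that value is
+ * propagated back to the caller.)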
*/ +static int +evmap_signal_foreach_event_fn(struct event_base *base, int signum, + struct evmap_signal *sig_info, void *arg) +{ + struct event *ev; + struct evmap_foreach_event_helper *h = arg; + int r; + LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { + if ((r = h->fn(base, ev, h->arg))) + return r; + } + return 0; +} + +int +evmap_foreach_event_(struct event_base *base, + event_base_foreach_event_cb fn, void *arg) +{ + struct evmap_foreach_event_helper h; + int r; + h.fn = fn; + h.arg = arg; + if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h))) + return r; + return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h); +} + diff --git a/probe-busybox/libevent-2.1.11-stable/evport.c b/probe-busybox/libevent-2.1.11-stable/evport.c new file mode 100644 index 00000000..a014386b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evport.c @@ -0,0 +1,451 @@ +/* + * Submitted by David Pacheco (dp.spambait@gmail.com) + * + * Copyright 2006-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 2007 Sun Microsystems. All rights reserved. + * Use is subject to license terms. + */ + +/* + * evport.c: event backend using Solaris 10 event ports. See port_create(3C). + * This implementation is loosely modeled after the one used for select(2) (in + * select.c). + * + * The outstanding events are tracked in a data structure called evport_data. + * Each entry in the ed_fds array corresponds to a file descriptor, and contains + * pointers to the read and write events that correspond to that fd. (That is, + * when the file is readable, the "read" event should handle it, etc.) + * + * evport_add and evport_del update this data structure. evport_dispatch uses it + * to determine where to callback when an event occurs (which it gets from + * port_getn). + * + * Helper functions are used: grow() grows the file descriptor array as + * necessary when large fd's come in. reassociate() takes care of maintaining + * the proper file-descriptor/event-port associations. 
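+ *
+ * The reassociation step matters because event ports are one-shot: once
+ * port_getn() has reported an fd, its association is dropped, so
+ * evport_dispatch() re-associates any still-active fds before polling again.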
+ * + * As in the select(2) implementation, signals are handled by evsignal. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_EVENT_PORTS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "event2/thread.h" + +#include "evthread-internal.h" +#include "event-internal.h" +#include "log-internal.h" +#include "evsignal-internal.h" +#include "evmap-internal.h" + +#define INITIAL_EVENTS_PER_GETN 8 +#define MAX_EVENTS_PER_GETN 4096 + +/* + * Per-file-descriptor information about what events we're subscribed to. These + * fields are NULL if no event is subscribed to either of them. + */ + +struct fd_info { + /* combinations of EV_READ and EV_WRITE */ + short fdi_what; + /* Index of this fd within ed_pending, plus 1. Zero if this fd is + * not in ed_pending. (The +1 is a hack so that memset(0) will set + * it to a nil index. */ + int pending_idx_plus_1; +}; + +#define FDI_HAS_READ(fdi) ((fdi)->fdi_what & EV_READ) +#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_what & EV_WRITE) +#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi)) +#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \ + (FDI_HAS_WRITE(fdi) ? POLLOUT : 0) + +struct evport_data { + int ed_port; /* event port for system events */ + /* How many elements of ed_pending should we look at? */ + int ed_npending; + /* How many elements are allocated in ed_pending and pevtlist? */ + int ed_maxevents; + /* fdi's that we need to reassoc */ + int *ed_pending; + /* storage space for incoming events. */ + port_event_t *ed_pevtlist; + +}; + +static void* evport_init(struct event_base *); +static int evport_add(struct event_base *, int fd, short old, short events, void *); +static int evport_del(struct event_base *, int fd, short old, short events, void *); +static int evport_dispatch(struct event_base *, struct timeval *); +static void evport_dealloc(struct event_base *); +static int grow(struct evport_data *, int min_events); + +const struct eventop evportops = { + "evport", + evport_init, + evport_add, + evport_del, + evport_dispatch, + evport_dealloc, + 1, /* need reinit */ + 0, /* features */ + sizeof(struct fd_info), /* fdinfo length */ +}; + +/* + * Initialize the event port implementation. + */ + +static void* +evport_init(struct event_base *base) +{ + struct evport_data *evpd; + + if (!(evpd = mm_calloc(1, sizeof(struct evport_data)))) + return (NULL); + + if ((evpd->ed_port = port_create()) == -1) { + mm_free(evpd); + return (NULL); + } + + if (grow(evpd, INITIAL_EVENTS_PER_GETN) < 0) { + close(evpd->ed_port); + mm_free(evpd); + return NULL; + } + + evpd->ed_npending = 0; + + evsig_init_(base); + + return (evpd); +} + +static int +grow(struct evport_data *data, int min_events) +{ + int newsize; + int *new_pending; + port_event_t *new_pevtlist; + if (data->ed_maxevents) { + newsize = data->ed_maxevents; + do { + newsize *= 2; + } while (newsize < min_events); + } else { + newsize = min_events; + } + + new_pending = mm_realloc(data->ed_pending, sizeof(int)*newsize); + if (new_pending == NULL) + return -1; + data->ed_pending = new_pending; + new_pevtlist = mm_realloc(data->ed_pevtlist, sizeof(port_event_t)*newsize); + if (new_pevtlist == NULL) + return -1; + data->ed_pevtlist = new_pevtlist; + + data->ed_maxevents = newsize; + return 0; +} + +#ifdef CHECK_INVARIANTS +/* + * Checks some basic properties about the evport_data structure. 
Because it + * checks all file descriptors, this function can be expensive when the maximum + * file descriptor ever used is rather large. + */ + +static void +check_evportop(struct evport_data *evpd) +{ + EVUTIL_ASSERT(evpd); + EVUTIL_ASSERT(evpd->ed_port > 0); +} + +/* + * Verifies very basic integrity of a given port_event. + */ +static void +check_event(port_event_t* pevt) +{ + /* + * We've only registered for PORT_SOURCE_FD events. The only + * other thing we can legitimately receive is PORT_SOURCE_ALERT, + * but since we're not using port_alert either, we can assume + * PORT_SOURCE_FD. + */ + EVUTIL_ASSERT(pevt->portev_source == PORT_SOURCE_FD); +} + +#else +#define check_evportop(epop) +#define check_event(pevt) +#endif /* CHECK_INVARIANTS */ + +/* + * (Re)associates the given file descriptor with the event port. The OS events + * are specified (implicitly) from the fd_info struct. + */ +static int +reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd) +{ + int sysevents = FDI_TO_SYSEVENTS(fdip); + + if (sysevents != 0) { + if (port_associate(epdp->ed_port, PORT_SOURCE_FD, + fd, sysevents, fdip) == -1) { + event_warn("port_associate"); + return (-1); + } + } + + check_evportop(epdp); + + return (0); +} + +/* + * Main event loop - polls port_getn for some number of events, and processes + * them. + */ + +static int +evport_dispatch(struct event_base *base, struct timeval *tv) +{ + int i, res; + struct evport_data *epdp = base->evbase; + port_event_t *pevtlist = epdp->ed_pevtlist; + + /* + * port_getn will block until it has at least nevents events. It will + * also return how many it's given us (which may be more than we asked + * for, as long as it's less than our maximum (ed_maxevents)) in + * nevents. + */ + int nevents = 1; + + /* + * We have to convert a struct timeval to a struct timespec + * (only difference is nanoseconds vs. microseconds). If no time-based + * events are active, we should wait for I/O (and tv == NULL). + */ + struct timespec ts; + struct timespec *ts_p = NULL; + if (tv != NULL) { + ts.tv_sec = tv->tv_sec; + ts.tv_nsec = tv->tv_usec * 1000; + ts_p = &ts; + } + + /* + * Before doing anything else, we need to reassociate the events we hit + * last time which need reassociation. See comment at the end of the + * loop below. + */ + for (i = 0; i < epdp->ed_npending; ++i) { + struct fd_info *fdi = NULL; + const int fd = epdp->ed_pending[i]; + if (fd != -1) { + /* We might have cleared out this event; we need + * to be sure that it's still set. 
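+			 * (If it was deleted, fdi comes back NULL or with no
+			 * events set, and the fd is simply skipped below.)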
*/ + fdi = evmap_io_get_fdinfo_(&base->io, fd); + } + + if (fdi != NULL && FDI_HAS_EVENTS(fdi)) { + reassociate(epdp, fdi, fd); + /* epdp->ed_pending[i] = -1; */ + fdi->pending_idx_plus_1 = 0; + } + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = port_getn(epdp->ed_port, pevtlist, epdp->ed_maxevents, + (unsigned int *) &nevents, ts_p); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (res == -1) { + if (errno == EINTR || errno == EAGAIN) { + return (0); + } else if (errno == ETIME) { + if (nevents == 0) + return (0); + } else { + event_warn("port_getn"); + return (-1); + } + } + + event_debug(("%s: port_getn reports %d events", __func__, nevents)); + + for (i = 0; i < nevents; ++i) { + port_event_t *pevt = &pevtlist[i]; + int fd = (int) pevt->portev_object; + struct fd_info *fdi = pevt->portev_user; + /*EVUTIL_ASSERT(evmap_io_get_fdinfo_(&base->io, fd) == fdi);*/ + + check_evportop(epdp); + check_event(pevt); + epdp->ed_pending[i] = fd; + fdi->pending_idx_plus_1 = i + 1; + + /* + * Figure out what kind of event it was + * (because we have to pass this to the callback) + */ + res = 0; + if (pevt->portev_events & (POLLERR|POLLHUP)) { + res = EV_READ | EV_WRITE; + } else { + if (pevt->portev_events & POLLIN) + res |= EV_READ; + if (pevt->portev_events & POLLOUT) + res |= EV_WRITE; + } + + /* + * Check for the error situations or a hangup situation + */ + if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL)) + res |= EV_READ|EV_WRITE; + + evmap_io_active_(base, fd, res); + } /* end of all events gotten */ + epdp->ed_npending = nevents; + + if (nevents == epdp->ed_maxevents && + epdp->ed_maxevents < MAX_EVENTS_PER_GETN) { + /* we used all the space this time. We should be ready + * for more events next time around. */ + grow(epdp, epdp->ed_maxevents * 2); + } + + check_evportop(epdp); + + return (0); +} + + +/* + * Adds the given event (so that you will be notified when it happens via + * the callback function). + */ + +static int +evport_add(struct event_base *base, int fd, short old, short events, void *p) +{ + struct evport_data *evpd = base->evbase; + struct fd_info *fdi = p; + + check_evportop(evpd); + + fdi->fdi_what |= events; + + return reassociate(evpd, fdi, fd); +} + +/* + * Removes the given event from the list of events to wait for. + */ + +static int +evport_del(struct event_base *base, int fd, short old, short events, void *p) +{ + struct evport_data *evpd = base->evbase; + struct fd_info *fdi = p; + int associated = ! fdi->pending_idx_plus_1; + + check_evportop(evpd); + + fdi->fdi_what &= ~(events &(EV_READ|EV_WRITE)); + + if (associated) { + if (!FDI_HAS_EVENTS(fdi) && + port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) { + /* + * Ignore EBADFD error the fd could have been closed + * before event_del() was called. 
+ */ + if (errno != EBADFD) { + event_warn("port_dissociate"); + return (-1); + } + } else { + if (FDI_HAS_EVENTS(fdi)) { + return (reassociate(evpd, fdi, fd)); + } + } + } else { + if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) { + const int i = fdi->pending_idx_plus_1 - 1; + EVUTIL_ASSERT(evpd->ed_pending[i] == fd); + evpd->ed_pending[i] = -1; + fdi->pending_idx_plus_1 = 0; + } + } + return 0; +} + + +static void +evport_dealloc(struct event_base *base) +{ + struct evport_data *evpd = base->evbase; + + evsig_dealloc_(base); + + close(evpd->ed_port); + + if (evpd->ed_pending) + mm_free(evpd->ed_pending); + if (evpd->ed_pevtlist) + mm_free(evpd->ed_pevtlist); + + mm_free(evpd); +} + +#endif /* EVENT__HAVE_EVENT_PORTS */ diff --git a/probe-busybox/libevent-2.1.11-stable/evrpc-internal.h b/probe-busybox/libevent-2.1.11-stable/evrpc-internal.h new file mode 100644 index 00000000..9eb37638 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evrpc-internal.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVRPC_INTERNAL_H_INCLUDED_ +#define EVRPC_INTERNAL_H_INCLUDED_ + +#include "event2/http.h" +#include "http-internal.h" + +struct evrpc; +struct evrpc_request_wrapper; + +#define EVRPC_URI_PREFIX "/.rpc." + +struct evrpc_hook { + TAILQ_ENTRY(evrpc_hook) next; + + /* returns EVRPC_TERMINATE; if the rpc should be aborted. + * a hook is is allowed to rewrite the evbuffer + */ + int (*process)(void *, struct evhttp_request *, + struct evbuffer *, void *); + void *process_arg; +}; + +TAILQ_HEAD(evrpc_hook_list, evrpc_hook); + +/* + * this is shared between the base and the pool, so that we can reuse + * the hook adding functions; we alias both evrpc_pool and evrpc_base + * to this common structure. 
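+ * (The input_hooks, output_hooks and paused_requests macros below simply
+ * expand to the members of this embedded struct.)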
+ */ + +struct evrpc_hook_ctx; +TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx); + +struct evrpc_hooks_ { + /* hooks for processing outbound and inbound rpcs */ + struct evrpc_hook_list in_hooks; + struct evrpc_hook_list out_hooks; + + struct evrpc_pause_list pause_requests; +}; + +#define input_hooks common.in_hooks +#define output_hooks common.out_hooks +#define paused_requests common.pause_requests + +struct evrpc_base { + struct evrpc_hooks_ common; + + /* the HTTP server under which we register our RPC calls */ + struct evhttp* http_server; + + /* a list of all RPCs registered with us */ + TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs; +}; + +struct evrpc_req_generic; +void evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state); + +/* A pool for holding evhttp_connection objects */ +struct evrpc_pool { + struct evrpc_hooks_ common; + + struct event_base *base; + + struct evconq connections; + + int timeout; + + TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) (requests); +}; + +struct evrpc_hook_ctx { + TAILQ_ENTRY(evrpc_hook_ctx) next; + + void *ctx; + void (*cb)(void *, enum EVRPC_HOOK_RESULT); +}; + +struct evrpc_meta { + TAILQ_ENTRY(evrpc_meta) next; + char *key; + + void *data; + size_t data_size; +}; + +TAILQ_HEAD(evrpc_meta_list, evrpc_meta); + +struct evrpc_hook_meta { + struct evrpc_meta_list meta_data; + struct evhttp_connection *evcon; +}; + +/* allows association of meta data with a request */ +static void evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx, + struct evhttp_connection *evcon); + +/* creates a new meta data store */ +static struct evrpc_hook_meta *evrpc_hook_meta_new_(void); + +/* frees the meta data associated with a request */ +static void evrpc_hook_context_free_(struct evrpc_hook_meta *ctx); + +/* the server side of an rpc */ + +/* We alias the RPC specific structs to this voided one */ +struct evrpc_req_generic { + /* + * allows association of meta data via hooks - needs to be + * synchronized with evrpc_request_wrapper + */ + struct evrpc_hook_meta *hook_meta; + + /* the unmarshaled request object */ + void *request; + + /* the empty reply object that needs to be filled in */ + void *reply; + + /* + * the static structure for this rpc; that can be used to + * automatically unmarshal and marshal the http buffers. + */ + struct evrpc *rpc; + + /* + * the http request structure on which we need to answer. + */ + struct evhttp_request* http_req; + + /* + * Temporary data store for marshaled data + */ + struct evbuffer* rpc_data; +}; + +/* the client side of an rpc request */ +struct evrpc_request_wrapper { + /* + * allows association of meta data via hooks - needs to be + * synchronized with evrpc_req_generic. 
+ */ + struct evrpc_hook_meta *hook_meta; + + TAILQ_ENTRY(evrpc_request_wrapper) next; + + /* pool on which this rpc request is being made */ + struct evrpc_pool *pool; + + /* connection on which the request is being sent */ + struct evhttp_connection *evcon; + + /* the actual request */ + struct evhttp_request *req; + + /* event for implementing request timeouts */ + struct event ev_timeout; + + /* the name of the rpc */ + char *name; + + /* callback */ + void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg); + void *cb_arg; + + void *request; + void *reply; + + /* unmarshals the buffer into the proper request structure */ + void (*request_marshal)(struct evbuffer *, void *); + + /* removes all stored state in the reply */ + void (*reply_clear)(void *); + + /* marshals the reply into a buffer */ + int (*reply_unmarshal)(void *, struct evbuffer*); +}; + +#endif /* EVRPC_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/evrpc.c b/probe-busybox/libevent-2.1.11-stable/evrpc.c new file mode 100644 index 00000000..4a60ca5f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evrpc.c @@ -0,0 +1,1171 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#include +#undef WIN32_LEAN_AND_MEAN +#endif + +#include +#ifndef _WIN32 +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#ifndef _WIN32 +#include +#endif +#include +#include +#include + +#include + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/rpc.h" +#include "event2/rpc_struct.h" +#include "evrpc-internal.h" +#include "event2/http.h" +#include "event2/buffer.h" +#include "event2/tag.h" +#include "event2/http_struct.h" +#include "event2/http_compat.h" +#include "event2/util.h" +#include "util-internal.h" +#include "log-internal.h" +#include "mm-internal.h" + +struct evrpc_base * +evrpc_init(struct evhttp *http_server) +{ + struct evrpc_base* base = mm_calloc(1, sizeof(struct evrpc_base)); + if (base == NULL) + return (NULL); + + /* we rely on the tagging sub system */ + evtag_init(); + + TAILQ_INIT(&base->registered_rpcs); + TAILQ_INIT(&base->input_hooks); + TAILQ_INIT(&base->output_hooks); + + TAILQ_INIT(&base->paused_requests); + + base->http_server = http_server; + + return (base); +} + +void +evrpc_free(struct evrpc_base *base) +{ + struct evrpc *rpc; + struct evrpc_hook *hook; + struct evrpc_hook_ctx *pause; + int r; + + while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) { + r = evrpc_unregister_rpc(base, rpc->uri); + EVUTIL_ASSERT(r == 0); + } + while ((pause = TAILQ_FIRST(&base->paused_requests)) != NULL) { + TAILQ_REMOVE(&base->paused_requests, pause, next); + mm_free(pause); + } + while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) { + r = evrpc_remove_hook(base, EVRPC_INPUT, hook); + EVUTIL_ASSERT(r); + } + while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) { + r = evrpc_remove_hook(base, EVRPC_OUTPUT, hook); + EVUTIL_ASSERT(r); + } + mm_free(base); +} + +void * +evrpc_add_hook(void *vbase, + enum EVRPC_HOOK_TYPE hook_type, + int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *), + void *cb_arg) +{ + struct evrpc_hooks_ *base = vbase; + struct evrpc_hook_list *head = NULL; + struct evrpc_hook *hook = NULL; + switch (hook_type) { + case EVRPC_INPUT: + head = &base->in_hooks; + break; + case EVRPC_OUTPUT: + head = &base->out_hooks; + break; + default: + EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT); + } + + hook = mm_calloc(1, sizeof(struct evrpc_hook)); + EVUTIL_ASSERT(hook != NULL); + + hook->process = cb; + hook->process_arg = cb_arg; + TAILQ_INSERT_TAIL(head, hook, next); + + return (hook); +} + +static int +evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle) +{ + struct evrpc_hook *hook = NULL; + TAILQ_FOREACH(hook, head, next) { + if (hook == handle) { + TAILQ_REMOVE(head, hook, next); + mm_free(hook); + return (1); + } + } + + return (0); +} + +/* + * remove the hook specified by the handle + */ + +int +evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle) +{ + struct evrpc_hooks_ *base = vbase; + struct evrpc_hook_list *head = NULL; + switch (hook_type) { + case EVRPC_INPUT: + head = &base->in_hooks; + break; + case EVRPC_OUTPUT: + head = &base->out_hooks; + break; + default: + EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT); + } + + return (evrpc_remove_hook_internal(head, handle)); +} + +static int +evrpc_process_hooks(struct evrpc_hook_list *head, void *ctx, + struct evhttp_request *req, struct evbuffer *evbuf) +{ + struct evrpc_hook *hook; + 
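/* Run each hook in registration order; the first hook that returns
+	 * anything other than EVRPC_CONTINUE stops processing and its result
+	 * is returned to the caller. */
+	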
TAILQ_FOREACH(hook, head, next) { + int res = hook->process(ctx, req, evbuf, hook->process_arg); + if (res != EVRPC_CONTINUE) + return (res); + } + + return (EVRPC_CONTINUE); +} + +static void evrpc_pool_schedule(struct evrpc_pool *pool); +static void evrpc_request_cb(struct evhttp_request *, void *); + +/* + * Registers a new RPC with the HTTP server. The evrpc object is expected + * to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn + * calls this function. + */ + +static char * +evrpc_construct_uri(const char *uri) +{ + char *constructed_uri; + size_t constructed_uri_len; + + constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1; + if ((constructed_uri = mm_malloc(constructed_uri_len)) == NULL) + event_err(1, "%s: failed to register rpc at %s", + __func__, uri); + memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX)); + memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri)); + constructed_uri[constructed_uri_len - 1] = '\0'; + + return (constructed_uri); +} + +int +evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc, + void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg) +{ + char *constructed_uri = evrpc_construct_uri(rpc->uri); + + rpc->base = base; + rpc->cb = cb; + rpc->cb_arg = cb_arg; + + TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next); + + evhttp_set_cb(base->http_server, + constructed_uri, + evrpc_request_cb, + rpc); + + mm_free(constructed_uri); + + return (0); +} + +int +evrpc_unregister_rpc(struct evrpc_base *base, const char *name) +{ + char *registered_uri = NULL; + struct evrpc *rpc; + int r; + + /* find the right rpc; linear search might be slow */ + TAILQ_FOREACH(rpc, &base->registered_rpcs, next) { + if (strcmp(rpc->uri, name) == 0) + break; + } + if (rpc == NULL) { + /* We did not find an RPC with this name */ + return (-1); + } + TAILQ_REMOVE(&base->registered_rpcs, rpc, next); + + registered_uri = evrpc_construct_uri(name); + + /* remove the http server callback */ + r = evhttp_del_cb(base->http_server, registered_uri); + EVUTIL_ASSERT(r == 0); + + mm_free(registered_uri); + + mm_free((char *)rpc->uri); + mm_free(rpc); + return (0); +} + +static int evrpc_pause_request(void *vbase, void *ctx, + void (*cb)(void *, enum EVRPC_HOOK_RESULT)); +static void evrpc_request_cb_closure(void *, enum EVRPC_HOOK_RESULT); + +static void +evrpc_request_cb(struct evhttp_request *req, void *arg) +{ + struct evrpc *rpc = arg; + struct evrpc_req_generic *rpc_state = NULL; + + /* let's verify the outside parameters */ + if (req->type != EVHTTP_REQ_POST || + evbuffer_get_length(req->input_buffer) <= 0) + goto error; + + rpc_state = mm_calloc(1, sizeof(struct evrpc_req_generic)); + if (rpc_state == NULL) + goto error; + rpc_state->rpc = rpc; + rpc_state->http_req = req; + rpc_state->rpc_data = NULL; + + if (TAILQ_FIRST(&rpc->base->input_hooks) != NULL) { + int hook_res; + + evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon); + + /* + * allow hooks to modify the outgoing request + */ + hook_res = evrpc_process_hooks(&rpc->base->input_hooks, + rpc_state, req, req->input_buffer); + switch (hook_res) { + case EVRPC_TERMINATE: + goto error; + case EVRPC_PAUSE: + evrpc_pause_request(rpc->base, rpc_state, + evrpc_request_cb_closure); + return; + case EVRPC_CONTINUE: + break; + default: + EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE || + hook_res == EVRPC_CONTINUE || + hook_res == EVRPC_PAUSE); + } + } + + evrpc_request_cb_closure(rpc_state, EVRPC_CONTINUE); + return; + +error: + if (rpc_state) + 
evrpc_reqstate_free_(rpc_state); + evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL); + return; +} + +static void +evrpc_request_cb_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res) +{ + struct evrpc_req_generic *rpc_state = arg; + struct evrpc *rpc; + struct evhttp_request *req; + + EVUTIL_ASSERT(rpc_state); + rpc = rpc_state->rpc; + req = rpc_state->http_req; + + if (hook_res == EVRPC_TERMINATE) + goto error; + + /* let's check that we can parse the request */ + rpc_state->request = rpc->request_new(rpc->request_new_arg); + if (rpc_state->request == NULL) + goto error; + + if (rpc->request_unmarshal( + rpc_state->request, req->input_buffer) == -1) { + /* we failed to parse the request; that's a bummer */ + goto error; + } + + /* at this point, we have a well formed request, prepare the reply */ + + rpc_state->reply = rpc->reply_new(rpc->reply_new_arg); + if (rpc_state->reply == NULL) + goto error; + + /* give the rpc to the user; they can deal with it */ + rpc->cb(rpc_state, rpc->cb_arg); + + return; + +error: + evrpc_reqstate_free_(rpc_state); + evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL); + return; +} + + +void +evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state) +{ + struct evrpc *rpc; + EVUTIL_ASSERT(rpc_state != NULL); + rpc = rpc_state->rpc; + + /* clean up all memory */ + if (rpc_state->hook_meta != NULL) + evrpc_hook_context_free_(rpc_state->hook_meta); + if (rpc_state->request != NULL) + rpc->request_free(rpc_state->request); + if (rpc_state->reply != NULL) + rpc->reply_free(rpc_state->reply); + if (rpc_state->rpc_data != NULL) + evbuffer_free(rpc_state->rpc_data); + mm_free(rpc_state); +} + +static void +evrpc_request_done_closure(void *, enum EVRPC_HOOK_RESULT); + +void +evrpc_request_done(struct evrpc_req_generic *rpc_state) +{ + struct evhttp_request *req; + struct evrpc *rpc; + + EVUTIL_ASSERT(rpc_state); + + req = rpc_state->http_req; + rpc = rpc_state->rpc; + + if (rpc->reply_complete(rpc_state->reply) == -1) { + /* the reply was not completely filled in. 
error out */ + goto error; + } + + if ((rpc_state->rpc_data = evbuffer_new()) == NULL) { + /* out of memory */ + goto error; + } + + /* serialize the reply */ + rpc->reply_marshal(rpc_state->rpc_data, rpc_state->reply); + + if (TAILQ_FIRST(&rpc->base->output_hooks) != NULL) { + int hook_res; + + evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon); + + /* do hook based tweaks to the request */ + hook_res = evrpc_process_hooks(&rpc->base->output_hooks, + rpc_state, req, rpc_state->rpc_data); + switch (hook_res) { + case EVRPC_TERMINATE: + goto error; + case EVRPC_PAUSE: + if (evrpc_pause_request(rpc->base, rpc_state, + evrpc_request_done_closure) == -1) + goto error; + return; + case EVRPC_CONTINUE: + break; + default: + EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE || + hook_res == EVRPC_CONTINUE || + hook_res == EVRPC_PAUSE); + } + } + + evrpc_request_done_closure(rpc_state, EVRPC_CONTINUE); + return; + +error: + evrpc_reqstate_free_(rpc_state); + evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL); + return; +} + +void * +evrpc_get_request(struct evrpc_req_generic *req) +{ + return req->request; +} + +void * +evrpc_get_reply(struct evrpc_req_generic *req) +{ + return req->reply; +} + +static void +evrpc_request_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res) +{ + struct evrpc_req_generic *rpc_state = arg; + struct evhttp_request *req; + EVUTIL_ASSERT(rpc_state); + req = rpc_state->http_req; + + if (hook_res == EVRPC_TERMINATE) + goto error; + + /* on success, we are going to transmit marshaled binary data */ + if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) { + evhttp_add_header(req->output_headers, + "Content-Type", "application/octet-stream"); + } + evhttp_send_reply(req, HTTP_OK, "OK", rpc_state->rpc_data); + + evrpc_reqstate_free_(rpc_state); + + return; + +error: + evrpc_reqstate_free_(rpc_state); + evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL); + return; +} + + +/* Client implementation of RPC site */ + +static int evrpc_schedule_request(struct evhttp_connection *connection, + struct evrpc_request_wrapper *ctx); + +struct evrpc_pool * +evrpc_pool_new(struct event_base *base) +{ + struct evrpc_pool *pool = mm_calloc(1, sizeof(struct evrpc_pool)); + if (pool == NULL) + return (NULL); + + TAILQ_INIT(&pool->connections); + TAILQ_INIT(&pool->requests); + + TAILQ_INIT(&pool->paused_requests); + + TAILQ_INIT(&pool->input_hooks); + TAILQ_INIT(&pool->output_hooks); + + pool->base = base; + pool->timeout = -1; + + return (pool); +} + +static void +evrpc_request_wrapper_free(struct evrpc_request_wrapper *request) +{ + if (request->hook_meta != NULL) + evrpc_hook_context_free_(request->hook_meta); + mm_free(request->name); + mm_free(request); +} + +void +evrpc_pool_free(struct evrpc_pool *pool) +{ + struct evhttp_connection *connection; + struct evrpc_request_wrapper *request; + struct evrpc_hook_ctx *pause; + struct evrpc_hook *hook; + int r; + + while ((request = TAILQ_FIRST(&pool->requests)) != NULL) { + TAILQ_REMOVE(&pool->requests, request, next); + evrpc_request_wrapper_free(request); + } + + while ((pause = TAILQ_FIRST(&pool->paused_requests)) != NULL) { + TAILQ_REMOVE(&pool->paused_requests, pause, next); + mm_free(pause); + } + + while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) { + TAILQ_REMOVE(&pool->connections, connection, next); + evhttp_connection_free(connection); + } + + while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) { + r = evrpc_remove_hook(pool, EVRPC_INPUT, hook); + EVUTIL_ASSERT(r); + } + + while ((hook = 
TAILQ_FIRST(&pool->output_hooks)) != NULL) { + r = evrpc_remove_hook(pool, EVRPC_OUTPUT, hook); + EVUTIL_ASSERT(r); + } + + mm_free(pool); +} + +/* + * Add a connection to the RPC pool. A request scheduled on the pool + * may use any available connection. + */ + +void +evrpc_pool_add_connection(struct evrpc_pool *pool, + struct evhttp_connection *connection) +{ + EVUTIL_ASSERT(connection->http_server == NULL); + TAILQ_INSERT_TAIL(&pool->connections, connection, next); + + /* + * associate an event base with this connection + */ + if (pool->base != NULL) + evhttp_connection_set_base(connection, pool->base); + + /* + * unless a timeout was specifically set for a connection, + * the connection inherits the timeout from the pool. + */ + if (!evutil_timerisset(&connection->timeout)) + evhttp_connection_set_timeout(connection, pool->timeout); + + /* + * if we have any requests pending, schedule them with the new + * connections. + */ + + if (TAILQ_FIRST(&pool->requests) != NULL) { + struct evrpc_request_wrapper *request = + TAILQ_FIRST(&pool->requests); + TAILQ_REMOVE(&pool->requests, request, next); + evrpc_schedule_request(connection, request); + } +} + +void +evrpc_pool_remove_connection(struct evrpc_pool *pool, + struct evhttp_connection *connection) +{ + TAILQ_REMOVE(&pool->connections, connection, next); +} + +void +evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs) +{ + struct evhttp_connection *evcon; + TAILQ_FOREACH(evcon, &pool->connections, next) { + evhttp_connection_set_timeout(evcon, timeout_in_secs); + } + pool->timeout = timeout_in_secs; +} + + +static void evrpc_reply_done(struct evhttp_request *, void *); +static void evrpc_request_timeout(evutil_socket_t, short, void *); + +/* + * Finds a connection object associated with the pool that is currently + * idle and can be used to make a request. + */ +static struct evhttp_connection * +evrpc_pool_find_connection(struct evrpc_pool *pool) +{ + struct evhttp_connection *connection; + TAILQ_FOREACH(connection, &pool->connections, next) { + if (TAILQ_FIRST(&connection->requests) == NULL) + return (connection); + } + + return (NULL); +} + +/* + * Prototypes responsible for evrpc scheduling and hooking + */ + +static void evrpc_schedule_request_closure(void *ctx, enum EVRPC_HOOK_RESULT); + +/* + * We assume that the ctx is no longer queued on the pool. 
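+ * The caller hands us a connection to use; we marshal the request into a
+ * fresh evhttp_request, run any output hooks, and then POST it to the
+ * constructed RPC URI.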
+ */ +static int +evrpc_schedule_request(struct evhttp_connection *connection, + struct evrpc_request_wrapper *ctx) +{ + struct evhttp_request *req = NULL; + struct evrpc_pool *pool = ctx->pool; + struct evrpc_status status; + + if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL) + goto error; + + /* serialize the request data into the output buffer */ + ctx->request_marshal(req->output_buffer, ctx->request); + + /* we need to know the connection that we might have to abort */ + ctx->evcon = connection; + + /* if we get paused we also need to know the request */ + ctx->req = req; + + if (TAILQ_FIRST(&pool->output_hooks) != NULL) { + int hook_res; + + evrpc_hook_associate_meta_(&ctx->hook_meta, connection); + + /* apply hooks to the outgoing request */ + hook_res = evrpc_process_hooks(&pool->output_hooks, + ctx, req, req->output_buffer); + + switch (hook_res) { + case EVRPC_TERMINATE: + goto error; + case EVRPC_PAUSE: + /* we need to be explicitly resumed */ + if (evrpc_pause_request(pool, ctx, + evrpc_schedule_request_closure) == -1) + goto error; + return (0); + case EVRPC_CONTINUE: + /* we can just continue */ + break; + default: + EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE || + hook_res == EVRPC_CONTINUE || + hook_res == EVRPC_PAUSE); + } + } + + evrpc_schedule_request_closure(ctx, EVRPC_CONTINUE); + return (0); + +error: + memset(&status, 0, sizeof(status)); + status.error = EVRPC_STATUS_ERR_UNSTARTED; + (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg); + evrpc_request_wrapper_free(ctx); + return (-1); +} + +static void +evrpc_schedule_request_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res) +{ + struct evrpc_request_wrapper *ctx = arg; + struct evhttp_connection *connection = ctx->evcon; + struct evhttp_request *req = ctx->req; + struct evrpc_pool *pool = ctx->pool; + struct evrpc_status status; + char *uri = NULL; + int res = 0; + + if (hook_res == EVRPC_TERMINATE) + goto error; + + uri = evrpc_construct_uri(ctx->name); + if (uri == NULL) + goto error; + + if (pool->timeout > 0) { + /* + * a timeout after which the whole rpc is going to be aborted. 
+ */ + struct timeval tv; + evutil_timerclear(&tv); + tv.tv_sec = pool->timeout; + evtimer_add(&ctx->ev_timeout, &tv); + } + + /* start the request over the connection */ + res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri); + mm_free(uri); + + if (res == -1) + goto error; + + return; + +error: + memset(&status, 0, sizeof(status)); + status.error = EVRPC_STATUS_ERR_UNSTARTED; + (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg); + evrpc_request_wrapper_free(ctx); +} + +/* we just queue the paused request on the pool under the req object */ +static int +evrpc_pause_request(void *vbase, void *ctx, + void (*cb)(void *, enum EVRPC_HOOK_RESULT)) +{ + struct evrpc_hooks_ *base = vbase; + struct evrpc_hook_ctx *pause = mm_malloc(sizeof(*pause)); + if (pause == NULL) + return (-1); + + pause->ctx = ctx; + pause->cb = cb; + + TAILQ_INSERT_TAIL(&base->pause_requests, pause, next); + return (0); +} + +int +evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res) +{ + struct evrpc_hooks_ *base = vbase; + struct evrpc_pause_list *head = &base->pause_requests; + struct evrpc_hook_ctx *pause; + + TAILQ_FOREACH(pause, head, next) { + if (pause->ctx == ctx) + break; + } + + if (pause == NULL) + return (-1); + + (*pause->cb)(pause->ctx, res); + TAILQ_REMOVE(head, pause, next); + mm_free(pause); + return (0); +} + +int +evrpc_make_request(struct evrpc_request_wrapper *ctx) +{ + struct evrpc_pool *pool = ctx->pool; + + /* initialize the event structure for this rpc */ + evtimer_assign(&ctx->ev_timeout, pool->base, evrpc_request_timeout, ctx); + + /* we better have some available connections on the pool */ + EVUTIL_ASSERT(TAILQ_FIRST(&pool->connections) != NULL); + + /* + * if no connection is available, we queue the request on the pool, + * the next time a connection is empty, the rpc will be send on that. 
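+	 * (evrpc_pool_schedule() below dispatches it as soon as an idle
+	 * connection can be found.)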
+ */ + TAILQ_INSERT_TAIL(&pool->requests, ctx, next); + + evrpc_pool_schedule(pool); + + return (0); +} + + +struct evrpc_request_wrapper * +evrpc_make_request_ctx( + struct evrpc_pool *pool, void *request, void *reply, + const char *rpcname, + void (*req_marshal)(struct evbuffer*, void *), + void (*rpl_clear)(void *), + int (*rpl_unmarshal)(void *, struct evbuffer *), + void (*cb)(struct evrpc_status *, void *, void *, void *), + void *cbarg) +{ + struct evrpc_request_wrapper *ctx = (struct evrpc_request_wrapper *) + mm_malloc(sizeof(struct evrpc_request_wrapper)); + if (ctx == NULL) + return (NULL); + + ctx->pool = pool; + ctx->hook_meta = NULL; + ctx->evcon = NULL; + ctx->name = mm_strdup(rpcname); + if (ctx->name == NULL) { + mm_free(ctx); + return (NULL); + } + ctx->cb = cb; + ctx->cb_arg = cbarg; + ctx->request = request; + ctx->reply = reply; + ctx->request_marshal = req_marshal; + ctx->reply_clear = rpl_clear; + ctx->reply_unmarshal = rpl_unmarshal; + + return (ctx); +} + +static void +evrpc_reply_done_closure(void *, enum EVRPC_HOOK_RESULT); + +static void +evrpc_reply_done(struct evhttp_request *req, void *arg) +{ + struct evrpc_request_wrapper *ctx = arg; + struct evrpc_pool *pool = ctx->pool; + int hook_res = EVRPC_CONTINUE; + + /* cancel any timeout we might have scheduled */ + event_del(&ctx->ev_timeout); + + ctx->req = req; + + /* we need to get the reply now */ + if (req == NULL) { + evrpc_reply_done_closure(ctx, EVRPC_CONTINUE); + return; + } + + if (TAILQ_FIRST(&pool->input_hooks) != NULL) { + evrpc_hook_associate_meta_(&ctx->hook_meta, ctx->evcon); + + /* apply hooks to the incoming request */ + hook_res = evrpc_process_hooks(&pool->input_hooks, + ctx, req, req->input_buffer); + + switch (hook_res) { + case EVRPC_TERMINATE: + case EVRPC_CONTINUE: + break; + case EVRPC_PAUSE: + /* + * if we get paused we also need to know the + * request. unfortunately, the underlying + * layer is going to free it. we need to + * request ownership explicitly + */ + evhttp_request_own(req); + + evrpc_pause_request(pool, ctx, + evrpc_reply_done_closure); + return; + default: + EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE || + hook_res == EVRPC_CONTINUE || + hook_res == EVRPC_PAUSE); + } + } + + evrpc_reply_done_closure(ctx, hook_res); + + /* http request is being freed by underlying layer */ +} + +static void +evrpc_reply_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res) +{ + struct evrpc_request_wrapper *ctx = arg; + struct evhttp_request *req = ctx->req; + struct evrpc_pool *pool = ctx->pool; + struct evrpc_status status; + int res = -1; + + memset(&status, 0, sizeof(status)); + status.http_req = req; + + /* we need to get the reply now */ + if (req == NULL) { + status.error = EVRPC_STATUS_ERR_TIMEOUT; + } else if (hook_res == EVRPC_TERMINATE) { + status.error = EVRPC_STATUS_ERR_HOOKABORTED; + } else { + res = ctx->reply_unmarshal(ctx->reply, req->input_buffer); + if (res == -1) + status.error = EVRPC_STATUS_ERR_BADPAYLOAD; + } + + if (res == -1) { + /* clear everything that we might have written previously */ + ctx->reply_clear(ctx->reply); + } + + (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg); + + evrpc_request_wrapper_free(ctx); + + /* the http layer owned the original request structure, but if we + * got paused, we asked for ownership and need to free it here. 
*/ + if (req != NULL && evhttp_request_is_owned(req)) + evhttp_request_free(req); + + /* see if we can schedule another request */ + evrpc_pool_schedule(pool); +} + +static void +evrpc_pool_schedule(struct evrpc_pool *pool) +{ + struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests); + struct evhttp_connection *evcon; + + /* if no requests are pending, we have no work */ + if (ctx == NULL) + return; + + if ((evcon = evrpc_pool_find_connection(pool)) != NULL) { + TAILQ_REMOVE(&pool->requests, ctx, next); + evrpc_schedule_request(evcon, ctx); + } +} + +static void +evrpc_request_timeout(evutil_socket_t fd, short what, void *arg) +{ + struct evrpc_request_wrapper *ctx = arg; + struct evhttp_connection *evcon = ctx->evcon; + EVUTIL_ASSERT(evcon != NULL); + + evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT); +} + +/* + * frees potential meta data associated with a request. + */ + +static void +evrpc_meta_data_free(struct evrpc_meta_list *meta_data) +{ + struct evrpc_meta *entry; + EVUTIL_ASSERT(meta_data != NULL); + + while ((entry = TAILQ_FIRST(meta_data)) != NULL) { + TAILQ_REMOVE(meta_data, entry, next); + mm_free(entry->key); + mm_free(entry->data); + mm_free(entry); + } +} + +static struct evrpc_hook_meta * +evrpc_hook_meta_new_(void) +{ + struct evrpc_hook_meta *ctx; + ctx = mm_malloc(sizeof(struct evrpc_hook_meta)); + EVUTIL_ASSERT(ctx != NULL); + + TAILQ_INIT(&ctx->meta_data); + ctx->evcon = NULL; + + return (ctx); +} + +static void +evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx, + struct evhttp_connection *evcon) +{ + struct evrpc_hook_meta *ctx = *pctx; + if (ctx == NULL) + *pctx = ctx = evrpc_hook_meta_new_(); + ctx->evcon = evcon; +} + +static void +evrpc_hook_context_free_(struct evrpc_hook_meta *ctx) +{ + evrpc_meta_data_free(&ctx->meta_data); + mm_free(ctx); +} + +/* Adds meta data */ +void +evrpc_hook_add_meta(void *ctx, const char *key, + const void *data, size_t data_size) +{ + struct evrpc_request_wrapper *req = ctx; + struct evrpc_hook_meta *store = NULL; + struct evrpc_meta *meta = NULL; + + if ((store = req->hook_meta) == NULL) + store = req->hook_meta = evrpc_hook_meta_new_(); + + meta = mm_malloc(sizeof(struct evrpc_meta)); + EVUTIL_ASSERT(meta != NULL); + meta->key = mm_strdup(key); + EVUTIL_ASSERT(meta->key != NULL); + meta->data_size = data_size; + meta->data = mm_malloc(data_size); + EVUTIL_ASSERT(meta->data != NULL); + memcpy(meta->data, data, data_size); + + TAILQ_INSERT_TAIL(&store->meta_data, meta, next); +} + +int +evrpc_hook_find_meta(void *ctx, const char *key, void **data, size_t *data_size) +{ + struct evrpc_request_wrapper *req = ctx; + struct evrpc_meta *meta = NULL; + + if (req->hook_meta == NULL) + return (-1); + + TAILQ_FOREACH(meta, &req->hook_meta->meta_data, next) { + if (strcmp(meta->key, key) == 0) { + *data = meta->data; + *data_size = meta->data_size; + return (0); + } + } + + return (-1); +} + +struct evhttp_connection * +evrpc_hook_get_connection(void *ctx) +{ + struct evrpc_request_wrapper *req = ctx; + return (req->hook_meta != NULL ? 
req->hook_meta->evcon : NULL); +} + +int +evrpc_send_request_generic(struct evrpc_pool *pool, + void *request, void *reply, + void (*cb)(struct evrpc_status *, void *, void *, void *), + void *cb_arg, + const char *rpcname, + void (*req_marshal)(struct evbuffer *, void *), + void (*rpl_clear)(void *), + int (*rpl_unmarshal)(void *, struct evbuffer *)) +{ + struct evrpc_status status; + struct evrpc_request_wrapper *ctx; + ctx = evrpc_make_request_ctx(pool, request, reply, + rpcname, req_marshal, rpl_clear, rpl_unmarshal, cb, cb_arg); + if (ctx == NULL) + goto error; + return (evrpc_make_request(ctx)); +error: + memset(&status, 0, sizeof(status)); + status.error = EVRPC_STATUS_ERR_UNSTARTED; + (*(cb))(&status, request, reply, cb_arg); + return (-1); +} + +/** Takes a request object and fills it in with the right magic */ +static struct evrpc * +evrpc_register_object(const char *name, + void *(*req_new)(void*), void *req_new_arg, void (*req_free)(void *), + int (*req_unmarshal)(void *, struct evbuffer *), + void *(*rpl_new)(void*), void *rpl_new_arg, void (*rpl_free)(void *), + int (*rpl_complete)(void *), + void (*rpl_marshal)(struct evbuffer *, void *)) +{ + struct evrpc* rpc = (struct evrpc *)mm_calloc(1, sizeof(struct evrpc)); + if (rpc == NULL) + return (NULL); + rpc->uri = mm_strdup(name); + if (rpc->uri == NULL) { + mm_free(rpc); + return (NULL); + } + rpc->request_new = req_new; + rpc->request_new_arg = req_new_arg; + rpc->request_free = req_free; + rpc->request_unmarshal = req_unmarshal; + rpc->reply_new = rpl_new; + rpc->reply_new_arg = rpl_new_arg; + rpc->reply_free = rpl_free; + rpc->reply_complete = rpl_complete; + rpc->reply_marshal = rpl_marshal; + return (rpc); +} + +int +evrpc_register_generic(struct evrpc_base *base, const char *name, + void (*callback)(struct evrpc_req_generic *, void *), void *cbarg, + void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *), + int (*req_unmarshal)(void *, struct evbuffer *), + void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *), + int (*rpl_complete)(void *), + void (*rpl_marshal)(struct evbuffer *, void *)) +{ + struct evrpc* rpc = + evrpc_register_object(name, req_new, req_new_arg, req_free, req_unmarshal, + rpl_new, rpl_new_arg, rpl_free, rpl_complete, rpl_marshal); + if (rpc == NULL) + return (-1); + evrpc_register_rpc(base, rpc, + (void (*)(struct evrpc_req_generic*, void *))callback, cbarg); + return (0); +} + +/** accessors for obscure and undocumented functionality */ +struct evrpc_pool * +evrpc_request_get_pool(struct evrpc_request_wrapper *ctx) +{ + return (ctx->pool); +} + +void +evrpc_request_set_pool(struct evrpc_request_wrapper *ctx, + struct evrpc_pool *pool) +{ + ctx->pool = pool; +} + +void +evrpc_request_set_cb(struct evrpc_request_wrapper *ctx, + void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg), + void *cb_arg) +{ + ctx->cb = cb; + ctx->cb_arg = cb_arg; +} diff --git a/probe-busybox/libevent-2.1.11-stable/evsignal-internal.h b/probe-busybox/libevent-2.1.11-stable/evsignal-internal.h new file mode 100644 index 00000000..5cff03b5 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evsignal-internal.h @@ -0,0 +1,65 @@ +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVSIGNAL_INTERNAL_H_INCLUDED_ +#define EVSIGNAL_INTERNAL_H_INCLUDED_ + +#ifndef evutil_socket_t +#include "event2/util.h" +#endif +#include + +typedef void (*ev_sighandler_t)(int); + +/* Data structure for the default signal-handling implementation in signal.c + */ +struct evsig_info { + /* Event watching ev_signal_pair[1] */ + struct event ev_signal; + /* Socketpair used to send notifications from the signal handler */ + evutil_socket_t ev_signal_pair[2]; + /* True iff we've added the ev_signal event yet. */ + int ev_signal_added; + /* Count of the number of signals we're currently watching. */ + int ev_n_signals_added; + + /* Array of previous signal handler objects before Libevent started + * messing with them. Used to restore old signal handlers. */ +#ifdef EVENT__HAVE_SIGACTION + struct sigaction **sh_old; +#else + ev_sighandler_t **sh_old; +#endif + /* Size of sh_old. */ + int sh_old_max; +}; +int evsig_init_(struct event_base *); +void evsig_dealloc_(struct event_base *); + +void evsig_set_base_(struct event_base *base); +void evsig_free_globals_(void); + +#endif /* EVSIGNAL_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/evthread-internal.h b/probe-busybox/libevent-2.1.11-stable/evthread-internal.h new file mode 100644 index 00000000..5fdf3161 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evthread-internal.h @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVTHREAD_INTERNAL_H_INCLUDED_ +#define EVTHREAD_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include "event2/thread.h" +#include "util-internal.h" + +struct event_base; + +#ifndef _WIN32 +/* On Windows, the way we currently make DLLs, it's not allowed for us to + * have shared global structures. Thus, we only do the direct-call-to-function + * code path if we know that the local shared library system supports it. + */ +#define EVTHREAD_EXPOSE_STRUCTS +#endif + +#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS) +/* Global function pointers to lock-related functions. NULL if locking isn't + enabled. */ +EVENT2_EXPORT_SYMBOL +extern struct evthread_lock_callbacks evthread_lock_fns_; +EVENT2_EXPORT_SYMBOL +extern struct evthread_condition_callbacks evthread_cond_fns_; +extern unsigned long (*evthread_id_fn_)(void); +EVENT2_EXPORT_SYMBOL +extern int evthread_lock_debugging_enabled_; + +/** Return the ID of the current thread, or 1 if threading isn't enabled. */ +#define EVTHREAD_GET_ID() \ + (evthread_id_fn_ ? evthread_id_fn_() : 1) + +/** Return true iff we're in the thread that is currently (or most recently) + * running a given event_base's loop. Requires lock. */ +#define EVBASE_IN_THREAD(base) \ + (evthread_id_fn_ == NULL || \ + (base)->th_owner_id == evthread_id_fn_()) + +/** Return true iff we need to notify the base's main thread about changes to + * its state, because it's currently running the main loop in another + * thread. Requires lock. */ +#define EVBASE_NEED_NOTIFY(base) \ + (evthread_id_fn_ != NULL && \ + (base)->running_loop && \ + (base)->th_owner_id != evthread_id_fn_()) + +/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to + NULL if locking is not enabled. */ +#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \ + ((lockvar) = evthread_lock_fns_.alloc ? \ + evthread_lock_fns_.alloc(locktype) : NULL) + +/** Free a given lock, if it is present and locking is enabled. */ +#define EVTHREAD_FREE_LOCK(lockvar, locktype) \ + do { \ + void *lock_tmp_ = (lockvar); \ + if (lock_tmp_ && evthread_lock_fns_.free) \ + evthread_lock_fns_.free(lock_tmp_, (locktype)); \ + } while (0) + +/** Acquire a lock. */ +#define EVLOCK_LOCK(lockvar,mode) \ + do { \ + if (lockvar) \ + evthread_lock_fns_.lock(mode, lockvar); \ + } while (0) + +/** Release a lock */ +#define EVLOCK_UNLOCK(lockvar,mode) \ + do { \ + if (lockvar) \ + evthread_lock_fns_.unlock(mode, lockvar); \ + } while (0) + +/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. 
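+ * Sorting by pointer value gives every pair of locks a single global
+ * acquisition order, so two threads that take the same pair through
+ * EVLOCK_LOCK2() cannot deadlock by grabbing them in opposite orders.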
*/ +#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \ + do { \ + if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \ + void *tmp = lockvar1; \ + lockvar1 = lockvar2; \ + lockvar2 = tmp; \ + } \ + } while (0) + +/** Lock an event_base, if it is set up for locking. Acquires the lock + in the base structure whose field is named 'lockvar'. */ +#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \ + EVLOCK_LOCK((base)->lockvar, 0); \ + } while (0) + +/** Unlock an event_base, if it is set up for locking. */ +#define EVBASE_RELEASE_LOCK(base, lockvar) do { \ + EVLOCK_UNLOCK((base)->lockvar, 0); \ + } while (0) + +/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is + * locked and held by us. */ +#define EVLOCK_ASSERT_LOCKED(lock) \ + do { \ + if ((lock) && evthread_lock_debugging_enabled_) { \ + EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \ + } \ + } while (0) + +/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we + * manage to get it. */ +static inline int EVLOCK_TRY_LOCK_(void *lock); +static inline int +EVLOCK_TRY_LOCK_(void *lock) +{ + if (lock && evthread_lock_fns_.lock) { + int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock); + return !r; + } else { + /* Locking is disabled either globally or for this thing; + * of course we count as having the lock. */ + return 1; + } +} + +/** Allocate a new condition variable and store it in the void *, condvar */ +#define EVTHREAD_ALLOC_COND(condvar) \ + do { \ + (condvar) = evthread_cond_fns_.alloc_condition ? \ + evthread_cond_fns_.alloc_condition(0) : NULL; \ + } while (0) +/** Deallocate and free a condition variable in condvar */ +#define EVTHREAD_FREE_COND(cond) \ + do { \ + if (cond) \ + evthread_cond_fns_.free_condition((cond)); \ + } while (0) +/** Signal one thread waiting on cond */ +#define EVTHREAD_COND_SIGNAL(cond) \ + ( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 ) +/** Signal all threads waiting on cond */ +#define EVTHREAD_COND_BROADCAST(cond) \ + ( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 ) +/** Wait until the condition 'cond' is signalled. Must be called while + * holding 'lock'. The lock will be released until the condition is + * signalled, at which point it will be acquired again. Returns 0 for + * success, -1 for failure. */ +#define EVTHREAD_COND_WAIT(cond, lock) \ + ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 ) +/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1 + * on timeout. */ +#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \ + ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 ) + +/** True iff locking functions have been configured. */ +#define EVTHREAD_LOCKING_ENABLED() \ + (evthread_lock_fns_.lock != NULL) + +#elif ! 
defined(EVENT__DISABLE_THREAD_SUPPORT) + +unsigned long evthreadimpl_get_id_(void); +EVENT2_EXPORT_SYMBOL +int evthreadimpl_is_lock_debugging_enabled_(void); +EVENT2_EXPORT_SYMBOL +void *evthreadimpl_lock_alloc_(unsigned locktype); +EVENT2_EXPORT_SYMBOL +void evthreadimpl_lock_free_(void *lock, unsigned locktype); +EVENT2_EXPORT_SYMBOL +int evthreadimpl_lock_lock_(unsigned mode, void *lock); +EVENT2_EXPORT_SYMBOL +int evthreadimpl_lock_unlock_(unsigned mode, void *lock); +EVENT2_EXPORT_SYMBOL +void *evthreadimpl_cond_alloc_(unsigned condtype); +EVENT2_EXPORT_SYMBOL +void evthreadimpl_cond_free_(void *cond); +EVENT2_EXPORT_SYMBOL +int evthreadimpl_cond_signal_(void *cond, int broadcast); +EVENT2_EXPORT_SYMBOL +int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv); +int evthreadimpl_locking_enabled_(void); + +#define EVTHREAD_GET_ID() evthreadimpl_get_id_() +#define EVBASE_IN_THREAD(base) \ + ((base)->th_owner_id == evthreadimpl_get_id_()) +#define EVBASE_NEED_NOTIFY(base) \ + ((base)->running_loop && \ + ((base)->th_owner_id != evthreadimpl_get_id_())) + +#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \ + ((lockvar) = evthreadimpl_lock_alloc_(locktype)) + +#define EVTHREAD_FREE_LOCK(lockvar, locktype) \ + do { \ + void *lock_tmp_ = (lockvar); \ + if (lock_tmp_) \ + evthreadimpl_lock_free_(lock_tmp_, (locktype)); \ + } while (0) + +/** Acquire a lock. */ +#define EVLOCK_LOCK(lockvar,mode) \ + do { \ + if (lockvar) \ + evthreadimpl_lock_lock_(mode, lockvar); \ + } while (0) + +/** Release a lock */ +#define EVLOCK_UNLOCK(lockvar,mode) \ + do { \ + if (lockvar) \ + evthreadimpl_lock_unlock_(mode, lockvar); \ + } while (0) + +/** Lock an event_base, if it is set up for locking. Acquires the lock + in the base structure whose field is named 'lockvar'. */ +#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \ + EVLOCK_LOCK((base)->lockvar, 0); \ + } while (0) + +/** Unlock an event_base, if it is set up for locking. */ +#define EVBASE_RELEASE_LOCK(base, lockvar) do { \ + EVLOCK_UNLOCK((base)->lockvar, 0); \ + } while (0) + +/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is + * locked and held by us. */ +#define EVLOCK_ASSERT_LOCKED(lock) \ + do { \ + if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \ + EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \ + } \ + } while (0) + +/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we + * manage to get it. */ +static inline int EVLOCK_TRY_LOCK_(void *lock); +static inline int +EVLOCK_TRY_LOCK_(void *lock) +{ + if (lock) { + int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock); + return !r; + } else { + /* Locking is disabled either globally or for this thing; + * of course we count as having the lock. */ + return 1; + } +} + +/** Allocate a new condition variable and store it in the void *, condvar */ +#define EVTHREAD_ALLOC_COND(condvar) \ + do { \ + (condvar) = evthreadimpl_cond_alloc_(0); \ + } while (0) +/** Deallocate and free a condition variable in condvar */ +#define EVTHREAD_FREE_COND(cond) \ + do { \ + if (cond) \ + evthreadimpl_cond_free_((cond)); \ + } while (0) +/** Signal one thread waiting on cond */ +#define EVTHREAD_COND_SIGNAL(cond) \ + ( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 ) +/** Signal all threads waiting on cond */ +#define EVTHREAD_COND_BROADCAST(cond) \ + ( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 ) +/** Wait until the condition 'cond' is signalled. Must be called while + * holding 'lock'. 
The lock will be released until the condition is + * signalled, at which point it will be acquired again. Returns 0 for + * success, -1 for failure. */ +#define EVTHREAD_COND_WAIT(cond, lock) \ + ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 ) +/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1 + * on timeout. */ +#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \ + ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 ) + +#define EVTHREAD_LOCKING_ENABLED() \ + (evthreadimpl_locking_enabled_()) + +#else /* EVENT__DISABLE_THREAD_SUPPORT */ + +#define EVTHREAD_GET_ID() 1 +#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_ +#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_ + +#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_ +#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_ +#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_ +#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_ + +#define EVBASE_IN_THREAD(base) 1 +#define EVBASE_NEED_NOTIFY(base) 0 +#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_ +#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_ +#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_ + +#define EVLOCK_TRY_LOCK_(lock) 1 + +#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_ +#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_ +#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_ +#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_ +#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_ +#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_ + +#define EVTHREAD_LOCKING_ENABLED() 0 + +#endif + +/* This code is shared between both lock impls */ +#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) +/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */ +#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \ + do { \ + if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \ + void *tmp = lockvar1; \ + lockvar1 = lockvar2; \ + lockvar2 = tmp; \ + } \ + } while (0) + +/** Acquire both lock1 and lock2. Always allocates locks in the same order, + * so that two threads locking two locks with LOCK2 will not deadlock. */ +#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \ + do { \ + void *lock1_tmplock_ = (lock1); \ + void *lock2_tmplock_ = (lock2); \ + EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \ + EVLOCK_LOCK(lock1_tmplock_,mode1); \ + if (lock2_tmplock_ != lock1_tmplock_) \ + EVLOCK_LOCK(lock2_tmplock_,mode2); \ + } while (0) +/** Release both lock1 and lock2. 
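+ * Illustrative sketch (not code from this patch; assumes the usual
+ * th_base_lock field on struct event_base):
+ *
+ *     EVLOCK_LOCK2(base_a->th_base_lock, base_b->th_base_lock, 0, 0);
+ *     // ... touch state guarded by both bases ...
+ *     EVLOCK_UNLOCK2(base_a->th_base_lock, base_b->th_base_lock, 0, 0);
+ *
+ * EVLOCK_UNLOCK2() releases in the reverse of the sorted order in which
+ * EVLOCK_LOCK2() acquired the two locks.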
*/ +#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \ + do { \ + void *lock1_tmplock_ = (lock1); \ + void *lock2_tmplock_ = (lock2); \ + EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \ + if (lock2_tmplock_ != lock1_tmplock_) \ + EVLOCK_UNLOCK(lock2_tmplock_,mode2); \ + EVLOCK_UNLOCK(lock1_tmplock_,mode1); \ + } while (0) + +EVENT2_EXPORT_SYMBOL +int evthread_is_debug_lock_held_(void *lock); +void *evthread_debug_get_real_lock_(void *lock); + +void *evthread_setup_global_lock_(void *lock_, unsigned locktype, + int enable_locks); + +#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \ + do { \ + lockvar = evthread_setup_global_lock_(lockvar, \ + (locktype), enable_locks); \ + if (!lockvar) { \ + event_warn("Couldn't allocate %s", #lockvar); \ + return -1; \ + } \ + } while (0); + +int event_global_setup_locks_(const int enable_locks); +int evsig_global_setup_locks_(const int enable_locks); +int evutil_global_setup_locks_(const int enable_locks); +int evutil_secure_rng_global_setup_locks_(const int enable_locks); + +/** Return current evthread_lock_callbacks */ +EVENT2_EXPORT_SYMBOL +struct evthread_lock_callbacks *evthread_get_lock_callbacks(void); +/** Return current evthread_condition_callbacks */ +struct evthread_condition_callbacks *evthread_get_condition_callbacks(void); +/** Disable locking for internal usage (like global shutdown) */ +void evthreadimpl_disable_lock_debugging_(void); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/evthread.c b/probe-busybox/libevent-2.1.11-stable/evthread.c new file mode 100644 index 00000000..3eac594d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evthread.c @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + +#include "event2/thread.h" + +#include +#include + +#include "log-internal.h" +#include "mm-internal.h" +#include "util-internal.h" +#include "evthread-internal.h" + +#ifdef EVTHREAD_EXPOSE_STRUCTS +#define GLOBAL +#else +#define GLOBAL static +#endif + +#ifndef EVENT__DISABLE_DEBUG_MODE +extern int event_debug_created_threadable_ctx_; +extern int event_debug_mode_on_; +#endif + +/* globals */ +GLOBAL int evthread_lock_debugging_enabled_ = 0; +GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = { + 0, 0, NULL, NULL, NULL, NULL +}; +GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL; +GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = { + 0, NULL, NULL, NULL, NULL +}; + +/* Used for debugging */ +static struct evthread_lock_callbacks original_lock_fns_ = { + 0, 0, NULL, NULL, NULL, NULL +}; +static struct evthread_condition_callbacks original_cond_fns_ = { + 0, NULL, NULL, NULL, NULL +}; + +void +evthread_set_id_callback(unsigned long (*id_fn)(void)) +{ + evthread_id_fn_ = id_fn; +} + +struct evthread_lock_callbacks *evthread_get_lock_callbacks() +{ + return evthread_lock_debugging_enabled_ + ? &original_lock_fns_ : &evthread_lock_fns_; +} +struct evthread_condition_callbacks *evthread_get_condition_callbacks() +{ + return evthread_lock_debugging_enabled_ + ? &original_cond_fns_ : &evthread_cond_fns_; +} +void evthreadimpl_disable_lock_debugging_(void) +{ + evthread_lock_debugging_enabled_ = 0; +} + +int +evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs) +{ + struct evthread_lock_callbacks *target = evthread_get_lock_callbacks(); + +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_mode_on_) { + if (event_debug_created_threadable_ctx_) { + event_errx(1, "evthread initialization must be called BEFORE anything else!"); + } + } +#endif + + if (!cbs) { + if (target->alloc) + event_warnx("Trying to disable lock functions after " + "they have been set up will probaby not work."); + memset(target, 0, sizeof(evthread_lock_fns_)); + return 0; + } + if (target->alloc) { + /* Uh oh; we already had locking callbacks set up.*/ + if (target->lock_api_version == cbs->lock_api_version && + target->supported_locktypes == cbs->supported_locktypes && + target->alloc == cbs->alloc && + target->free == cbs->free && + target->lock == cbs->lock && + target->unlock == cbs->unlock) { + /* no change -- allow this. 
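+		 * (Because an identical re-registration lands here and
+		 * returns 0, calling e.g. evthread_use_pthreads() more than
+		 * once is harmless.)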
*/ + return 0; + } + event_warnx("Can't change lock callbacks once they have been " + "initialized."); + return -1; + } + if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) { + memcpy(target, cbs, sizeof(evthread_lock_fns_)); + return event_global_setup_locks_(1); + } else { + return -1; + } +} + +int +evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs) +{ + struct evthread_condition_callbacks *target = evthread_get_condition_callbacks(); + +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_mode_on_) { + if (event_debug_created_threadable_ctx_) { + event_errx(1, "evthread initialization must be called BEFORE anything else!"); + } + } +#endif + + if (!cbs) { + if (target->alloc_condition) + event_warnx("Trying to disable condition functions " + "after they have been set up will probaby not " + "work."); + memset(target, 0, sizeof(evthread_cond_fns_)); + return 0; + } + if (target->alloc_condition) { + /* Uh oh; we already had condition callbacks set up.*/ + if (target->condition_api_version == cbs->condition_api_version && + target->alloc_condition == cbs->alloc_condition && + target->free_condition == cbs->free_condition && + target->signal_condition == cbs->signal_condition && + target->wait_condition == cbs->wait_condition) { + /* no change -- allow this. */ + return 0; + } + event_warnx("Can't change condition callbacks once they " + "have been initialized."); + return -1; + } + if (cbs->alloc_condition && cbs->free_condition && + cbs->signal_condition && cbs->wait_condition) { + memcpy(target, cbs, sizeof(evthread_cond_fns_)); + } + if (evthread_lock_debugging_enabled_) { + evthread_cond_fns_.alloc_condition = cbs->alloc_condition; + evthread_cond_fns_.free_condition = cbs->free_condition; + evthread_cond_fns_.signal_condition = cbs->signal_condition; + } + return 0; +} + +#define DEBUG_LOCK_SIG 0xdeb0b10c + +struct debug_lock { + unsigned signature; + unsigned locktype; + unsigned long held_by; + /* XXXX if we ever use read-write locks, we will need a separate + * lock to protect count. 
*/ + int count; + void *lock; +}; + +static void * +debug_lock_alloc(unsigned locktype) +{ + struct debug_lock *result = mm_malloc(sizeof(struct debug_lock)); + if (!result) + return NULL; + if (original_lock_fns_.alloc) { + if (!(result->lock = original_lock_fns_.alloc( + locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) { + mm_free(result); + return NULL; + } + } else { + result->lock = NULL; + } + result->signature = DEBUG_LOCK_SIG; + result->locktype = locktype; + result->count = 0; + result->held_by = 0; + return result; +} + +static void +debug_lock_free(void *lock_, unsigned locktype) +{ + struct debug_lock *lock = lock_; + EVUTIL_ASSERT(lock->count == 0); + EVUTIL_ASSERT(locktype == lock->locktype); + EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature); + if (original_lock_fns_.free) { + original_lock_fns_.free(lock->lock, + lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE); + } + lock->lock = NULL; + lock->count = -100; + lock->signature = 0x12300fda; + mm_free(lock); +} + +static void +evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock) +{ + EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature); + ++lock->count; + if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) + EVUTIL_ASSERT(lock->count == 1); + if (evthread_id_fn_) { + unsigned long me; + me = evthread_id_fn_(); + if (lock->count > 1) + EVUTIL_ASSERT(lock->held_by == me); + lock->held_by = me; + } +} + +static int +debug_lock_lock(unsigned mode, void *lock_) +{ + struct debug_lock *lock = lock_; + int res = 0; + if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE) + EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE)); + else + EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0); + if (original_lock_fns_.lock) + res = original_lock_fns_.lock(mode, lock->lock); + if (!res) { + evthread_debug_lock_mark_locked(mode, lock); + } + return res; +} + +static void +evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock) +{ + EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature); + if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE) + EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE)); + else + EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0); + if (evthread_id_fn_) { + unsigned long me; + me = evthread_id_fn_(); + EVUTIL_ASSERT(lock->held_by == me); + if (lock->count == 1) + lock->held_by = 0; + } + --lock->count; + EVUTIL_ASSERT(lock->count >= 0); +} + +static int +debug_lock_unlock(unsigned mode, void *lock_) +{ + struct debug_lock *lock = lock_; + int res = 0; + evthread_debug_lock_mark_unlocked(mode, lock); + if (original_lock_fns_.unlock) + res = original_lock_fns_.unlock(mode, lock->lock); + return res; +} + +static int +debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv) +{ + int r; + struct debug_lock *lock = lock_; + EVUTIL_ASSERT(lock); + EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature); + EVLOCK_ASSERT_LOCKED(lock_); + evthread_debug_lock_mark_unlocked(0, lock); + r = original_cond_fns_.wait_condition(cond_, lock->lock, tv); + evthread_debug_lock_mark_locked(0, lock); + return r; +} + +/* misspelled version for backward compatibility */ +void +evthread_enable_lock_debuging(void) +{ + evthread_enable_lock_debugging(); +} + +void +evthread_enable_lock_debugging(void) +{ + struct evthread_lock_callbacks cbs = { + EVTHREAD_LOCK_API_VERSION, + EVTHREAD_LOCKTYPE_RECURSIVE, + debug_lock_alloc, + debug_lock_free, + debug_lock_lock, + debug_lock_unlock + }; + if (evthread_lock_debugging_enabled_) + return; + memcpy(&original_lock_fns_, &evthread_lock_fns_, + sizeof(struct 
evthread_lock_callbacks)); + memcpy(&evthread_lock_fns_, &cbs, + sizeof(struct evthread_lock_callbacks)); + + memcpy(&original_cond_fns_, &evthread_cond_fns_, + sizeof(struct evthread_condition_callbacks)); + evthread_cond_fns_.wait_condition = debug_cond_wait; + evthread_lock_debugging_enabled_ = 1; + + /* XXX return value should get checked. */ + event_global_setup_locks_(0); +} + +int +evthread_is_debug_lock_held_(void *lock_) +{ + struct debug_lock *lock = lock_; + if (! lock->count) + return 0; + if (evthread_id_fn_) { + unsigned long me = evthread_id_fn_(); + if (lock->held_by != me) + return 0; + } + return 1; +} + +void * +evthread_debug_get_real_lock_(void *lock_) +{ + struct debug_lock *lock = lock_; + return lock->lock; +} + +void * +evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks) +{ + /* there are four cases here: + 1) we're turning on debugging; locking is not on. + 2) we're turning on debugging; locking is on. + 3) we're turning on locking; debugging is not on. + 4) we're turning on locking; debugging is on. */ + + if (!enable_locks && original_lock_fns_.alloc == NULL) { + /* Case 1: allocate a debug lock. */ + EVUTIL_ASSERT(lock_ == NULL); + return debug_lock_alloc(locktype); + } else if (!enable_locks && original_lock_fns_.alloc != NULL) { + /* Case 2: wrap the lock in a debug lock. */ + struct debug_lock *lock; + EVUTIL_ASSERT(lock_ != NULL); + + if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) { + /* We can't wrap it: We need a recursive lock */ + original_lock_fns_.free(lock_, locktype); + return debug_lock_alloc(locktype); + } + lock = mm_malloc(sizeof(struct debug_lock)); + if (!lock) { + original_lock_fns_.free(lock_, locktype); + return NULL; + } + lock->lock = lock_; + lock->locktype = locktype; + lock->count = 0; + lock->held_by = 0; + return lock; + } else if (enable_locks && ! evthread_lock_debugging_enabled_) { + /* Case 3: allocate a regular lock */ + EVUTIL_ASSERT(lock_ == NULL); + return evthread_lock_fns_.alloc(locktype); + } else { + /* Case 4: Fill in a debug lock with a real lock */ + struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype); + EVUTIL_ASSERT(enable_locks && + evthread_lock_debugging_enabled_); + EVUTIL_ASSERT(lock->locktype == locktype); + if (!lock->lock) { + lock->lock = original_lock_fns_.alloc( + locktype|EVTHREAD_LOCKTYPE_RECURSIVE); + if (!lock->lock) { + lock->count = -200; + mm_free(lock); + return NULL; + } + } + return lock; + } +} + + +#ifndef EVTHREAD_EXPOSE_STRUCTS +unsigned long +evthreadimpl_get_id_() +{ + return evthread_id_fn_ ? evthread_id_fn_() : 1; +} +void * +evthreadimpl_lock_alloc_(unsigned locktype) +{ +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_mode_on_) { + event_debug_created_threadable_ctx_ = 1; + } +#endif + + return evthread_lock_fns_.alloc ? 
+ evthread_lock_fns_.alloc(locktype) : NULL; +} +void +evthreadimpl_lock_free_(void *lock, unsigned locktype) +{ + if (evthread_lock_fns_.free) + evthread_lock_fns_.free(lock, locktype); +} +int +evthreadimpl_lock_lock_(unsigned mode, void *lock) +{ + if (evthread_lock_fns_.lock) + return evthread_lock_fns_.lock(mode, lock); + else + return 0; +} +int +evthreadimpl_lock_unlock_(unsigned mode, void *lock) +{ + if (evthread_lock_fns_.unlock) + return evthread_lock_fns_.unlock(mode, lock); + else + return 0; +} +void * +evthreadimpl_cond_alloc_(unsigned condtype) +{ +#ifndef EVENT__DISABLE_DEBUG_MODE + if (event_debug_mode_on_) { + event_debug_created_threadable_ctx_ = 1; + } +#endif + + return evthread_cond_fns_.alloc_condition ? + evthread_cond_fns_.alloc_condition(condtype) : NULL; +} +void +evthreadimpl_cond_free_(void *cond) +{ + if (evthread_cond_fns_.free_condition) + evthread_cond_fns_.free_condition(cond); +} +int +evthreadimpl_cond_signal_(void *cond, int broadcast) +{ + if (evthread_cond_fns_.signal_condition) + return evthread_cond_fns_.signal_condition(cond, broadcast); + else + return 0; +} +int +evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv) +{ + if (evthread_cond_fns_.wait_condition) + return evthread_cond_fns_.wait_condition(cond, lock, tv); + else + return 0; +} +int +evthreadimpl_is_lock_debugging_enabled_(void) +{ + return evthread_lock_debugging_enabled_; +} + +int +evthreadimpl_locking_enabled_(void) +{ + return evthread_lock_fns_.lock != NULL; +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/evthread_pthread.c b/probe-busybox/libevent-2.1.11-stable/evthread_pthread.c new file mode 100644 index 00000000..4e11f749 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evthread_pthread.c @@ -0,0 +1,191 @@ +/* + * Copyright 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +/* With glibc we need to define _GNU_SOURCE to get PTHREAD_MUTEX_RECURSIVE. 
+ * This comes from evconfig-private.h + */ +#include + +struct event_base; +#include "event2/thread.h" + +#include +#include +#include "mm-internal.h" +#include "evthread-internal.h" + +static pthread_mutexattr_t attr_recursive; + +static void * +evthread_posix_lock_alloc(unsigned locktype) +{ + pthread_mutexattr_t *attr = NULL; + pthread_mutex_t *lock = mm_malloc(sizeof(pthread_mutex_t)); + if (!lock) + return NULL; + if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE) + attr = &attr_recursive; + if (pthread_mutex_init(lock, attr)) { + mm_free(lock); + return NULL; + } + return lock; +} + +static void +evthread_posix_lock_free(void *lock_, unsigned locktype) +{ + pthread_mutex_t *lock = lock_; + pthread_mutex_destroy(lock); + mm_free(lock); +} + +static int +evthread_posix_lock(unsigned mode, void *lock_) +{ + pthread_mutex_t *lock = lock_; + if (mode & EVTHREAD_TRY) + return pthread_mutex_trylock(lock); + else + return pthread_mutex_lock(lock); +} + +static int +evthread_posix_unlock(unsigned mode, void *lock_) +{ + pthread_mutex_t *lock = lock_; + return pthread_mutex_unlock(lock); +} + +static unsigned long +evthread_posix_get_id(void) +{ + union { + pthread_t thr; +#if EVENT__SIZEOF_PTHREAD_T > EVENT__SIZEOF_LONG + ev_uint64_t id; +#else + unsigned long id; +#endif + } r; +#if EVENT__SIZEOF_PTHREAD_T < EVENT__SIZEOF_LONG + memset(&r, 0, sizeof(r)); +#endif + r.thr = pthread_self(); + return (unsigned long)r.id; +} + +static void * +evthread_posix_cond_alloc(unsigned condflags) +{ + pthread_cond_t *cond = mm_malloc(sizeof(pthread_cond_t)); + if (!cond) + return NULL; + if (pthread_cond_init(cond, NULL)) { + mm_free(cond); + return NULL; + } + return cond; +} + +static void +evthread_posix_cond_free(void *cond_) +{ + pthread_cond_t *cond = cond_; + pthread_cond_destroy(cond); + mm_free(cond); +} + +static int +evthread_posix_cond_signal(void *cond_, int broadcast) +{ + pthread_cond_t *cond = cond_; + int r; + if (broadcast) + r = pthread_cond_broadcast(cond); + else + r = pthread_cond_signal(cond); + return r ? -1 : 0; +} + +static int +evthread_posix_cond_wait(void *cond_, void *lock_, const struct timeval *tv) +{ + int r; + pthread_cond_t *cond = cond_; + pthread_mutex_t *lock = lock_; + + if (tv) { + struct timeval now, abstime; + struct timespec ts; + evutil_gettimeofday(&now, NULL); + evutil_timeradd(&now, tv, &abstime); + ts.tv_sec = abstime.tv_sec; + ts.tv_nsec = abstime.tv_usec*1000; + r = pthread_cond_timedwait(cond, lock, &ts); + if (r == ETIMEDOUT) + return 1; + else if (r) + return -1; + else + return 0; + } else { + r = pthread_cond_wait(cond, lock); + return r ? -1 : 0; + } +} + +int +evthread_use_pthreads(void) +{ + struct evthread_lock_callbacks cbs = { + EVTHREAD_LOCK_API_VERSION, + EVTHREAD_LOCKTYPE_RECURSIVE, + evthread_posix_lock_alloc, + evthread_posix_lock_free, + evthread_posix_lock, + evthread_posix_unlock + }; + struct evthread_condition_callbacks cond_cbs = { + EVTHREAD_CONDITION_API_VERSION, + evthread_posix_cond_alloc, + evthread_posix_cond_free, + evthread_posix_cond_signal, + evthread_posix_cond_wait + }; + /* Set ourselves up to get recursive locks. 
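+	 * Typical caller sketch (illustrative, not part of this file):
+	 * enable pthreads locking once, before creating any event_base, and
+	 * link with -levent_pthreads:
+	 *
+	 *     #include <event2/thread.h>
+	 *     #include <event2/event.h>
+	 *
+	 *     if (evthread_use_pthreads() == -1)
+	 *         return -1;
+	 *     struct event_base *base = event_base_new();
+	 *
+	 * After this, new event_base locks use the POSIX callbacks
+	 * registered below.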
*/ + if (pthread_mutexattr_init(&attr_recursive)) + return -1; + if (pthread_mutexattr_settype(&attr_recursive, PTHREAD_MUTEX_RECURSIVE)) + return -1; + + evthread_set_lock_callbacks(&cbs); + evthread_set_condition_callbacks(&cond_cbs); + evthread_set_id_callback(evthread_posix_get_id); + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/evthread_win32.c b/probe-busybox/libevent-2.1.11-stable/evthread_win32.c new file mode 100644 index 00000000..2ec80560 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evthread_win32.c @@ -0,0 +1,341 @@ +/* + * Copyright 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#ifndef _WIN32_WINNT +/* Minimum required for InitializeCriticalSectionAndSpinCount */ +#define _WIN32_WINNT 0x0403 +#endif +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#include +#endif + +struct event_base; +#include "event2/thread.h" + +#include "mm-internal.h" +#include "evthread-internal.h" +#include "time-internal.h" + +#define SPIN_COUNT 2000 + +static void * +evthread_win32_lock_create(unsigned locktype) +{ + CRITICAL_SECTION *lock = mm_malloc(sizeof(CRITICAL_SECTION)); + if (!lock) + return NULL; + if (InitializeCriticalSectionAndSpinCount(lock, SPIN_COUNT) == 0) { + mm_free(lock); + return NULL; + } + return lock; +} + +static void +evthread_win32_lock_free(void *lock_, unsigned locktype) +{ + CRITICAL_SECTION *lock = lock_; + DeleteCriticalSection(lock); + mm_free(lock); +} + +static int +evthread_win32_lock(unsigned mode, void *lock_) +{ + CRITICAL_SECTION *lock = lock_; + if ((mode & EVTHREAD_TRY)) { + return ! 
TryEnterCriticalSection(lock); + } else { + EnterCriticalSection(lock); + return 0; + } +} + +static int +evthread_win32_unlock(unsigned mode, void *lock_) +{ + CRITICAL_SECTION *lock = lock_; + LeaveCriticalSection(lock); + return 0; +} + +static unsigned long +evthread_win32_get_id(void) +{ + return (unsigned long) GetCurrentThreadId(); +} + +#ifdef WIN32_HAVE_CONDITION_VARIABLES +static void WINAPI (*InitializeConditionVariable_fn)(PCONDITION_VARIABLE) + = NULL; +static BOOL WINAPI (*SleepConditionVariableCS_fn)( + PCONDITION_VARIABLE, PCRITICAL_SECTION, DWORD) = NULL; +static void WINAPI (*WakeAllConditionVariable_fn)(PCONDITION_VARIABLE) = NULL; +static void WINAPI (*WakeConditionVariable_fn)(PCONDITION_VARIABLE) = NULL; + +static int +evthread_win32_condvar_init(void) +{ + HANDLE lib; + + lib = GetModuleHandle(TEXT("kernel32.dll")); + if (lib == NULL) + return 0; + +#define LOAD(name) \ + name##_fn = GetProcAddress(lib, #name) + LOAD(InitializeConditionVariable); + LOAD(SleepConditionVariableCS); + LOAD(WakeAllConditionVariable); + LOAD(WakeConditionVariable); + + return InitializeConditionVariable_fn && SleepConditionVariableCS_fn && + WakeAllConditionVariable_fn && WakeConditionVariable_fn; +} + +/* XXXX Even if we can build this, we don't necessarily want to: the functions + * in question didn't exist before Vista, so we'd better LoadProc them. */ +static void * +evthread_win32_condvar_alloc(unsigned condflags) +{ + CONDITION_VARIABLE *cond = mm_malloc(sizeof(CONDITION_VARIABLE)); + if (!cond) + return NULL; + InitializeConditionVariable_fn(cond); + return cond; +} + +static void +evthread_win32_condvar_free(void *cond_) +{ + CONDITION_VARIABLE *cond = cond_; + /* There doesn't _seem_ to be a cleaup fn here... */ + mm_free(cond); +} + +static int +evthread_win32_condvar_signal(void *cond, int broadcast) +{ + CONDITION_VARIABLE *cond = cond_; + if (broadcast) + WakeAllConditionVariable_fn(cond); + else + WakeConditionVariable_fn(cond); + return 0; +} + +static int +evthread_win32_condvar_wait(void *cond_, void *lock_, const struct timeval *tv) +{ + CONDITION_VARIABLE *cond = cond_; + CRITICAL_SECTION *lock = lock_; + DWORD ms, err; + BOOL result; + + if (tv) + ms = evutil_tv_to_msec_(tv); + else + ms = INFINITE; + result = SleepConditionVariableCS_fn(cond, lock, ms); + if (result) { + if (GetLastError() == WAIT_TIMEOUT) + return 1; + else + return -1; + } else { + return 0; + } +} +#endif + +struct evthread_win32_cond { + HANDLE event; + + CRITICAL_SECTION lock; + int n_waiting; + int n_to_wake; + int generation; +}; + +static void * +evthread_win32_cond_alloc(unsigned flags) +{ + struct evthread_win32_cond *cond; + if (!(cond = mm_malloc(sizeof(struct evthread_win32_cond)))) + return NULL; + if (InitializeCriticalSectionAndSpinCount(&cond->lock, SPIN_COUNT)==0) { + mm_free(cond); + return NULL; + } + if ((cond->event = CreateEvent(NULL,TRUE,FALSE,NULL)) == NULL) { + DeleteCriticalSection(&cond->lock); + mm_free(cond); + return NULL; + } + cond->n_waiting = cond->n_to_wake = cond->generation = 0; + return cond; +} + +static void +evthread_win32_cond_free(void *cond_) +{ + struct evthread_win32_cond *cond = cond_; + DeleteCriticalSection(&cond->lock); + CloseHandle(cond->event); + mm_free(cond); +} + +static int +evthread_win32_cond_signal(void *cond_, int broadcast) +{ + struct evthread_win32_cond *cond = cond_; + EnterCriticalSection(&cond->lock); + if (broadcast) + cond->n_to_wake = cond->n_waiting; + else + ++cond->n_to_wake; + cond->generation++; + SetEvent(cond->event); + 
LeaveCriticalSection(&cond->lock); + return 0; +} + +static int +evthread_win32_cond_wait(void *cond_, void *lock_, const struct timeval *tv) +{ + struct evthread_win32_cond *cond = cond_; + CRITICAL_SECTION *lock = lock_; + int generation_at_start; + int waiting = 1; + int result = -1; + DWORD ms = INFINITE, ms_orig = INFINITE, startTime, endTime; + if (tv) + ms_orig = ms = evutil_tv_to_msec_(tv); + + EnterCriticalSection(&cond->lock); + ++cond->n_waiting; + generation_at_start = cond->generation; + LeaveCriticalSection(&cond->lock); + + LeaveCriticalSection(lock); + + startTime = GetTickCount(); + do { + DWORD res; + res = WaitForSingleObject(cond->event, ms); + EnterCriticalSection(&cond->lock); + if (cond->n_to_wake && + cond->generation != generation_at_start) { + --cond->n_to_wake; + --cond->n_waiting; + result = 0; + waiting = 0; + goto out; + } else if (res != WAIT_OBJECT_0) { + result = (res==WAIT_TIMEOUT) ? 1 : -1; + --cond->n_waiting; + waiting = 0; + goto out; + } else if (ms != INFINITE) { + endTime = GetTickCount(); + if (startTime + ms_orig <= endTime) { + result = 1; /* Timeout */ + --cond->n_waiting; + waiting = 0; + goto out; + } else { + ms = startTime + ms_orig - endTime; + } + } + /* If we make it here, we are still waiting. */ + if (cond->n_to_wake == 0) { + /* There is nobody else who should wake up; reset + * the event. */ + ResetEvent(cond->event); + } + out: + LeaveCriticalSection(&cond->lock); + } while (waiting); + + EnterCriticalSection(lock); + + EnterCriticalSection(&cond->lock); + if (!cond->n_waiting) + ResetEvent(cond->event); + LeaveCriticalSection(&cond->lock); + + return result; +} + +int +evthread_use_windows_threads(void) +{ + struct evthread_lock_callbacks cbs = { + EVTHREAD_LOCK_API_VERSION, + EVTHREAD_LOCKTYPE_RECURSIVE, + evthread_win32_lock_create, + evthread_win32_lock_free, + evthread_win32_lock, + evthread_win32_unlock + }; + + + struct evthread_condition_callbacks cond_cbs = { + EVTHREAD_CONDITION_API_VERSION, + evthread_win32_cond_alloc, + evthread_win32_cond_free, + evthread_win32_cond_signal, + evthread_win32_cond_wait + }; +#ifdef WIN32_HAVE_CONDITION_VARIABLES + struct evthread_condition_callbacks condvar_cbs = { + EVTHREAD_CONDITION_API_VERSION, + evthread_win32_condvar_alloc, + evthread_win32_condvar_free, + evthread_win32_condvar_signal, + evthread_win32_condvar_wait + }; +#endif + + evthread_set_lock_callbacks(&cbs); + evthread_set_id_callback(evthread_win32_get_id); +#ifdef WIN32_HAVE_CONDITION_VARIABLES + if (evthread_win32_condvar_init()) { + evthread_set_condition_callbacks(&condvar_cbs); + return 0; + } +#endif + evthread_set_condition_callbacks(&cond_cbs); + + return 0; +} + diff --git a/probe-busybox/libevent-2.1.11-stable/evutil.c b/probe-busybox/libevent-2.1.11-stable/evutil.c new file mode 100644 index 00000000..6e84285d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evutil.c @@ -0,0 +1,2768 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#include +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#include +#include +#include +#undef _WIN32_WINNT +/* For structs needed by GetAdaptersAddresses */ +#define _WIN32_WINNT 0x0501 +#include +#endif + +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif +#ifdef EVENT__HAVE_STDLIB_H +#include +#endif +#include +#include +#include +#include +#ifdef EVENT__HAVE_NETINET_IN_H +#include +#endif +#ifdef EVENT__HAVE_NETINET_IN6_H +#include +#endif +#ifdef EVENT__HAVE_NETINET_TCP_H +#include +#endif +#ifdef EVENT__HAVE_ARPA_INET_H +#include +#endif +#include +#include +#include +#ifdef EVENT__HAVE_IFADDRS_H +#include +#endif + +#include "event2/util.h" +#include "util-internal.h" +#include "log-internal.h" +#include "mm-internal.h" +#include "evthread-internal.h" + +#include "strlcpy-internal.h" +#include "ipv6-internal.h" + +#ifdef _WIN32 +#define HT_NO_CACHE_HASH_VALUES +#include "ht-internal.h" +#define open _open +#define read _read +#define close _close +#ifndef fstat +#define fstat _fstati64 +#endif +#ifndef stat +#define stat _stati64 +#endif +#define mode_t int +#endif + +static int evutil_inet_pton_scope(int af, const char *src, void *dst, + unsigned *indexp); + +int +evutil_open_closeonexec_(const char *pathname, int flags, unsigned mode) +{ + int fd; + +#ifdef O_CLOEXEC + fd = open(pathname, flags|O_CLOEXEC, (mode_t)mode); + if (fd >= 0 || errno == EINVAL) + return fd; + /* If we got an EINVAL, fall through and try without O_CLOEXEC */ +#endif + fd = open(pathname, flags, (mode_t)mode); + if (fd < 0) + return -1; + +#if defined(FD_CLOEXEC) + if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) { + close(fd); + return -1; + } +#endif + + return fd; +} + +/** + Read the contents of 'filename' into a newly allocated NUL-terminated + string. Set *content_out to hold this string, and *len_out to hold its + length (not including the appended NUL). If 'is_binary', open the file in + binary mode. + + Returns 0 on success, -1 if the open fails, and -2 for all other failures. + + Used internally only; may go away in a future version. 
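+
+   Illustrative internal caller (sketch; "state.json" is only a placeholder
+   name). The buffer comes from mm_malloc(), so release it with mm_free():
+
+       char *content = NULL;
+       size_t len = 0;
+       if (evutil_read_file_("state.json", &content, &len, 0) == 0) {
+           // content is NUL-terminated; len excludes the NUL
+           mm_free(content);
+       }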
+ */ +int +evutil_read_file_(const char *filename, char **content_out, size_t *len_out, + int is_binary) +{ + int fd, r; + struct stat st; + char *mem; + size_t read_so_far=0; + int mode = O_RDONLY; + + EVUTIL_ASSERT(content_out); + EVUTIL_ASSERT(len_out); + *content_out = NULL; + *len_out = 0; + +#ifdef O_BINARY + if (is_binary) + mode |= O_BINARY; +#endif + + fd = evutil_open_closeonexec_(filename, mode, 0); + if (fd < 0) + return -1; + if (fstat(fd, &st) || st.st_size < 0 || + st.st_size > EV_SSIZE_MAX-1 ) { + close(fd); + return -2; + } + mem = mm_malloc((size_t)st.st_size + 1); + if (!mem) { + close(fd); + return -2; + } + read_so_far = 0; +#ifdef _WIN32 +#define N_TO_READ(x) ((x) > INT_MAX) ? INT_MAX : ((int)(x)) +#else +#define N_TO_READ(x) (x) +#endif + while ((r = read(fd, mem+read_so_far, N_TO_READ(st.st_size - read_so_far))) > 0) { + read_so_far += r; + if (read_so_far >= (size_t)st.st_size) + break; + EVUTIL_ASSERT(read_so_far < (size_t)st.st_size); + } + close(fd); + if (r < 0) { + mm_free(mem); + return -2; + } + mem[read_so_far] = 0; + + *len_out = read_so_far; + *content_out = mem; + return 0; +} + +int +evutil_socketpair(int family, int type, int protocol, evutil_socket_t fd[2]) +{ +#ifndef _WIN32 + return socketpair(family, type, protocol, fd); +#else + return evutil_ersatz_socketpair_(family, type, protocol, fd); +#endif +} + +int +evutil_ersatz_socketpair_(int family, int type, int protocol, + evutil_socket_t fd[2]) +{ + /* This code is originally from Tor. Used with permission. */ + + /* This socketpair does not work when localhost is down. So + * it's really not the same thing at all. But it's close enough + * for now, and really, when localhost is down sometimes, we + * have other problems too. + */ +#ifdef _WIN32 +#define ERR(e) WSA##e +#else +#define ERR(e) e +#endif + evutil_socket_t listener = -1; + evutil_socket_t connector = -1; + evutil_socket_t acceptor = -1; + struct sockaddr_in listen_addr; + struct sockaddr_in connect_addr; + ev_socklen_t size; + int saved_errno = -1; + int family_test; + + family_test = family != AF_INET; +#ifdef AF_UNIX + family_test = family_test && (family != AF_UNIX); +#endif + if (protocol || family_test) { + EVUTIL_SET_SOCKET_ERROR(ERR(EAFNOSUPPORT)); + return -1; + } + + if (!fd) { + EVUTIL_SET_SOCKET_ERROR(ERR(EINVAL)); + return -1; + } + + listener = socket(AF_INET, type, 0); + if (listener < 0) + return -1; + memset(&listen_addr, 0, sizeof(listen_addr)); + listen_addr.sin_family = AF_INET; + listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + listen_addr.sin_port = 0; /* kernel chooses port. */ + if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr)) + == -1) + goto tidy_up_and_fail; + if (listen(listener, 1) == -1) + goto tidy_up_and_fail; + + connector = socket(AF_INET, type, 0); + if (connector < 0) + goto tidy_up_and_fail; + + memset(&connect_addr, 0, sizeof(connect_addr)); + + /* We want to find out the port number to connect to. 
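+	 * The listener was bound with sin_port = 0, so the kernel chose an
+	 * ephemeral port; getsockname() below recovers the actual loopback
+	 * address and port for the connector to use.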
*/ + size = sizeof(connect_addr); + if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1) + goto tidy_up_and_fail; + if (size != sizeof (connect_addr)) + goto abort_tidy_up_and_fail; + if (connect(connector, (struct sockaddr *) &connect_addr, + sizeof(connect_addr)) == -1) + goto tidy_up_and_fail; + + size = sizeof(listen_addr); + acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size); + if (acceptor < 0) + goto tidy_up_and_fail; + if (size != sizeof(listen_addr)) + goto abort_tidy_up_and_fail; + /* Now check we are talking to ourself by matching port and host on the + two sockets. */ + if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1) + goto tidy_up_and_fail; + if (size != sizeof (connect_addr) + || listen_addr.sin_family != connect_addr.sin_family + || listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr + || listen_addr.sin_port != connect_addr.sin_port) + goto abort_tidy_up_and_fail; + evutil_closesocket(listener); + fd[0] = connector; + fd[1] = acceptor; + + return 0; + + abort_tidy_up_and_fail: + saved_errno = ERR(ECONNABORTED); + tidy_up_and_fail: + if (saved_errno < 0) + saved_errno = EVUTIL_SOCKET_ERROR(); + if (listener != -1) + evutil_closesocket(listener); + if (connector != -1) + evutil_closesocket(connector); + if (acceptor != -1) + evutil_closesocket(acceptor); + + EVUTIL_SET_SOCKET_ERROR(saved_errno); + return -1; +#undef ERR +} + +int +evutil_make_socket_nonblocking(evutil_socket_t fd) +{ +#ifdef _WIN32 + { + unsigned long nonblocking = 1; + if (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) { + event_sock_warn(fd, "fcntl(%d, F_GETFL)", (int)fd); + return -1; + } + } +#else + { + int flags; + if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { + event_warn("fcntl(%d, F_GETFL)", fd); + return -1; + } + if (!(flags & O_NONBLOCK)) { + if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { + event_warn("fcntl(%d, F_SETFL)", fd); + return -1; + } + } + } +#endif + return 0; +} + +/* Faster version of evutil_make_socket_nonblocking for internal use. + * + * Requires that no F_SETFL flags were previously set on the fd. + */ +static int +evutil_fast_socket_nonblocking(evutil_socket_t fd) +{ +#ifdef _WIN32 + return evutil_make_socket_nonblocking(fd); +#else + if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) { + event_warn("fcntl(%d, F_SETFL)", fd); + return -1; + } + return 0; +#endif +} + +int +evutil_make_listen_socket_reuseable(evutil_socket_t sock) +{ +#if defined(SO_REUSEADDR) && !defined(_WIN32) + int one = 1; + /* REUSEADDR on Unix means, "don't hang on to this address after the + * listener is closed." On Windows, though, it means "don't keep other + * processes from binding to this address while we're using it. */ + return setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*) &one, + (ev_socklen_t)sizeof(one)); +#else + return 0; +#endif +} + +int +evutil_make_listen_socket_reuseable_port(evutil_socket_t sock) +{ +#if defined __linux__ && defined(SO_REUSEPORT) + int one = 1; + /* REUSEPORT on Linux 3.9+ means, "Multiple servers (processes or + * threads) can bind to the same port if they each set the option. 
*/ + return setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (void*) &one, + (ev_socklen_t)sizeof(one)); +#else + return 0; +#endif +} + +int +evutil_make_listen_socket_ipv6only(evutil_socket_t sock) +{ +#if defined(IPV6_V6ONLY) + int one = 1; + return setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*) &one, + (ev_socklen_t)sizeof(one)); +#endif + return 0; +} + +int +evutil_make_tcp_listen_socket_deferred(evutil_socket_t sock) +{ +#if defined(EVENT__HAVE_NETINET_TCP_H) && defined(TCP_DEFER_ACCEPT) + int one = 1; + + /* TCP_DEFER_ACCEPT tells the kernel to call defer accept() only after data + * has arrived and ready to read */ + return setsockopt(sock, IPPROTO_TCP, TCP_DEFER_ACCEPT, &one, + (ev_socklen_t)sizeof(one)); +#endif + return 0; +} + +int +evutil_make_socket_closeonexec(evutil_socket_t fd) +{ +#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD) + int flags; + if ((flags = fcntl(fd, F_GETFD, NULL)) < 0) { + event_warn("fcntl(%d, F_GETFD)", fd); + return -1; + } + if (!(flags & FD_CLOEXEC)) { + if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) { + event_warn("fcntl(%d, F_SETFD)", fd); + return -1; + } + } +#endif + return 0; +} + +/* Faster version of evutil_make_socket_closeonexec for internal use. + * + * Requires that no F_SETFD flags were previously set on the fd. + */ +static int +evutil_fast_socket_closeonexec(evutil_socket_t fd) +{ +#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD) + if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) { + event_warn("fcntl(%d, F_SETFD)", fd); + return -1; + } +#endif + return 0; +} + +int +evutil_closesocket(evutil_socket_t sock) +{ +#ifndef _WIN32 + return close(sock); +#else + return closesocket(sock); +#endif +} + +ev_int64_t +evutil_strtoll(const char *s, char **endptr, int base) +{ +#ifdef EVENT__HAVE_STRTOLL + return (ev_int64_t)strtoll(s, endptr, base); +#elif EVENT__SIZEOF_LONG == 8 + return (ev_int64_t)strtol(s, endptr, base); +#elif defined(_WIN32) && defined(_MSC_VER) && _MSC_VER < 1300 + /* XXXX on old versions of MS APIs, we only support base + * 10. */ + ev_int64_t r; + if (base != 10) + return 0; + r = (ev_int64_t) _atoi64(s); + while (isspace(*s)) + ++s; + if (*s == '-') + ++s; + while (isdigit(*s)) + ++s; + if (endptr) + *endptr = (char*) s; + return r; +#elif defined(_WIN32) + return (ev_int64_t) _strtoi64(s, endptr, base); +#elif defined(EVENT__SIZEOF_LONG_LONG) && EVENT__SIZEOF_LONG_LONG == 8 + long long r; + int n; + if (base != 10 && base != 16) + return 0; + if (base == 10) { + n = sscanf(s, "%lld", &r); + } else { + unsigned long long ru=0; + n = sscanf(s, "%llx", &ru); + if (ru > EV_INT64_MAX) + return 0; + r = (long long) ru; + } + if (n != 1) + return 0; + while (EVUTIL_ISSPACE_(*s)) + ++s; + if (*s == '-') + ++s; + if (base == 10) { + while (EVUTIL_ISDIGIT_(*s)) + ++s; + } else { + while (EVUTIL_ISXDIGIT_(*s)) + ++s; + } + if (endptr) + *endptr = (char*) s; + return r; +#else +#error "I don't know how to parse 64-bit integers." +#endif +} + +#ifdef _WIN32 +int +evutil_socket_geterror(evutil_socket_t sock) +{ + int optval, optvallen=sizeof(optval); + int err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK && sock >= 0) { + if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void*)&optval, + &optvallen)) + return err; + if (optval) + return optval; + } + return err; +} +#endif + +/* XXX we should use an enum here. */ +/* 2 for connection refused, 1 for connected, 0 for not yet, -1 for error. 
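+ * A 0 return means "in progress": wait for the socket to become
+ * writable, then call evutil_socket_finished_connecting_() to learn
+ * whether the connect ultimately succeeded.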
*/ +int +evutil_socket_connect_(evutil_socket_t *fd_ptr, const struct sockaddr *sa, int socklen) +{ + int made_fd = 0; + + if (*fd_ptr < 0) { + if ((*fd_ptr = socket(sa->sa_family, SOCK_STREAM, 0)) < 0) + goto err; + made_fd = 1; + if (evutil_make_socket_nonblocking(*fd_ptr) < 0) { + goto err; + } + } + + if (connect(*fd_ptr, sa, socklen) < 0) { + int e = evutil_socket_geterror(*fd_ptr); + if (EVUTIL_ERR_CONNECT_RETRIABLE(e)) + return 0; + if (EVUTIL_ERR_CONNECT_REFUSED(e)) + return 2; + goto err; + } else { + return 1; + } + +err: + if (made_fd) { + evutil_closesocket(*fd_ptr); + *fd_ptr = -1; + } + return -1; +} + +/* Check whether a socket on which we called connect() is done + connecting. Return 1 for connected, 0 for not yet, -1 for error. In the + error case, set the current socket errno to the error that happened during + the connect operation. */ +int +evutil_socket_finished_connecting_(evutil_socket_t fd) +{ + int e; + ev_socklen_t elen = sizeof(e); + + if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&e, &elen) < 0) + return -1; + + if (e) { + if (EVUTIL_ERR_CONNECT_RETRIABLE(e)) + return 0; + EVUTIL_SET_SOCKET_ERROR(e); + return -1; + } + + return 1; +} + +#if (EVUTIL_AI_PASSIVE|EVUTIL_AI_CANONNAME|EVUTIL_AI_NUMERICHOST| \ + EVUTIL_AI_NUMERICSERV|EVUTIL_AI_V4MAPPED|EVUTIL_AI_ALL| \ + EVUTIL_AI_ADDRCONFIG) != \ + (EVUTIL_AI_PASSIVE^EVUTIL_AI_CANONNAME^EVUTIL_AI_NUMERICHOST^ \ + EVUTIL_AI_NUMERICSERV^EVUTIL_AI_V4MAPPED^EVUTIL_AI_ALL^ \ + EVUTIL_AI_ADDRCONFIG) +#error "Some of our EVUTIL_AI_* flags seem to overlap with system AI_* flags" +#endif + +/* We sometimes need to know whether we have an ipv4 address and whether we + have an ipv6 address. If 'have_checked_interfaces', then we've already done + the test. If 'had_ipv4_address', then it turns out we had an ipv4 address. + If 'had_ipv6_address', then it turns out we had an ipv6 address. These are + set by evutil_check_interfaces. */ +static int have_checked_interfaces, had_ipv4_address, had_ipv6_address; + +/* True iff the IPv4 address 'addr', in host order, is in 127.0.0.0/8 */ +static inline int evutil_v4addr_is_localhost(ev_uint32_t addr) +{ return addr>>24 == 127; } + +/* True iff the IPv4 address 'addr', in host order, is link-local + * 169.254.0.0/16 (RFC3927) */ +static inline int evutil_v4addr_is_linklocal(ev_uint32_t addr) +{ return ((addr & 0xffff0000U) == 0xa9fe0000U); } + +/* True iff the IPv4 address 'addr', in host order, is a class D + * (multiclass) address. 
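+ * These are the multicast addresses, 224.0.0.0/4: the top four bits
+ * of the address are 1110.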
*/ +static inline int evutil_v4addr_is_classd(ev_uint32_t addr) +{ return ((addr>>24) & 0xf0) == 0xe0; } + +int +evutil_v4addr_is_local_(const struct in_addr *in) +{ + const ev_uint32_t addr = ntohl(in->s_addr); + return addr == INADDR_ANY || + evutil_v4addr_is_localhost(addr) || + evutil_v4addr_is_linklocal(addr) || + evutil_v4addr_is_classd(addr); +} +int +evutil_v6addr_is_local_(const struct in6_addr *in) +{ + static const char ZEROES[] = + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00"; + + const unsigned char *addr = (const unsigned char *)in->s6_addr; + return !memcmp(addr, ZEROES, 8) || + ((addr[0] & 0xfe) == 0xfc) || + (addr[0] == 0xfe && (addr[1] & 0xc0) == 0x80) || + (addr[0] == 0xfe && (addr[1] & 0xc0) == 0xc0) || + (addr[0] == 0xff); +} + +static void +evutil_found_ifaddr(const struct sockaddr *sa) +{ + if (sa->sa_family == AF_INET) { + const struct sockaddr_in *sin = (struct sockaddr_in *)sa; + if (!evutil_v4addr_is_local_(&sin->sin_addr)) { + event_debug(("Detected an IPv4 interface")); + had_ipv4_address = 1; + } + } else if (sa->sa_family == AF_INET6) { + const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; + if (!evutil_v6addr_is_local_(&sin6->sin6_addr)) { + event_debug(("Detected an IPv6 interface")); + had_ipv6_address = 1; + } + } +} + +#ifdef _WIN32 +typedef ULONG (WINAPI *GetAdaptersAddresses_fn_t)( + ULONG, ULONG, PVOID, PIP_ADAPTER_ADDRESSES, PULONG); +#endif + +static int +evutil_check_ifaddrs(void) +{ +#if defined(EVENT__HAVE_GETIFADDRS) + /* Most free Unixy systems provide getifaddrs, which gives us a linked list + * of struct ifaddrs. */ + struct ifaddrs *ifa = NULL; + const struct ifaddrs *i; + if (getifaddrs(&ifa) < 0) { + event_warn("Unable to call getifaddrs()"); + return -1; + } + + for (i = ifa; i; i = i->ifa_next) { + if (!i->ifa_addr) + continue; + evutil_found_ifaddr(i->ifa_addr); + } + + freeifaddrs(ifa); + return 0; +#elif defined(_WIN32) + /* Windows XP began to provide GetAdaptersAddresses. Windows 2000 had a + "GetAdaptersInfo", but that's deprecated; let's just try + GetAdaptersAddresses and fall back to connect+getsockname. + */ + HMODULE lib = evutil_load_windows_system_library_(TEXT("ihplapi.dll")); + GetAdaptersAddresses_fn_t fn; + ULONG size, res; + IP_ADAPTER_ADDRESSES *addresses = NULL, *address; + int result = -1; + +#define FLAGS (GAA_FLAG_SKIP_ANYCAST | \ + GAA_FLAG_SKIP_MULTICAST | \ + GAA_FLAG_SKIP_DNS_SERVER) + + if (!lib) + goto done; + + if (!(fn = (GetAdaptersAddresses_fn_t) GetProcAddress(lib, "GetAdaptersAddresses"))) + goto done; + + /* Guess how much space we need. */ + size = 15*1024; + addresses = mm_malloc(size); + if (!addresses) + goto done; + res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size); + if (res == ERROR_BUFFER_OVERFLOW) { + /* we didn't guess that we needed enough space; try again */ + mm_free(addresses); + addresses = mm_malloc(size); + if (!addresses) + goto done; + res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size); + } + if (res != NO_ERROR) + goto done; + + for (address = addresses; address; address = address->Next) { + IP_ADAPTER_UNICAST_ADDRESS *a; + for (a = address->FirstUnicastAddress; a; a = a->Next) { + /* Yes, it's a linked list inside a linked list */ + struct sockaddr *sa = a->Address.lpSockaddr; + evutil_found_ifaddr(sa); + } + } + + result = 0; +done: + if (lib) + FreeLibrary(lib); + if (addresses) + mm_free(addresses); + return result; +#else + return -1; +#endif +} + +/* Test whether we have an ipv4 interface and an ipv6 interface. 
Return 0 if + * the test seemed successful. */ +static int +evutil_check_interfaces(void) +{ + evutil_socket_t fd = -1; + struct sockaddr_in sin, sin_out; + struct sockaddr_in6 sin6, sin6_out; + ev_socklen_t sin_out_len = sizeof(sin_out); + ev_socklen_t sin6_out_len = sizeof(sin6_out); + int r; + if (have_checked_interfaces) + return 0; + + /* From this point on we have done the ipv4/ipv6 interface check */ + have_checked_interfaces = 1; + + if (evutil_check_ifaddrs() == 0) { + /* Use a nice sane interface, if this system has one. */ + return 0; + } + + /* Ugh. There was no nice sane interface. So to check whether we have + * an interface open for a given protocol, will try to make a UDP + * 'connection' to a remote host on the internet. We don't actually + * use it, so the address doesn't matter, but we want to pick one that + * keep us from using a host- or link-local interface. */ + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = htons(53); + r = evutil_inet_pton(AF_INET, "18.244.0.188", &sin.sin_addr); + EVUTIL_ASSERT(r); + + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_port = htons(53); + r = evutil_inet_pton(AF_INET6, "2001:4860:b002::68", &sin6.sin6_addr); + EVUTIL_ASSERT(r); + + memset(&sin_out, 0, sizeof(sin_out)); + memset(&sin6_out, 0, sizeof(sin6_out)); + + /* XXX some errnos mean 'no address'; some mean 'not enough sockets'. */ + if ((fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) >= 0 && + connect(fd, (struct sockaddr*)&sin, sizeof(sin)) == 0 && + getsockname(fd, (struct sockaddr*)&sin_out, &sin_out_len) == 0) { + /* We might have an IPv4 interface. */ + evutil_found_ifaddr((struct sockaddr*) &sin_out); + } + if (fd >= 0) + evutil_closesocket(fd); + + if ((fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP)) >= 0 && + connect(fd, (struct sockaddr*)&sin6, sizeof(sin6)) == 0 && + getsockname(fd, (struct sockaddr*)&sin6_out, &sin6_out_len) == 0) { + /* We might have an IPv6 interface. */ + evutil_found_ifaddr((struct sockaddr*) &sin6_out); + } + + if (fd >= 0) + evutil_closesocket(fd); + + return 0; +} + +/* Internal addrinfo flag. This one is set when we allocate the addrinfo from + * inside libevent. Otherwise, the built-in getaddrinfo() function allocated + * it, and we should trust what they said. + **/ +#define EVUTIL_AI_LIBEVENT_ALLOCATED 0x80000000 + +/* Helper: construct a new addrinfo containing the socket address in + * 'sa', which must be a sockaddr_in or a sockaddr_in6. Take the + * socktype and protocol info from hints. If they weren't set, then + * allocate both a TCP and a UDP addrinfo. + */ +struct evutil_addrinfo * +evutil_new_addrinfo_(struct sockaddr *sa, ev_socklen_t socklen, + const struct evutil_addrinfo *hints) +{ + struct evutil_addrinfo *res; + EVUTIL_ASSERT(hints); + + if (hints->ai_socktype == 0 && hints->ai_protocol == 0) { + /* Indecisive user! Give them a UDP and a TCP. */ + struct evutil_addrinfo *r1, *r2; + struct evutil_addrinfo tmp; + memcpy(&tmp, hints, sizeof(tmp)); + tmp.ai_socktype = SOCK_STREAM; tmp.ai_protocol = IPPROTO_TCP; + r1 = evutil_new_addrinfo_(sa, socklen, &tmp); + if (!r1) + return NULL; + tmp.ai_socktype = SOCK_DGRAM; tmp.ai_protocol = IPPROTO_UDP; + r2 = evutil_new_addrinfo_(sa, socklen, &tmp); + if (!r2) { + evutil_freeaddrinfo(r1); + return NULL; + } + r1->ai_next = r2; + return r1; + } + + /* We're going to allocate extra space to hold the sockaddr. 
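+	 * Keeping the evutil_addrinfo and its ai_addr in one allocation
+	 * lets evutil_freeaddrinfo() release both with a single mm_free().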
*/ + res = mm_calloc(1,sizeof(struct evutil_addrinfo)+socklen); + if (!res) + return NULL; + res->ai_addr = (struct sockaddr*) + (((char*)res) + sizeof(struct evutil_addrinfo)); + memcpy(res->ai_addr, sa, socklen); + res->ai_addrlen = socklen; + res->ai_family = sa->sa_family; /* Same or not? XXX */ + res->ai_flags = EVUTIL_AI_LIBEVENT_ALLOCATED; + res->ai_socktype = hints->ai_socktype; + res->ai_protocol = hints->ai_protocol; + + return res; +} + +/* Append the addrinfo 'append' to the end of 'first', and return the start of + * the list. Either element can be NULL, in which case we return the element + * that is not NULL. */ +struct evutil_addrinfo * +evutil_addrinfo_append_(struct evutil_addrinfo *first, + struct evutil_addrinfo *append) +{ + struct evutil_addrinfo *ai = first; + if (!ai) + return append; + while (ai->ai_next) + ai = ai->ai_next; + ai->ai_next = append; + + return first; +} + +static int +parse_numeric_servname(const char *servname) +{ + int n; + char *endptr=NULL; + n = (int) strtol(servname, &endptr, 10); + if (n>=0 && n <= 65535 && servname[0] && endptr && !endptr[0]) + return n; + else + return -1; +} + +/** Parse a service name in 'servname', which can be a decimal port. + * Return the port number, or -1 on error. + */ +static int +evutil_parse_servname(const char *servname, const char *protocol, + const struct evutil_addrinfo *hints) +{ + int n = parse_numeric_servname(servname); + if (n>=0) + return n; +#if defined(EVENT__HAVE_GETSERVBYNAME) || defined(_WIN32) + if (!(hints->ai_flags & EVUTIL_AI_NUMERICSERV)) { + struct servent *ent = getservbyname(servname, protocol); + if (ent) { + return ntohs(ent->s_port); + } + } +#endif + return -1; +} + +/* Return a string corresponding to a protocol number that we can pass to + * getservyname. */ +static const char * +evutil_unparse_protoname(int proto) +{ + switch (proto) { + case 0: + return NULL; + case IPPROTO_TCP: + return "tcp"; + case IPPROTO_UDP: + return "udp"; +#ifdef IPPROTO_SCTP + case IPPROTO_SCTP: + return "sctp"; +#endif + default: +#ifdef EVENT__HAVE_GETPROTOBYNUMBER + { + struct protoent *ent = getprotobynumber(proto); + if (ent) + return ent->p_name; + } +#endif + return NULL; + } +} + +static void +evutil_getaddrinfo_infer_protocols(struct evutil_addrinfo *hints) +{ + /* If we can guess the protocol from the socktype, do so. */ + if (!hints->ai_protocol && hints->ai_socktype) { + if (hints->ai_socktype == SOCK_DGRAM) + hints->ai_protocol = IPPROTO_UDP; + else if (hints->ai_socktype == SOCK_STREAM) + hints->ai_protocol = IPPROTO_TCP; + } + + /* Set the socktype if it isn't set. */ + if (!hints->ai_socktype && hints->ai_protocol) { + if (hints->ai_protocol == IPPROTO_UDP) + hints->ai_socktype = SOCK_DGRAM; + else if (hints->ai_protocol == IPPROTO_TCP) + hints->ai_socktype = SOCK_STREAM; +#ifdef IPPROTO_SCTP + else if (hints->ai_protocol == IPPROTO_SCTP) + hints->ai_socktype = SOCK_STREAM; +#endif + } +} + +#if AF_UNSPEC != PF_UNSPEC +#error "I cannot build on a system where AF_UNSPEC != PF_UNSPEC" +#endif + +/** Implements the part of looking up hosts by name that's common to both + * the blocking and nonblocking resolver: + * - Adjust 'hints' to have a reasonable socktype and protocol. + * - Look up the port based on 'servname', and store it in *portnum, + * - Handle the nodename==NULL case + * - Handle some invalid arguments cases. + * - Handle the cases where nodename is an IPv4 or IPv6 address. + * + * If we need the resolver to look up the hostname, we return + * EVUTIL_EAI_NEED_RESOLVE. 
Otherwise, we can completely implement + * getaddrinfo: we return 0 or an appropriate EVUTIL_EAI_* error, and + * set *res as getaddrinfo would. + */ +int +evutil_getaddrinfo_common_(const char *nodename, const char *servname, + struct evutil_addrinfo *hints, struct evutil_addrinfo **res, int *portnum) +{ + int port = 0; + unsigned int if_index; + const char *pname; + + if (nodename == NULL && servname == NULL) + return EVUTIL_EAI_NONAME; + + /* We only understand 3 families */ + if (hints->ai_family != PF_UNSPEC && hints->ai_family != PF_INET && + hints->ai_family != PF_INET6) + return EVUTIL_EAI_FAMILY; + + evutil_getaddrinfo_infer_protocols(hints); + + /* Look up the port number and protocol, if possible. */ + pname = evutil_unparse_protoname(hints->ai_protocol); + if (servname) { + /* XXXX We could look at the protocol we got back from + * getservbyname, but it doesn't seem too useful. */ + port = evutil_parse_servname(servname, pname, hints); + if (port < 0) { + return EVUTIL_EAI_NONAME; + } + } + + /* If we have no node name, then we're supposed to bind to 'any' and + * connect to localhost. */ + if (nodename == NULL) { + struct evutil_addrinfo *res4=NULL, *res6=NULL; + if (hints->ai_family != PF_INET) { /* INET6 or UNSPEC. */ + struct sockaddr_in6 sin6; + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_port = htons(port); + if (hints->ai_flags & EVUTIL_AI_PASSIVE) { + /* Bind to :: */ + } else { + /* connect to ::1 */ + sin6.sin6_addr.s6_addr[15] = 1; + } + res6 = evutil_new_addrinfo_((struct sockaddr*)&sin6, + sizeof(sin6), hints); + if (!res6) + return EVUTIL_EAI_MEMORY; + } + + if (hints->ai_family != PF_INET6) { /* INET or UNSPEC */ + struct sockaddr_in sin; + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = htons(port); + if (hints->ai_flags & EVUTIL_AI_PASSIVE) { + /* Bind to 0.0.0.0 */ + } else { + /* connect to 127.0.0.1 */ + sin.sin_addr.s_addr = htonl(0x7f000001); + } + res4 = evutil_new_addrinfo_((struct sockaddr*)&sin, + sizeof(sin), hints); + if (!res4) { + if (res6) + evutil_freeaddrinfo(res6); + return EVUTIL_EAI_MEMORY; + } + } + *res = evutil_addrinfo_append_(res4, res6); + return 0; + } + + /* If we can, we should try to parse the hostname without resolving + * it. */ + /* Try ipv6. */ + if (hints->ai_family == PF_INET6 || hints->ai_family == PF_UNSPEC) { + struct sockaddr_in6 sin6; + memset(&sin6, 0, sizeof(sin6)); + if (1==evutil_inet_pton_scope(AF_INET6, nodename, + &sin6.sin6_addr, &if_index)) + { + /* Got an ipv6 address. */ + sin6.sin6_family = AF_INET6; + sin6.sin6_port = htons(port); + sin6.sin6_scope_id= if_index; + *res = evutil_new_addrinfo_((struct sockaddr*)&sin6, + sizeof(sin6), hints); + if (!*res) + return EVUTIL_EAI_MEMORY; + return 0; + } + } + + /* Try ipv4. */ + if (hints->ai_family == PF_INET || hints->ai_family == PF_UNSPEC) { + struct sockaddr_in sin; + memset(&sin, 0, sizeof(sin)); + if (1==evutil_inet_pton(AF_INET, nodename, &sin.sin_addr)) { + /* Got an ipv4 address. */ + sin.sin_family = AF_INET; + sin.sin_port = htons(port); + *res = evutil_new_addrinfo_((struct sockaddr*)&sin, + sizeof(sin), hints); + if (!*res) + return EVUTIL_EAI_MEMORY; + return 0; + } + } + + + /* If we have reached this point, we definitely need to do a DNS + * lookup. */ + if ((hints->ai_flags & EVUTIL_AI_NUMERICHOST)) { + /* If we're not allowed to do one, then say so. 
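+		 * EVUTIL_AI_NUMERICHOST means the caller only wants literal
+		 * addresses parsed, never a DNS query.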
*/ + return EVUTIL_EAI_NONAME; + } + *portnum = port; + return EVUTIL_EAI_NEED_RESOLVE; +} + +#ifdef EVENT__HAVE_GETADDRINFO +#define USE_NATIVE_GETADDRINFO +#endif + +#ifdef USE_NATIVE_GETADDRINFO +/* A mask of all the flags that we declare, so we can clear them before calling + * the native getaddrinfo */ +static const unsigned int ALL_NONNATIVE_AI_FLAGS = +#ifndef AI_PASSIVE + EVUTIL_AI_PASSIVE | +#endif +#ifndef AI_CANONNAME + EVUTIL_AI_CANONNAME | +#endif +#ifndef AI_NUMERICHOST + EVUTIL_AI_NUMERICHOST | +#endif +#ifndef AI_NUMERICSERV + EVUTIL_AI_NUMERICSERV | +#endif +#ifndef AI_ADDRCONFIG + EVUTIL_AI_ADDRCONFIG | +#endif +#ifndef AI_ALL + EVUTIL_AI_ALL | +#endif +#ifndef AI_V4MAPPED + EVUTIL_AI_V4MAPPED | +#endif + EVUTIL_AI_LIBEVENT_ALLOCATED; + +static const unsigned int ALL_NATIVE_AI_FLAGS = +#ifdef AI_PASSIVE + AI_PASSIVE | +#endif +#ifdef AI_CANONNAME + AI_CANONNAME | +#endif +#ifdef AI_NUMERICHOST + AI_NUMERICHOST | +#endif +#ifdef AI_NUMERICSERV + AI_NUMERICSERV | +#endif +#ifdef AI_ADDRCONFIG + AI_ADDRCONFIG | +#endif +#ifdef AI_ALL + AI_ALL | +#endif +#ifdef AI_V4MAPPED + AI_V4MAPPED | +#endif + 0; +#endif + +#ifndef USE_NATIVE_GETADDRINFO +/* Helper for systems with no getaddrinfo(): make one or more addrinfos out of + * a struct hostent. + */ +static struct evutil_addrinfo * +addrinfo_from_hostent(const struct hostent *ent, + int port, const struct evutil_addrinfo *hints) +{ + int i; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + struct sockaddr *sa; + int socklen; + struct evutil_addrinfo *res=NULL, *ai; + void *addrp; + + if (ent->h_addrtype == PF_INET) { + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = htons(port); + sa = (struct sockaddr *)&sin; + socklen = sizeof(struct sockaddr_in); + addrp = &sin.sin_addr; + if (ent->h_length != sizeof(sin.sin_addr)) { + event_warnx("Weird h_length from gethostbyname"); + return NULL; + } + } else if (ent->h_addrtype == PF_INET6) { + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_port = htons(port); + sa = (struct sockaddr *)&sin6; + socklen = sizeof(struct sockaddr_in6); + addrp = &sin6.sin6_addr; + if (ent->h_length != sizeof(sin6.sin6_addr)) { + event_warnx("Weird h_length from gethostbyname"); + return NULL; + } + } else + return NULL; + + for (i = 0; ent->h_addr_list[i]; ++i) { + memcpy(addrp, ent->h_addr_list[i], ent->h_length); + ai = evutil_new_addrinfo_(sa, socklen, hints); + if (!ai) { + evutil_freeaddrinfo(res); + return NULL; + } + res = evutil_addrinfo_append_(res, ai); + } + + if (res && ((hints->ai_flags & EVUTIL_AI_CANONNAME) && ent->h_name)) { + res->ai_canonname = mm_strdup(ent->h_name); + if (res->ai_canonname == NULL) { + evutil_freeaddrinfo(res); + return NULL; + } + } + + return res; +} +#endif + +/* If the EVUTIL_AI_ADDRCONFIG flag is set on hints->ai_flags, and + * hints->ai_family is PF_UNSPEC, then revise the value of hints->ai_family so + * that we'll only get addresses we could maybe connect to. 
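+ * That is: with EVUTIL_AI_ADDRCONFIG and PF_UNSPEC, drop the address
+ * family for which no usable (non-local) interface address was found.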
+ */ +void +evutil_adjust_hints_for_addrconfig_(struct evutil_addrinfo *hints) +{ + if (!(hints->ai_flags & EVUTIL_AI_ADDRCONFIG)) + return; + if (hints->ai_family != PF_UNSPEC) + return; + evutil_check_interfaces(); + if (had_ipv4_address && !had_ipv6_address) { + hints->ai_family = PF_INET; + } else if (!had_ipv4_address && had_ipv6_address) { + hints->ai_family = PF_INET6; + } +} + +#ifdef USE_NATIVE_GETADDRINFO +static int need_numeric_port_hack_=0; +static int need_socktype_protocol_hack_=0; +static int tested_for_getaddrinfo_hacks=0; + +/* Some older BSDs (like OpenBSD up to 4.6) used to believe that + giving a numeric port without giving an ai_socktype was verboten. + We test for this so we can apply an appropriate workaround. If it + turns out that the bug is present, then: + + - If nodename==NULL and servname is numeric, we build an answer + ourselves using evutil_getaddrinfo_common_(). + + - If nodename!=NULL and servname is numeric, then we set + servname=NULL when calling getaddrinfo, and post-process the + result to set the ports on it. + + We test for this bug at runtime, since otherwise we can't have the + same binary run on multiple BSD versions. + + - Some versions of Solaris believe that it's nice to leave to protocol + field set to 0. We test for this so we can apply an appropriate + workaround. +*/ +static struct evutil_addrinfo *ai_find_protocol(struct evutil_addrinfo *ai) +{ + while (ai) { + if (ai->ai_protocol) + return ai; + ai = ai->ai_next; + } + return NULL; +} +static void +test_for_getaddrinfo_hacks(void) +{ + int r, r2; + struct evutil_addrinfo *ai=NULL, *ai2=NULL, *ai3=NULL; + struct evutil_addrinfo hints; + + memset(&hints,0,sizeof(hints)); + hints.ai_family = PF_UNSPEC; + hints.ai_flags = +#ifdef AI_NUMERICHOST + AI_NUMERICHOST | +#endif +#ifdef AI_NUMERICSERV + AI_NUMERICSERV | +#endif + 0; + r = getaddrinfo("1.2.3.4", "80", &hints, &ai); + getaddrinfo("1.2.3.4", NULL, &hints, &ai3); + hints.ai_socktype = SOCK_STREAM; + r2 = getaddrinfo("1.2.3.4", "80", &hints, &ai2); + if (r2 == 0 && r != 0) { + need_numeric_port_hack_=1; + } + if (!ai_find_protocol(ai2) || !ai_find_protocol(ai3)) { + need_socktype_protocol_hack_=1; + } + + if (ai) + freeaddrinfo(ai); + if (ai2) + freeaddrinfo(ai2); + if (ai3) + freeaddrinfo(ai3); + tested_for_getaddrinfo_hacks=1; +} + +static inline int +need_numeric_port_hack(void) +{ + if (!tested_for_getaddrinfo_hacks) + test_for_getaddrinfo_hacks(); + return need_numeric_port_hack_; +} + +static inline int +need_socktype_protocol_hack(void) +{ + if (!tested_for_getaddrinfo_hacks) + test_for_getaddrinfo_hacks(); + return need_socktype_protocol_hack_; +} + +static void +apply_numeric_port_hack(int port, struct evutil_addrinfo **ai) +{ + /* Now we run through the list and set the ports on all of the + * results where ports would make sense. */ + for ( ; *ai; ai = &(*ai)->ai_next) { + struct sockaddr *sa = (*ai)->ai_addr; + if (sa && sa->sa_family == AF_INET) { + struct sockaddr_in *sin = (struct sockaddr_in*)sa; + sin->sin_port = htons(port); + } else if (sa && sa->sa_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)sa; + sin6->sin6_port = htons(port); + } else { + /* A numeric port makes no sense here; remove this one + * from the list. 
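+			 * The entry came from the system resolver, so unlink
+			 * it and hand it back to freeaddrinfo().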
*/ + struct evutil_addrinfo *victim = *ai; + *ai = victim->ai_next; + victim->ai_next = NULL; + freeaddrinfo(victim); + } + } +} + +static int +apply_socktype_protocol_hack(struct evutil_addrinfo *ai) +{ + struct evutil_addrinfo *ai_new; + for (; ai; ai = ai->ai_next) { + evutil_getaddrinfo_infer_protocols(ai); + if (ai->ai_socktype || ai->ai_protocol) + continue; + ai_new = mm_malloc(sizeof(*ai_new)); + if (!ai_new) + return -1; + memcpy(ai_new, ai, sizeof(*ai_new)); + ai->ai_socktype = SOCK_STREAM; + ai->ai_protocol = IPPROTO_TCP; + ai_new->ai_socktype = SOCK_DGRAM; + ai_new->ai_protocol = IPPROTO_UDP; + + ai_new->ai_next = ai->ai_next; + ai->ai_next = ai_new; + } + return 0; +} +#endif + +int +evutil_getaddrinfo(const char *nodename, const char *servname, + const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res) +{ +#ifdef USE_NATIVE_GETADDRINFO + struct evutil_addrinfo hints; + int portnum=-1, need_np_hack, err; + + if (hints_in) { + memcpy(&hints, hints_in, sizeof(hints)); + } else { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + } + +#ifndef AI_ADDRCONFIG + /* Not every system has AI_ADDRCONFIG, so fake it. */ + if (hints.ai_family == PF_UNSPEC && + (hints.ai_flags & EVUTIL_AI_ADDRCONFIG)) { + evutil_adjust_hints_for_addrconfig_(&hints); + } +#endif + +#ifndef AI_NUMERICSERV + /* Not every system has AI_NUMERICSERV, so fake it. */ + if (hints.ai_flags & EVUTIL_AI_NUMERICSERV) { + if (servname && parse_numeric_servname(servname)<0) + return EVUTIL_EAI_NONAME; + } +#endif + + /* Enough operating systems handle enough common non-resolve + * cases here weirdly enough that we are better off just + * overriding them. For example: + * + * - Windows doesn't like to infer the protocol from the + * socket type, or fill in socket or protocol types much at + * all. It also seems to do its own broken implicit + * always-on version of AI_ADDRCONFIG that keeps it from + * ever resolving even a literal IPv6 address when + * ai_addrtype is PF_UNSPEC. + */ +#ifdef _WIN32 + { + int tmp_port; + err = evutil_getaddrinfo_common_(nodename,servname,&hints, + res, &tmp_port); + if (err == 0 || + err == EVUTIL_EAI_MEMORY || + err == EVUTIL_EAI_NONAME) + return err; + /* If we make it here, the system getaddrinfo can + * have a crack at it. */ + } +#endif + + /* See documentation for need_numeric_port_hack above.*/ + need_np_hack = need_numeric_port_hack() && servname && !hints.ai_socktype + && ((portnum=parse_numeric_servname(servname)) >= 0); + if (need_np_hack) { + if (!nodename) + return evutil_getaddrinfo_common_( + NULL,servname,&hints, res, &portnum); + servname = NULL; + } + + if (need_socktype_protocol_hack()) { + evutil_getaddrinfo_infer_protocols(&hints); + } + + /* Make sure that we didn't actually steal any AI_FLAGS values that + * the system is using. (This is a constant expression, and should ge + * optimized out.) + * + * XXXX Turn this into a compile-time failure rather than a run-time + * failure. + */ + EVUTIL_ASSERT((ALL_NONNATIVE_AI_FLAGS & ALL_NATIVE_AI_FLAGS) == 0); + + /* Clear any flags that only libevent understands. 
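+	 * Passing bits the platform does not define can make some
+	 * getaddrinfo() implementations fail with EAI_BADFLAGS.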
*/ + hints.ai_flags &= ~ALL_NONNATIVE_AI_FLAGS; + + err = getaddrinfo(nodename, servname, &hints, res); + if (need_np_hack) + apply_numeric_port_hack(portnum, res); + + if (need_socktype_protocol_hack()) { + if (apply_socktype_protocol_hack(*res) < 0) { + evutil_freeaddrinfo(*res); + *res = NULL; + return EVUTIL_EAI_MEMORY; + } + } + return err; +#else + int port=0, err; + struct hostent *ent = NULL; + struct evutil_addrinfo hints; + + if (hints_in) { + memcpy(&hints, hints_in, sizeof(hints)); + } else { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + } + + evutil_adjust_hints_for_addrconfig_(&hints); + + err = evutil_getaddrinfo_common_(nodename, servname, &hints, res, &port); + if (err != EVUTIL_EAI_NEED_RESOLVE) { + /* We either succeeded or failed. No need to continue */ + return err; + } + + err = 0; + /* Use any of the various gethostbyname_r variants as available. */ + { +#ifdef EVENT__HAVE_GETHOSTBYNAME_R_6_ARG + /* This one is what glibc provides. */ + char buf[2048]; + struct hostent hostent; + int r; + r = gethostbyname_r(nodename, &hostent, buf, sizeof(buf), &ent, + &err); +#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_5_ARG) + char buf[2048]; + struct hostent hostent; + ent = gethostbyname_r(nodename, &hostent, buf, sizeof(buf), + &err); +#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_3_ARG) + struct hostent_data data; + struct hostent hostent; + memset(&data, 0, sizeof(data)); + err = gethostbyname_r(nodename, &hostent, &data); + ent = err ? NULL : &hostent; +#else + /* fall back to gethostbyname. */ + /* XXXX This needs a lock everywhere but Windows. */ + ent = gethostbyname(nodename); +#ifdef _WIN32 + err = WSAGetLastError(); +#else + err = h_errno; +#endif +#endif + + /* Now we have either ent or err set. */ + if (!ent) { + /* XXX is this right for windows ? */ + switch (err) { + case TRY_AGAIN: + return EVUTIL_EAI_AGAIN; + case NO_RECOVERY: + default: + return EVUTIL_EAI_FAIL; + case HOST_NOT_FOUND: + return EVUTIL_EAI_NONAME; + case NO_ADDRESS: +#if NO_DATA != NO_ADDRESS + case NO_DATA: +#endif + return EVUTIL_EAI_NODATA; + } + } + + if (ent->h_addrtype != hints.ai_family && + hints.ai_family != PF_UNSPEC) { + /* This wasn't the type we were hoping for. Too bad + * we never had a chance to ask gethostbyname for what + * we wanted. */ + return EVUTIL_EAI_NONAME; + } + + /* Make sure we got _some_ answers. */ + if (ent->h_length == 0) + return EVUTIL_EAI_NODATA; + + /* If we got an address type we don't know how to make a + sockaddr for, give up. */ + if (ent->h_addrtype != PF_INET && ent->h_addrtype != PF_INET6) + return EVUTIL_EAI_FAMILY; + + *res = addrinfo_from_hostent(ent, port, &hints); + if (! 
*res) + return EVUTIL_EAI_MEMORY; + } + + return 0; +#endif +} + +void +evutil_freeaddrinfo(struct evutil_addrinfo *ai) +{ +#ifdef EVENT__HAVE_GETADDRINFO + if (!(ai->ai_flags & EVUTIL_AI_LIBEVENT_ALLOCATED)) { + freeaddrinfo(ai); + return; + } +#endif + while (ai) { + struct evutil_addrinfo *next = ai->ai_next; + if (ai->ai_canonname) + mm_free(ai->ai_canonname); + mm_free(ai); + ai = next; + } +} + +static evdns_getaddrinfo_fn evdns_getaddrinfo_impl = NULL; +static evdns_getaddrinfo_cancel_fn evdns_getaddrinfo_cancel_impl = NULL; + +void +evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo_fn fn) +{ + if (!evdns_getaddrinfo_impl) + evdns_getaddrinfo_impl = fn; +} +void +evutil_set_evdns_getaddrinfo_cancel_fn_(evdns_getaddrinfo_cancel_fn fn) +{ + if (!evdns_getaddrinfo_cancel_impl) + evdns_getaddrinfo_cancel_impl = fn; +} + +/* Internal helper function: act like evdns_getaddrinfo if dns_base is set; + * otherwise do a blocking resolve and pass the result to the callback in the + * way that evdns_getaddrinfo would. + */ +struct evdns_getaddrinfo_request *evutil_getaddrinfo_async_( + struct evdns_base *dns_base, + const char *nodename, const char *servname, + const struct evutil_addrinfo *hints_in, + void (*cb)(int, struct evutil_addrinfo *, void *), void *arg) +{ + if (dns_base && evdns_getaddrinfo_impl) { + return evdns_getaddrinfo_impl( + dns_base, nodename, servname, hints_in, cb, arg); + } else { + struct evutil_addrinfo *ai=NULL; + int err; + err = evutil_getaddrinfo(nodename, servname, hints_in, &ai); + cb(err, ai, arg); + return NULL; + } +} + +void evutil_getaddrinfo_cancel_async_(struct evdns_getaddrinfo_request *data) +{ + if (evdns_getaddrinfo_cancel_impl && data) { + evdns_getaddrinfo_cancel_impl(data); + } +} + +const char * +evutil_gai_strerror(int err) +{ + /* As a sneaky side-benefit, this case statement will get most + * compilers to tell us if any of the error codes we defined + * conflict with the platform's native error codes. */ + switch (err) { + case EVUTIL_EAI_CANCEL: + return "Request canceled"; + case 0: + return "No error"; + + case EVUTIL_EAI_ADDRFAMILY: + return "address family for nodename not supported"; + case EVUTIL_EAI_AGAIN: + return "temporary failure in name resolution"; + case EVUTIL_EAI_BADFLAGS: + return "invalid value for ai_flags"; + case EVUTIL_EAI_FAIL: + return "non-recoverable failure in name resolution"; + case EVUTIL_EAI_FAMILY: + return "ai_family not supported"; + case EVUTIL_EAI_MEMORY: + return "memory allocation failure"; + case EVUTIL_EAI_NODATA: + return "no address associated with nodename"; + case EVUTIL_EAI_NONAME: + return "nodename nor servname provided, or not known"; + case EVUTIL_EAI_SERVICE: + return "servname not supported for ai_socktype"; + case EVUTIL_EAI_SOCKTYPE: + return "ai_socktype not supported"; + case EVUTIL_EAI_SYSTEM: + return "system error"; + default: +#if defined(USE_NATIVE_GETADDRINFO) && defined(_WIN32) + return gai_strerrorA(err); +#elif defined(USE_NATIVE_GETADDRINFO) + return gai_strerror(err); +#else + return "Unknown error code"; +#endif + } +} + +#ifdef _WIN32 +/* destructively remove a trailing line terminator from s */ +static void +chomp (char *s) +{ + size_t len; + if (s && (len = strlen (s)) > 0 && s[len - 1] == '\n') { + s[--len] = 0; + if (len > 0 && s[len - 1] == '\r') + s[--len] = 0; + } +} + +/* FormatMessage returns allocated strings, but evutil_socket_error_to_string + * is supposed to return a string which is good indefinitely without having + * to be freed. 
To make this work without leaking memory, we cache the + * string the first time FormatMessage is called on a particular error + * code, and then return the cached string on subsequent calls with the + * same code. The strings aren't freed until libevent_global_shutdown + * (or never). We use a linked list to cache the errors, because we + * only expect there to be a few dozen, and that should be fast enough. + */ + +struct cached_sock_errs_entry { + HT_ENTRY(cached_sock_errs_entry) node; + DWORD code; + char *msg; /* allocated with LocalAlloc; free with LocalFree */ +}; + +static inline unsigned +hash_cached_sock_errs(const struct cached_sock_errs_entry *e) +{ + /* Use Murmur3's 32-bit finalizer as an integer hash function */ + DWORD h = e->code; + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +static inline int +eq_cached_sock_errs(const struct cached_sock_errs_entry *a, + const struct cached_sock_errs_entry *b) +{ + return a->code == b->code; +} + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +static void *windows_socket_errors_lock_ = NULL; +#endif + +static HT_HEAD(cached_sock_errs_map, cached_sock_errs_entry) + windows_socket_errors = HT_INITIALIZER(); + +HT_PROTOTYPE(cached_sock_errs_map, + cached_sock_errs_entry, + node, + hash_cached_sock_errs, + eq_cached_sock_errs); + +HT_GENERATE(cached_sock_errs_map, + cached_sock_errs_entry, + node, + hash_cached_sock_errs, + eq_cached_sock_errs, + 0.5, + mm_malloc, + mm_realloc, + mm_free); + +/** Equivalent to strerror, but for windows socket errors. */ +const char * +evutil_socket_error_to_string(int errcode) +{ + struct cached_sock_errs_entry *errs, *newerr, find; + char *msg = NULL; + + EVLOCK_LOCK(windows_socket_errors_lock_, 0); + + find.code = errcode; + errs = HT_FIND(cached_sock_errs_map, &windows_socket_errors, &find); + if (errs) { + msg = errs->msg; + goto done; + } + + if (0 != FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS | + FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, errcode, 0, (char *)&msg, 0, NULL)) + chomp (msg); /* because message has trailing newline */ + else { + size_t len = 50; + /* use LocalAlloc because FormatMessage does */ + msg = LocalAlloc(LMEM_FIXED, len); + if (!msg) { + msg = (char *)"LocalAlloc failed during Winsock error"; + goto done; + } + evutil_snprintf(msg, len, "winsock error 0x%08x", errcode); + } + + newerr = (struct cached_sock_errs_entry *) + mm_malloc(sizeof (struct cached_sock_errs_entry)); + + if (!newerr) { + LocalFree(msg); + msg = (char *)"malloc failed during Winsock error"; + goto done; + } + + newerr->code = errcode; + newerr->msg = msg; + HT_INSERT(cached_sock_errs_map, &windows_socket_errors, newerr); + + done: + EVLOCK_UNLOCK(windows_socket_errors_lock_, 0); + + return msg; +} + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +evutil_global_setup_locks_(const int enable_locks) +{ + EVTHREAD_SETUP_GLOBAL_LOCK(windows_socket_errors_lock_, 0); + return 0; +} +#endif + +static void +evutil_free_sock_err_globals(void) +{ + struct cached_sock_errs_entry **errs, *tofree; + + for (errs = HT_START(cached_sock_errs_map, &windows_socket_errors) + ; errs; ) { + tofree = *errs; + errs = HT_NEXT_RMV(cached_sock_errs_map, + &windows_socket_errors, + errs); + LocalFree(tofree->msg); + mm_free(tofree); + } + + HT_CLEAR(cached_sock_errs_map, &windows_socket_errors); + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (windows_socket_errors_lock_ != NULL) { + EVTHREAD_FREE_LOCK(windows_socket_errors_lock_, 0); + windows_socket_errors_lock_ 
= NULL; + } +#endif +} + +#else + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +evutil_global_setup_locks_(const int enable_locks) +{ + return 0; +} +#endif + +static void +evutil_free_sock_err_globals(void) +{ +} + +#endif + +int +evutil_snprintf(char *buf, size_t buflen, const char *format, ...) +{ + int r; + va_list ap; + va_start(ap, format); + r = evutil_vsnprintf(buf, buflen, format, ap); + va_end(ap); + return r; +} + +int +evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap) +{ + int r; + if (!buflen) + return 0; +#if defined(_MSC_VER) || defined(_WIN32) + r = _vsnprintf(buf, buflen, format, ap); + if (r < 0) + r = _vscprintf(format, ap); +#elif defined(sgi) + /* Make sure we always use the correct vsnprintf on IRIX */ + extern int _xpg5_vsnprintf(char * __restrict, + __SGI_LIBC_NAMESPACE_QUALIFIER size_t, + const char * __restrict, /* va_list */ char *); + + r = _xpg5_vsnprintf(buf, buflen, format, ap); +#else + r = vsnprintf(buf, buflen, format, ap); +#endif + buf[buflen-1] = '\0'; + return r; +} + +#define USE_INTERNAL_NTOP +#define USE_INTERNAL_PTON + +const char * +evutil_inet_ntop(int af, const void *src, char *dst, size_t len) +{ +#if defined(EVENT__HAVE_INET_NTOP) && !defined(USE_INTERNAL_NTOP) + return inet_ntop(af, src, dst, len); +#else + if (af == AF_INET) { + const struct in_addr *in = src; + const ev_uint32_t a = ntohl(in->s_addr); + int r; + r = evutil_snprintf(dst, len, "%d.%d.%d.%d", + (int)(ev_uint8_t)((a>>24)&0xff), + (int)(ev_uint8_t)((a>>16)&0xff), + (int)(ev_uint8_t)((a>>8 )&0xff), + (int)(ev_uint8_t)((a )&0xff)); + if (r<0||(size_t)r>=len) + return NULL; + else + return dst; +#ifdef AF_INET6 + } else if (af == AF_INET6) { + const struct in6_addr *addr = src; + char buf[64], *cp; + int longestGapLen = 0, longestGapPos = -1, i, + curGapPos = -1, curGapLen = 0; + ev_uint16_t words[8]; + for (i = 0; i < 8; ++i) { + words[i] = + (((ev_uint16_t)addr->s6_addr[2*i])<<8) + addr->s6_addr[2*i+1]; + } + if (words[0] == 0 && words[1] == 0 && words[2] == 0 && words[3] == 0 && + words[4] == 0 && ((words[5] == 0 && words[6] && words[7]) || + (words[5] == 0xffff))) { + /* This is an IPv4 address. */ + if (words[5] == 0) { + evutil_snprintf(buf, sizeof(buf), "::%d.%d.%d.%d", + addr->s6_addr[12], addr->s6_addr[13], + addr->s6_addr[14], addr->s6_addr[15]); + } else { + evutil_snprintf(buf, sizeof(buf), "::%x:%d.%d.%d.%d", words[5], + addr->s6_addr[12], addr->s6_addr[13], + addr->s6_addr[14], addr->s6_addr[15]); + } + if (strlen(buf) > len) + return NULL; + strlcpy(dst, buf, len); + return dst; + } + i = 0; + while (i < 8) { + if (words[i] == 0) { + curGapPos = i++; + curGapLen = 1; + while (i<8 && words[i] == 0) { + ++i; ++curGapLen; + } + if (curGapLen > longestGapLen) { + longestGapPos = curGapPos; + longestGapLen = curGapLen; + } + } else { + ++i; + } + } + if (longestGapLen<=1) + longestGapPos = -1; + + cp = buf; + for (i = 0; i < 8; ++i) { + if (words[i] == 0 && longestGapPos == i) { + if (i == 0) + *cp++ = ':'; + *cp++ = ':'; + while (i < 8 && words[i] == 0) + ++i; + --i; /* to compensate for loop increment. 
*/ + } else { + evutil_snprintf(cp, + sizeof(buf)-(cp-buf), "%x", (unsigned)words[i]); + cp += strlen(cp); + if (i != 7) + *cp++ = ':'; + } + } + *cp = '\0'; + if (strlen(buf) > len) + return NULL; + strlcpy(dst, buf, len); + return dst; +#endif + } else { + return NULL; + } +#endif +} + +static int +evutil_inet_pton_scope(int af, const char *src, void *dst, unsigned *indexp) +{ + int r; + unsigned if_index; + char *check, *cp, *tmp_src; + + *indexp= 0; /* Reasonable default */ + + /* Bail out if not IPv6 */ + if (af != AF_INET6) + return evutil_inet_pton(af, src, dst); + + cp= strchr(src, '%'); + + /* Bail out if no zone ID */ + if (cp == NULL) + return evutil_inet_pton(af, src, dst); + + if_index= if_nametoindex(cp+1); + if (if_index == 0) + { + /* Could be numeric */ + if_index= strtoul(cp+1, &check, 10); + if (check[0] != '\0') + return 0; + } + *indexp= if_index; + tmp_src= strdup(src); + cp= strchr(tmp_src, '%'); + *cp= '\0'; + r= evutil_inet_pton(af, tmp_src, dst); + free(tmp_src); + return r; +} + +int +evutil_inet_pton(int af, const char *src, void *dst) +{ +#if defined(EVENT__HAVE_INET_PTON) && !defined(USE_INTERNAL_PTON) + return inet_pton(af, src, dst); +#else + if (af == AF_INET) { + unsigned a,b,c,d; + char more; + struct in_addr *addr = dst; + if (sscanf(src, "%u.%u.%u.%u%c", &a,&b,&c,&d,&more) != 4) + return 0; + if (a > 255) return 0; + if (b > 255) return 0; + if (c > 255) return 0; + if (d > 255) return 0; + addr->s_addr = htonl((a<<24) | (b<<16) | (c<<8) | d); + return 1; +#ifdef AF_INET6 + } else if (af == AF_INET6) { + struct in6_addr *out = dst; + ev_uint16_t words[8]; + int gapPos = -1, i, setWords=0; + const char *dot = strchr(src, '.'); + const char *eow; /* end of words. */ + if (dot == src) + return 0; + else if (!dot) + eow = src+strlen(src); + else { + unsigned byte1,byte2,byte3,byte4; + char more; + for (eow = dot-1; eow >= src && EVUTIL_ISDIGIT_(*eow); --eow) + ; + ++eow; + + /* We use "scanf" because some platform inet_aton()s are too lax + * about IPv4 addresses of the form "1.2.3" */ + if (sscanf(eow, "%u.%u.%u.%u%c", + &byte1,&byte2,&byte3,&byte4,&more) != 4) + return 0; + + if (byte1 > 255 || + byte2 > 255 || + byte3 > 255 || + byte4 > 255) + return 0; + + words[6] = (byte1<<8) | byte2; + words[7] = (byte3<<8) | byte4; + setWords += 2; + } + + i = 0; + while (src < eow) { + if (i > 7) + return 0; + if (EVUTIL_ISXDIGIT_(*src)) { + char *next; + long r = strtol(src, &next, 16); + if (next > 4+src) + return 0; + if (next == src) + return 0; + if (r<0 || r>65536) + return 0; + + words[i++] = (ev_uint16_t)r; + setWords++; + src = next; + if (*src != ':' && src != eow) + return 0; + ++src; + } else if (*src == ':' && i > 0 && gapPos==-1) { + gapPos = i; + ++src; + } else if (*src == ':' && i == 0 && src[1] == ':' && gapPos==-1) { + gapPos = i; + src += 2; + } else { + return 0; + } + } + + if (setWords > 8 || + (setWords == 8 && gapPos != -1) || + (setWords < 8 && gapPos == -1)) + return 0; + + if (gapPos >= 0) { + int nToMove = setWords - (dot ? 
2 : 0) - gapPos;
+			int gapLen = 8 - setWords;
+			/* assert(nToMove >= 0); */
+			if (nToMove < 0)
+				return -1; /* should be impossible */
+			memmove(&words[gapPos+gapLen], &words[gapPos],
+			    sizeof(ev_uint16_t)*nToMove);
+			memset(&words[gapPos], 0, sizeof(ev_uint16_t)*gapLen);
+		}
+		for (i = 0; i < 8; ++i) {
+			out->s6_addr[2*i  ] = words[i] >> 8;
+			out->s6_addr[2*i+1] = words[i] & 0xff;
+		}
+
+		return 1;
+#endif
+	} else {
+		return -1;
+	}
+#endif
+}
+
+int
+evutil_parse_sockaddr_port(const char *ip_as_string, struct sockaddr *out, int *outlen)
+{
+	int port;
+	unsigned int if_index;
+	char buf[128];
+	const char *cp, *addr_part, *port_part;
+	int is_ipv6;
+	/* recognized formats are:
+	 * [ipv6]:port
+	 * ipv6
+	 * [ipv6]
+	 * ipv4:port
+	 * ipv4
+	 */
+
+	cp = strchr(ip_as_string, ':');
+	if (*ip_as_string == '[') {
+		size_t len;
+		if (!(cp = strchr(ip_as_string, ']'))) {
+			return -1;
+		}
+		len = ( cp-(ip_as_string + 1) );
+		if (len > sizeof(buf)-1) {
+			return -1;
+		}
+		memcpy(buf, ip_as_string+1, len);
+		buf[len] = '\0';
+		addr_part = buf;
+		if (cp[1] == ':')
+			port_part = cp+2;
+		else
+			port_part = NULL;
+		is_ipv6 = 1;
+	} else if (cp && strchr(cp+1, ':')) {
+		is_ipv6 = 1;
+		addr_part = ip_as_string;
+		port_part = NULL;
+	} else if (cp) {
+		is_ipv6 = 0;
+		if (cp - ip_as_string > (int)sizeof(buf)-1) {
+			return -1;
+		}
+		memcpy(buf, ip_as_string, cp-ip_as_string);
+		buf[cp-ip_as_string] = '\0';
+		addr_part = buf;
+		port_part = cp+1;
+	} else {
+		addr_part = ip_as_string;
+		port_part = NULL;
+		is_ipv6 = 0;
+	}
+
+	if (port_part == NULL) {
+		port = 0;
+	} else {
+		port = atoi(port_part);
+		if (port <= 0 || port > 65535) {
+			return -1;
+		}
+	}
+
+	if (!addr_part)
+		return -1; /* Should be impossible. */
+#ifdef AF_INET6
+	if (is_ipv6)
+	{
+		struct sockaddr_in6 sin6;
+		memset(&sin6, 0, sizeof(sin6));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
+		sin6.sin6_len = sizeof(sin6);
+#endif
+		sin6.sin6_family = AF_INET6;
+		sin6.sin6_port = htons(port);
+		if (1 != evutil_inet_pton_scope(AF_INET6, addr_part,
+			&sin6.sin6_addr, &if_index))
+		{
+			return -1;
+		}
+		if ((int)sizeof(sin6) > *outlen)
+			return -1;
+		sin6.sin6_scope_id= if_index;
+		memset(out, 0, *outlen);
+		memcpy(out, &sin6, sizeof(sin6));
+		*outlen = sizeof(sin6);
+		return 0;
+	}
+	else
+#endif
+	{
+		struct sockaddr_in sin;
+		memset(&sin, 0, sizeof(sin));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+		sin.sin_len = sizeof(sin);
+#endif
+		sin.sin_family = AF_INET;
+		sin.sin_port = htons(port);
+		if (1 != evutil_inet_pton(AF_INET, addr_part, &sin.sin_addr))
+			return -1;
+		if ((int)sizeof(sin) > *outlen)
+			return -1;
+		memset(out, 0, *outlen);
+		memcpy(out, &sin, sizeof(sin));
+		*outlen = sizeof(sin);
+		return 0;
+	}
+}
+
+const char *
+evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen)
+{
+	char b[128];
+	const char *res=NULL;
+	int port;
+	if (sa->sa_family == AF_INET) {
+		const struct sockaddr_in *sin = (const struct sockaddr_in*)sa;
+		res = evutil_inet_ntop(AF_INET, &sin->sin_addr,b,sizeof(b));
+		port = ntohs(sin->sin_port);
+		if (res) {
+			evutil_snprintf(out, outlen, "%s:%d", b, port);
+			return out;
+		}
+	} else if (sa->sa_family == AF_INET6) {
+		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6*)sa;
+		res = evutil_inet_ntop(AF_INET6, &sin6->sin6_addr,b,sizeof(b));
+		port = ntohs(sin6->sin6_port);
+		if (res) {
+			evutil_snprintf(out, outlen, "[%s]:%d", b, port);
+			return out;
+		}
+	}
+
+	evutil_snprintf(out, outlen, "<addr with socktype %d>",
+	    (int)sa->sa_family);
+	return out;
+}
+
+int
+evutil_sockaddr_cmp(const struct 
sockaddr *sa1, const struct sockaddr *sa2, + int include_port) +{ + int r; + if (0 != (r = (sa1->sa_family - sa2->sa_family))) + return r; + + if (sa1->sa_family == AF_INET) { + const struct sockaddr_in *sin1, *sin2; + sin1 = (const struct sockaddr_in *)sa1; + sin2 = (const struct sockaddr_in *)sa2; + if (sin1->sin_addr.s_addr < sin2->sin_addr.s_addr) + return -1; + else if (sin1->sin_addr.s_addr > sin2->sin_addr.s_addr) + return 1; + else if (include_port && + (r = ((int)sin1->sin_port - (int)sin2->sin_port))) + return r; + else + return 0; + } +#ifdef AF_INET6 + else if (sa1->sa_family == AF_INET6) { + const struct sockaddr_in6 *sin1, *sin2; + sin1 = (const struct sockaddr_in6 *)sa1; + sin2 = (const struct sockaddr_in6 *)sa2; + if ((r = memcmp(sin1->sin6_addr.s6_addr, sin2->sin6_addr.s6_addr, 16))) + return r; + else if (include_port && + (r = ((int)sin1->sin6_port - (int)sin2->sin6_port))) + return r; + else + return 0; + } +#endif + return 1; +} + +/* Tables to implement ctypes-replacement EVUTIL_IS*() functions. Each table + * has 256 bits to look up whether a character is in some set or not. This + * fails on non-ASCII platforms, but so does every other place where we + * take a char and write it onto the network. + **/ +static const ev_uint32_t EVUTIL_ISALPHA_TABLE[8] = + { 0, 0, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISALNUM_TABLE[8] = + { 0, 0x3ff0000, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISSPACE_TABLE[8] = { 0x3e00, 0x1, 0, 0, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISXDIGIT_TABLE[8] = + { 0, 0x3ff0000, 0x7e, 0x7e, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISDIGIT_TABLE[8] = { 0, 0x3ff0000, 0, 0, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISPRINT_TABLE[8] = + { 0, 0xffffffff, 0xffffffff, 0x7fffffff, 0, 0, 0, 0x0 }; +static const ev_uint32_t EVUTIL_ISUPPER_TABLE[8] = { 0, 0, 0x7fffffe, 0, 0, 0, 0, 0 }; +static const ev_uint32_t EVUTIL_ISLOWER_TABLE[8] = { 0, 0, 0, 0x7fffffe, 0, 0, 0, 0 }; +/* Upper-casing and lowercasing tables to map characters to upper/lowercase + * equivalents. 
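+ * Only the ASCII letters are remapped; bytes 0x80-0xff map to themselves.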
*/ +static const unsigned char EVUTIL_TOUPPER_TABLE[256] = { + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, + 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, + 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, + 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, + 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, + 96,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, + 80,81,82,83,84,85,86,87,88,89,90,123,124,125,126,127, + 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, + 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, + 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, + 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, + 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, + 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, + 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, +}; +static const unsigned char EVUTIL_TOLOWER_TABLE[256] = { + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, + 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, + 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, + 64,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, + 112,113,114,115,116,117,118,119,120,121,122,91,92,93,94,95, + 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, + 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, + 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, + 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, + 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, + 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, + 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, + 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239, + 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, +}; + +#define IMPL_CTYPE_FN(name) \ + int EVUTIL_##name##_(char c) { \ + ev_uint8_t u = c; \ + return !!(EVUTIL_##name##_TABLE[(u >> 5) & 7] & (1 << (u & 31))); \ + } +IMPL_CTYPE_FN(ISALPHA) +IMPL_CTYPE_FN(ISALNUM) +IMPL_CTYPE_FN(ISSPACE) +IMPL_CTYPE_FN(ISDIGIT) +IMPL_CTYPE_FN(ISXDIGIT) +IMPL_CTYPE_FN(ISPRINT) +IMPL_CTYPE_FN(ISLOWER) +IMPL_CTYPE_FN(ISUPPER) + +char EVUTIL_TOLOWER_(char c) +{ + return ((char)EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c]); +} +char EVUTIL_TOUPPER_(char c) +{ + return ((char)EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c]); +} +int +evutil_ascii_strcasecmp(const char *s1, const char *s2) +{ + char c1, c2; + while (1) { + c1 = EVUTIL_TOLOWER_(*s1++); + c2 = EVUTIL_TOLOWER_(*s2++); + if (c1 < c2) + return -1; + else if (c1 > c2) + return 1; + else if (c1 == 0) + return 0; + } +} +int evutil_ascii_strncasecmp(const char *s1, const char *s2, size_t n) +{ + char c1, c2; + while (n--) { + c1 = EVUTIL_TOLOWER_(*s1++); + c2 = EVUTIL_TOLOWER_(*s2++); + if (c1 < c2) + return -1; + else if (c1 > c2) + return 1; + else if (c1 == 0) + return 0; + } + return 0; +} + +void +evutil_rtrim_lws_(char *str) +{ + char *cp; + + if (str == NULL) + return; + + if ((cp = strchr(str, '\0')) == NULL || (cp == str)) + return; + + --cp; + + while (*cp == ' ' || *cp == '\t') { + *cp = '\0'; + if (cp == str) + break; + --cp; + } +} + +static int +evutil_issetugid(void) +{ +#ifdef EVENT__HAVE_ISSETUGID + return issetugid(); +#else + +#ifdef EVENT__HAVE_GETEUID + if (getuid() != geteuid()) + return 1; 
+#endif
+#ifdef EVENT__HAVE_GETEGID
+	if (getgid() != getegid())
+		return 1;
+#endif
+	return 0;
+#endif
+}
+
+const char *
+evutil_getenv_(const char *varname)
+{
+	if (evutil_issetugid())
+		return NULL;
+
+	return getenv(varname);
+}
+
+ev_uint32_t
+evutil_weakrand_seed_(struct evutil_weakrand_state *state, ev_uint32_t seed)
+{
+	if (seed == 0) {
+		struct timeval tv;
+		evutil_gettimeofday(&tv, NULL);
+		seed = (ev_uint32_t)tv.tv_sec + (ev_uint32_t)tv.tv_usec;
+#ifdef _WIN32
+		seed += (ev_uint32_t) _getpid();
+#else
+		seed += (ev_uint32_t) getpid();
+#endif
+	}
+	state->seed = seed;
+	return seed;
+}
+
+ev_int32_t
+evutil_weakrand_(struct evutil_weakrand_state *state)
+{
+	/* This RNG implementation is a linear congruential generator, with
+	 * modulus 2^31, multiplier 1103515245, and addend 12345. It's also
+	 * used by OpenBSD, and by Glibc's TYPE_0 RNG.
+	 *
+	 * The linear congruential generator is not an industrial-strength
+	 * RNG! It's fast, but it can have higher-order patterns. Notably,
+	 * the low bits tend to have periodicity.
+	 */
+	state->seed = ((state->seed) * 1103515245 + 12345) & 0x7fffffff;
+	return (ev_int32_t)(state->seed);
+}
+
+ev_int32_t
+evutil_weakrand_range_(struct evutil_weakrand_state *state, ev_int32_t top)
+{
+	ev_int32_t divisor, result;
+
+	/* We can't just do weakrand() % top, since the low bits of the LCG
+	 * are less random than the high ones. (Specifically, since the LCG
+	 * modulus is 2^N, every 2^m for m < N divides the modulus, so the
+	 * low m bits repeat with period 2^m.) Instead, scale the output
+	 * down into [0, top) and reject the few values that fall outside
+	 * that range.
+	 */
+	divisor = EVUTIL_WEAKRAND_MAX / top;
+	do {
+		result = evutil_weakrand_(state) / divisor;
+	} while (result >= top);
+	return result;
+}
+
+/**
+ * Volatile pointer to memset: we use this to keep the compiler from
+ * eliminating our call to memset.
+ */
+void * (*volatile evutil_memset_volatile_)(void *, int, size_t) = memset;
+
+void
+evutil_memclear_(void *mem, size_t len)
+{
+	evutil_memset_volatile_(mem, 0, len);
+}
+
+int
+evutil_sockaddr_is_loopback_(const struct sockaddr *addr)
+{
+	static const char LOOPBACK_S6[16] =
+	    "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1";
+	if (addr->sa_family == AF_INET) {
+		struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+		return (ntohl(sin->sin_addr.s_addr) & 0xff000000) == 0x7f000000;
+	} else if (addr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+		return !memcmp(sin6->sin6_addr.s6_addr, LOOPBACK_S6, 16);
+	}
+	return 0;
+}
+
+int
+evutil_hex_char_to_int_(char c)
+{
+	switch(c)
+	{
+		case '0': return 0;
+		case '1': return 1;
+		case '2': return 2;
+		case '3': return 3;
+		case '4': return 4;
+		case '5': return 5;
+		case '6': return 6;
+		case '7': return 7;
+		case '8': return 8;
+		case '9': return 9;
+		case 'A': case 'a': return 10;
+		case 'B': case 'b': return 11;
+		case 'C': case 'c': return 12;
+		case 'D': case 'd': return 13;
+		case 'E': case 'e': return 14;
+		case 'F': case 'f': return 15;
+	}
+	return -1;
+}
+
+#ifdef _WIN32
+HMODULE
+evutil_load_windows_system_library_(const TCHAR *library_name)
+{
+	TCHAR path[MAX_PATH];
+	unsigned n;
+	n = GetSystemDirectory(path, MAX_PATH);
+	if (n == 0 || n + _tcslen(library_name) + 2 >= MAX_PATH)
+		return 0;
+	_tcscat(path, TEXT("\\"));
+	_tcscat(path, library_name);
+	return LoadLibrary(path);
+}
+#endif
+
+/* Internal wrapper around 'socket' to provide Linux-style support for
+ * syscall-saving methods where available.
+ *
+ * In addition to regular socket behavior, you can use a bitwise or to set the
+ * flags EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'type' argument,
+ * to make the socket nonblocking or close-on-exec with as few syscalls as
+ * possible. 
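+ *
+ * Where the platform defines SOCK_NONBLOCK and SOCK_CLOEXEC this is a
+ * single socket() call; otherwise the flags are applied with follow-up
+ * calls once the socket exists.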
+ */ +evutil_socket_t +evutil_socket_(int domain, int type, int protocol) +{ + evutil_socket_t r; +#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) + r = socket(domain, type, protocol); + if (r >= 0) + return r; + else if ((type & (SOCK_NONBLOCK|SOCK_CLOEXEC)) == 0) + return -1; +#endif +#define SOCKET_TYPE_MASK (~(EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC)) + r = socket(domain, type & SOCKET_TYPE_MASK, protocol); + if (r < 0) + return -1; + if (type & EVUTIL_SOCK_NONBLOCK) { + if (evutil_fast_socket_nonblocking(r) < 0) { + evutil_closesocket(r); + return -1; + } + } + if (type & EVUTIL_SOCK_CLOEXEC) { + if (evutil_fast_socket_closeonexec(r) < 0) { + evutil_closesocket(r); + return -1; + } + } + return r; +} + +/* Internal wrapper around 'accept' or 'accept4' to provide Linux-style + * support for syscall-saving methods where available. + * + * In addition to regular accept behavior, you can set one or more of flags + * EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'flags' argument, to + * make the socket nonblocking or close-on-exec with as few syscalls as + * possible. + */ +evutil_socket_t +evutil_accept4_(evutil_socket_t sockfd, struct sockaddr *addr, + ev_socklen_t *addrlen, int flags) +{ + evutil_socket_t result; +#if defined(EVENT__HAVE_ACCEPT4) && defined(SOCK_CLOEXEC) && defined(SOCK_NONBLOCK) + result = accept4(sockfd, addr, addrlen, flags); + if (result >= 0 || (errno != EINVAL && errno != ENOSYS)) { + /* A nonnegative result means that we succeeded, so return. + * Failing with EINVAL means that an option wasn't supported, + * and failing with ENOSYS means that the syscall wasn't + * there: in those cases we want to fall back. Otherwise, we + * got a real error, and we should return. */ + return result; + } +#endif + result = accept(sockfd, addr, addrlen); + if (result < 0) + return result; + + if (flags & EVUTIL_SOCK_CLOEXEC) { + if (evutil_fast_socket_closeonexec(result) < 0) { + evutil_closesocket(result); + return -1; + } + } + if (flags & EVUTIL_SOCK_NONBLOCK) { + if (evutil_fast_socket_nonblocking(result) < 0) { + evutil_closesocket(result); + return -1; + } + } + return result; +} + +/* Internal function: Set fd[0] and fd[1] to a pair of fds such that writes on + * fd[1] get read from fd[0]. Make both fds nonblocking and close-on-exec. + * Return 0 on success, -1 on failure. + */ +int +evutil_make_internal_pipe_(evutil_socket_t fd[2]) +{ + /* + Making the second socket nonblocking is a bit subtle, given that we + ignore any EAGAIN returns when writing to it, and you don't usally + do that for a nonblocking socket. But if the kernel gives us EAGAIN, + then there's no need to add any more data to the buffer, since + the main thread is already either about to wake up and drain it, + or woken up and in the process of draining it. 
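evutil_socket_() and evutil_accept4_() above fold the usual fcntl() follow-ups into the creating syscall when SOCK_NONBLOCK/SOCK_CLOEXEC and accept4() are available, and fall back to explicit calls otherwise. A minimal caller-side sketch of the fast path against the raw Linux API (assumes accept4() and the SOCK_* flags exist; this is not the libevent wrapper itself):

#define _GNU_SOURCE
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct sockaddr_in sin = { 0 };
	int listener, conn;

	/* One syscall instead of socket() followed by two fcntl() calls. */
	listener = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
	if (listener < 0) { perror("socket"); return 1; }

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = 0;                              /* any free port */
	if (bind(listener, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(listener, 16) < 0) { perror("bind/listen"); return 1; }

	/* accept4() applies the same flags to the accepted socket. */
	conn = accept4(listener, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
	if (conn < 0)
		perror("accept4");             /* EAGAIN is expected: no client queued */
	else
		close(conn);

	close(listener);
	return 0;
}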
+ */ + +#if defined(EVENT__HAVE_PIPE2) + if (pipe2(fd, O_NONBLOCK|O_CLOEXEC) == 0) + return 0; +#endif +#if defined(EVENT__HAVE_PIPE) + if (pipe(fd) == 0) { + if (evutil_fast_socket_nonblocking(fd[0]) < 0 || + evutil_fast_socket_nonblocking(fd[1]) < 0 || + evutil_fast_socket_closeonexec(fd[0]) < 0 || + evutil_fast_socket_closeonexec(fd[1]) < 0) { + close(fd[0]); + close(fd[1]); + fd[0] = fd[1] = -1; + return -1; + } + return 0; + } else { + event_warn("%s: pipe", __func__); + } +#endif + +#ifdef _WIN32 +#define LOCAL_SOCKETPAIR_AF AF_INET +#else +#define LOCAL_SOCKETPAIR_AF AF_UNIX +#endif + if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, fd) == 0) { + if (evutil_fast_socket_nonblocking(fd[0]) < 0 || + evutil_fast_socket_nonblocking(fd[1]) < 0 || + evutil_fast_socket_closeonexec(fd[0]) < 0 || + evutil_fast_socket_closeonexec(fd[1]) < 0) { + evutil_closesocket(fd[0]); + evutil_closesocket(fd[1]); + fd[0] = fd[1] = -1; + return -1; + } + return 0; + } + fd[0] = fd[1] = -1; + return -1; +} + +/* Wrapper around eventfd on systems that provide it. Unlike the system + * eventfd, it always supports EVUTIL_EFD_CLOEXEC and EVUTIL_EFD_NONBLOCK as + * flags. Returns -1 on error or if eventfd is not supported. + */ +evutil_socket_t +evutil_eventfd_(unsigned initval, int flags) +{ +#if defined(EVENT__HAVE_EVENTFD) && defined(EVENT__HAVE_SYS_EVENTFD_H) + int r; +#if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) + r = eventfd(initval, flags); + if (r >= 0 || flags == 0) + return r; +#endif + r = eventfd(initval, 0); + if (r < 0) + return r; + if (flags & EVUTIL_EFD_CLOEXEC) { + if (evutil_fast_socket_closeonexec(r) < 0) { + evutil_closesocket(r); + return -1; + } + } + if (flags & EVUTIL_EFD_NONBLOCK) { + if (evutil_fast_socket_nonblocking(r) < 0) { + evutil_closesocket(r); + return -1; + } + } + return r; +#else + return -1; +#endif +} + +void +evutil_free_globals_(void) +{ + evutil_free_secure_rng_globals_(); + evutil_free_sock_err_globals(); +} diff --git a/probe-busybox/libevent-2.1.11-stable/evutil_rand.c b/probe-busybox/libevent-2.1.11-stable/evutil_rand.c new file mode 100644 index 00000000..8e9afdaa --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evutil_rand.c @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
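evutil_make_internal_pipe_() above is the classic self-pipe wake-up, and its comment explains why EAGAIN on the write side is harmless: a full pipe already guarantees the reader has a wake-up pending. A stripped-down sketch of that pattern using pipe2() (assumed available; illustrative only, not the libevent code path):

#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	int fd[2];
	char buf[64];
	ssize_t n;

	/* Both ends nonblocking and close-on-exec in a single syscall. */
	if (pipe2(fd, O_NONBLOCK | O_CLOEXEC) < 0) { perror("pipe2"); return 1; }

	/* Notifier side: EAGAIN means the pipe is already full, i.e. the
	 * reader is guaranteed to wake up anyway, so it can be ignored. */
	if (write(fd[1], "x", 1) < 0 && errno != EAGAIN)
		perror("write");

	/* Reader side: drain whatever accumulated. */
	while ((n = read(fd[0], buf, sizeof(buf))) > 0)
		;
	if (n < 0 && errno != EAGAIN)
		perror("read");

	close(fd[0]);
	close(fd[1]);
	return 0;
}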
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* This file has our secure PRNG code. On platforms that have arc4random(), + * we just use that. Otherwise, we include arc4random.c as a bunch of static + * functions, and wrap it lightly. We don't expose the arc4random*() APIs + * because A) they aren't in our namespace, and B) it's not nice to name your + * APIs after their implementations. We keep them in a separate file + * so that other people can rip it out and use it for whatever. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#include "util-internal.h" +#include "evthread-internal.h" + +#ifdef EVENT__HAVE_ARC4RANDOM +#include +#include +int +evutil_secure_rng_set_urandom_device_file(char *fname) +{ + (void) fname; + return -1; +} +int +evutil_secure_rng_init(void) +{ + /* call arc4random() now to force it to self-initialize */ + (void) arc4random(); + return 0; +} +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +evutil_secure_rng_global_setup_locks_(const int enable_locks) +{ + return 0; +} +#endif +static void +evutil_free_secure_rng_globals_locks(void) +{ +} + +static void +ev_arc4random_buf(void *buf, size_t n) +{ +#if defined(EVENT__HAVE_ARC4RANDOM_BUF) && !defined(__APPLE__) + arc4random_buf(buf, n); + return; +#else + unsigned char *b = buf; + +#if defined(EVENT__HAVE_ARC4RANDOM_BUF) + /* OSX 10.7 introducd arc4random_buf, so if you build your program + * there, you'll get surprised when older versions of OSX fail to run. + * To solve this, we can check whether the function pointer is set, + * and fall back otherwise. (OSX does this using some linker + * trickery.) + */ + { + void (*tptr)(void *,size_t) = + (void (*)(void*,size_t))arc4random_buf; + if (tptr != NULL) { + arc4random_buf(buf, n); + return; + } + } +#endif + /* Make sure that we start out with b at a 4-byte alignment; plenty + * of CPUs care about this for 32-bit access. 
*/ + if (n >= 4 && ((ev_uintptr_t)b) & 3) { + ev_uint32_t u = arc4random(); + int n_bytes = 4 - (((ev_uintptr_t)b) & 3); + memcpy(b, &u, n_bytes); + b += n_bytes; + n -= n_bytes; + } + while (n >= 4) { + *(ev_uint32_t*)b = arc4random(); + b += 4; + n -= 4; + } + if (n) { + ev_uint32_t u = arc4random(); + memcpy(b, &u, n); + } +#endif +} + +#else /* !EVENT__HAVE_ARC4RANDOM { */ + +#ifdef EVENT__ssize_t +#define ssize_t EVENT__ssize_t +#endif +#define ARC4RANDOM_EXPORT static +#define ARC4_LOCK_() EVLOCK_LOCK(arc4rand_lock, 0) +#define ARC4_UNLOCK_() EVLOCK_UNLOCK(arc4rand_lock, 0) +#ifndef EVENT__DISABLE_THREAD_SUPPORT +static void *arc4rand_lock; +#endif + +#define ARC4RANDOM_UINT32 ev_uint32_t +#define ARC4RANDOM_NOSTIR +#define ARC4RANDOM_NORANDOM +#define ARC4RANDOM_NOUNIFORM + +#include "./arc4random.c" + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +evutil_secure_rng_global_setup_locks_(const int enable_locks) +{ + EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0); + return 0; +} +#endif + +static void +evutil_free_secure_rng_globals_locks(void) +{ +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (arc4rand_lock != NULL) { + EVTHREAD_FREE_LOCK(arc4rand_lock, 0); + arc4rand_lock = NULL; + } +#endif + return; +} + +int +evutil_secure_rng_set_urandom_device_file(char *fname) +{ +#ifdef TRY_SEED_URANDOM + ARC4_LOCK_(); + arc4random_urandom_filename = fname; + ARC4_UNLOCK_(); +#endif + return 0; +} + +int +evutil_secure_rng_init(void) +{ + int val; + + ARC4_LOCK_(); + val = (!arc4_stir()) ? 0 : -1; + ARC4_UNLOCK_(); + return val; +} + +static void +ev_arc4random_buf(void *buf, size_t n) +{ + arc4random_buf(buf, n); +} + +#endif /* } !EVENT__HAVE_ARC4RANDOM */ + +void +evutil_secure_rng_get_bytes(void *buf, size_t n) +{ + ev_arc4random_buf(buf, n); +} + +#if !defined(EVENT__HAVE_ARC4RANDOM) || defined(EVENT__HAVE_ARC4RANDOM_ADDRANDOM) +void +evutil_secure_rng_add_bytes(const char *buf, size_t n) +{ + arc4random_addrandom((unsigned char*)buf, + n>(size_t)INT_MAX ? INT_MAX : (int)n); +} +#endif + +void +evutil_free_secure_rng_globals_(void) +{ + evutil_free_secure_rng_globals_locks(); +} diff --git a/probe-busybox/libevent-2.1.11-stable/evutil_time.c b/probe-busybox/libevent-2.1.11-stable/evutil_time.c new file mode 100644 index 00000000..c3a23589 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/evutil_time.c @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
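The fallback ev_arc4random_buf() above fills the output four bytes at a time, first consuming a partial word to reach 4-byte alignment and then finishing the odd tail with memcpy. A standalone copy of that fill loop, with a throwaway generator standing in for the real CSPRNG:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Throwaway 32-bit source; the real code draws from arc4random(). */
static uint32_t rand32(void) { return (uint32_t)rand() * 2654435761u; }

static void fill_buf(void *buf, size_t n)
{
	unsigned char *b = buf;

	/* Head: burn a partial word so the stores below are 4-byte aligned. */
	if (n >= 4 && ((uintptr_t)b & 3)) {
		uint32_t u = rand32();
		size_t head = 4 - ((uintptr_t)b & 3);
		memcpy(b, &u, head);
		b += head;
		n -= head;
	}
	/* Body: whole 32-bit words. */
	while (n >= 4) {
		uint32_t u = rand32();
		memcpy(b, &u, 4);
		b += 4;
		n -= 4;
	}
	/* Tail: at most three leftover bytes. */
	if (n) {
		uint32_t u = rand32();
		memcpy(b, &u, n);
	}
}

int main(void)
{
	unsigned char out[11];
	fill_buf(out, sizeof(out));
	return 0;
}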
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#endif + +#include +#ifdef EVENT__HAVE_STDLIB_H +#include +#endif +#include +#include +#ifndef EVENT__HAVE_GETTIMEOFDAY +#include +#endif +#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \ + !defined(_WIN32) +#include +#endif +#include +#include +#include + +/** evutil_usleep_() */ +#if defined(_WIN32) +#elif defined(EVENT__HAVE_NANOSLEEP) +#elif defined(EVENT__HAVE_USLEEP) +#include +#endif + +#include "event2/util.h" +#include "util-internal.h" +#include "log-internal.h" +#include "mm-internal.h" + +#ifndef EVENT__HAVE_GETTIMEOFDAY +/* No gettimeofday; this must be windows. */ +int +evutil_gettimeofday(struct timeval *tv, struct timezone *tz) +{ +#ifdef _MSC_VER +#define U64_LITERAL(n) n##ui64 +#else +#define U64_LITERAL(n) n##llu +#endif + + /* Conversion logic taken from Tor, which in turn took it + * from Perl. GetSystemTimeAsFileTime returns its value as + * an unaligned (!) 64-bit value containing the number of + * 100-nanosecond intervals since 1 January 1601 UTC. */ +#define EPOCH_BIAS U64_LITERAL(116444736000000000) +#define UNITS_PER_SEC U64_LITERAL(10000000) +#define USEC_PER_SEC U64_LITERAL(1000000) +#define UNITS_PER_USEC U64_LITERAL(10) + union { + FILETIME ft_ft; + ev_uint64_t ft_64; + } ft; + + if (tv == NULL) + return -1; + + GetSystemTimeAsFileTime(&ft.ft_ft); + + if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) { + /* Time before the unix epoch. */ + return -1; + } + ft.ft_64 -= EPOCH_BIAS; + tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC); + tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC); + return 0; +} +#endif + +#define MAX_SECONDS_IN_MSEC_LONG \ + (((LONG_MAX) - 999) / 1000) + +long +evutil_tv_to_msec_(const struct timeval *tv) +{ + if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG) + return -1; + + return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000); +} + +/* + Replacement for usleep on platforms that don't have one. Not guaranteed to + be any more finegrained than 1 msec. 
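The Windows gettimeofday() replacement above converts GetSystemTimeAsFileTime()'s 100-nanosecond ticks since 1601 into a Unix-epoch timeval by subtracting a fixed bias and dividing. The arithmetic can be sanity-checked in isolation (constants copied from the macros above; the sample FILETIME value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t EPOCH_BIAS     = 116444736000000000ULL; /* 1601 -> 1970, in 100ns units */
	const uint64_t UNITS_PER_SEC  = 10000000ULL;            /* 100ns ticks per second */
	const uint64_t UNITS_PER_USEC = 10ULL;
	const uint64_t USEC_PER_SEC   = 1000000ULL;

	/* A made-up FILETIME reading: 42.123456 s after the Unix epoch. */
	uint64_t ft = EPOCH_BIAS + 42ULL * UNITS_PER_SEC + 123456ULL * UNITS_PER_USEC;

	uint64_t since_epoch = ft - EPOCH_BIAS;
	long tv_sec  = (long)(since_epoch / UNITS_PER_SEC);
	long tv_usec = (long)((since_epoch / UNITS_PER_USEC) % USEC_PER_SEC);

	printf("tv_sec=%ld tv_usec=%ld\n", tv_sec, tv_usec);   /* 42 and 123456 */
	return 0;
}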
+ */ +void +evutil_usleep_(const struct timeval *tv) +{ + if (!tv) + return; +#if defined(_WIN32) + { + long msec = evutil_tv_to_msec_(tv); + Sleep((DWORD)msec); + } +#elif defined(EVENT__HAVE_NANOSLEEP) + { + struct timespec ts; + ts.tv_sec = tv->tv_sec; + ts.tv_nsec = tv->tv_usec*1000; + nanosleep(&ts, NULL); + } +#elif defined(EVENT__HAVE_USLEEP) + /* Some systems don't like to usleep more than 999999 usec */ + sleep(tv->tv_sec); + usleep(tv->tv_usec); +#else + { + struct timeval tv2 = *tv; + select(0, NULL, NULL, NULL, &tv2); + } +#endif +} + +int +evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm) +{ + static const char *DAYS[] = + { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; + static const char *MONTHS[] = + { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; + + time_t t = time(NULL); + +#ifndef _WIN32 + struct tm sys; +#endif + + /* If `tm` is null, set system's current time. */ + if (tm == NULL) { +#ifdef _WIN32 + /** TODO: detect _gmtime64()/_gmtime64_s() */ + tm = gmtime(&t); +#else + gmtime_r(&t, &sys); + tm = &sys; +#endif + } + + return evutil_snprintf( + date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT", + DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon], + 1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec); +} + +/* + This function assumes it's called repeatedly with a + not-actually-so-monotonic time source whose outputs are in 'tv'. It + implements a trivial ratcheting mechanism so that the values never go + backwards. + */ +static void +adjust_monotonic_time(struct evutil_monotonic_timer *base, + struct timeval *tv) +{ + evutil_timeradd(tv, &base->adjust_monotonic_clock, tv); + + if (evutil_timercmp(tv, &base->last_time, <)) { + /* Guess it wasn't monotonic after all. */ + struct timeval adjust; + evutil_timersub(&base->last_time, tv, &adjust); + evutil_timeradd(&adjust, &base->adjust_monotonic_clock, + &base->adjust_monotonic_clock); + *tv = base->last_time; + } + base->last_time = *tv; +} + +/* + Allocate a new struct evutil_monotonic_timer + */ +struct evutil_monotonic_timer * +evutil_monotonic_timer_new(void) +{ + struct evutil_monotonic_timer *p = NULL; + + p = mm_malloc(sizeof(*p)); + if (!p) goto done; + + memset(p, 0, sizeof(*p)); + + done: + return p; +} + +/* + Free a struct evutil_monotonic_timer + */ +void +evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer) +{ + if (timer) { + mm_free(timer); + } +} + +/* + Set up a struct evutil_monotonic_timer for initial use + */ +int +evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer, + int flags) +{ + return evutil_configure_monotonic_time_(timer, flags); +} + +/* + Query the current monotonic time + */ +int +evutil_gettime_monotonic(struct evutil_monotonic_timer *timer, + struct timeval *tp) +{ + return evutil_gettime_monotonic_(timer, tp); +} + + +#if defined(HAVE_POSIX_MONOTONIC) +/* ===== + The POSIX clock_gettime() interface provides a few ways to get at a + monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also + provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec. + + On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic. + Platforms don't agree about whether it should jump on a sleep/resume. + */ + +int +evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, + int flags) +{ + /* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to + * check for it at runtime, because some older kernel versions won't + * have it working. 
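adjust_monotonic_time() above never lets the reported clock move backwards: whenever a new reading is older than the last one, the shortfall is folded into a running offset and the previous value is returned instead. A compact sketch of that ratchet on bare microsecond counts (no libevent types):

#include <stdint.h>
#include <stdio.h>

static uint64_t last_us, adjust_us;

/* Feed in raw readings; get back values that never decrease. */
static uint64_t ratchet(uint64_t raw_us)
{
	uint64_t t = raw_us + adjust_us;
	if (t < last_us) {
		adjust_us += last_us - t;   /* remember how far the clock stepped back */
		t = last_us;
	}
	last_us = t;
	return t;
}

int main(void)
{
	uint64_t samples[] = { 100, 250, 240, 400 };   /* 240 jumps backwards */
	size_t i;
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%llu\n", (unsigned long long)ratchet(samples[i]));
	/* prints 100, 250, 250, 410: monotone, later readings shifted forward */
	return 0;
}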
*/ +#ifdef CLOCK_MONOTONIC_COARSE + const int precise = flags & EV_MONOT_PRECISE; +#endif + const int fallback = flags & EV_MONOT_FALLBACK; + struct timespec ts; + +#ifdef CLOCK_MONOTONIC_COARSE + if (CLOCK_MONOTONIC_COARSE < 0) { + /* Technically speaking, nothing keeps CLOCK_* from being + * negative (as far as I know). This check and the one below + * make sure that it's safe for us to use -1 as an "unset" + * value. */ + event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0"); + } + if (! precise && ! fallback) { + if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) { + base->monotonic_clock = CLOCK_MONOTONIC_COARSE; + return 0; + } + } +#endif + if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { + base->monotonic_clock = CLOCK_MONOTONIC; + return 0; + } + + if (CLOCK_MONOTONIC < 0) { + event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0"); + } + + base->monotonic_clock = -1; + return 0; +} + +int +evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, + struct timeval *tp) +{ + struct timespec ts; + + if (base->monotonic_clock < 0) { + if (evutil_gettimeofday(tp, NULL) < 0) + return -1; + adjust_monotonic_time(base, tp); + return 0; + } + + if (clock_gettime(base->monotonic_clock, &ts) == -1) + return -1; + tp->tv_sec = ts.tv_sec; + tp->tv_usec = ts.tv_nsec / 1000; + + return 0; +} +#endif + +#if defined(HAVE_MACH_MONOTONIC) +/* ====== + Apple is a little late to the POSIX party. And why not? Instead of + clock_gettime(), they provide mach_absolute_time(). Its units are not + fixed; we need to use mach_timebase_info() to get the right functions to + convert its units into nanoseconds. + + To all appearances, mach_absolute_time() seems to be honest-to-goodness + monotonic. Whether it stops during sleep or not is unspecified in + principle, and dependent on CPU architecture in practice. + */ + +int +evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, + int flags) +{ + const int fallback = flags & EV_MONOT_FALLBACK; + struct mach_timebase_info mi; + memset(base, 0, sizeof(*base)); + /* OSX has mach_absolute_time() */ + if (!fallback && + mach_timebase_info(&mi) == 0 && + mach_absolute_time() != 0) { + /* mach_timebase_info tells us how to convert + * mach_absolute_time() into nanoseconds, but we + * want to use microseconds instead. */ + mi.denom *= 1000; + memcpy(&base->mach_timebase_units, &mi, sizeof(mi)); + } else { + base->mach_timebase_units.numer = 0; + } + return 0; +} + +int +evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, + struct timeval *tp) +{ + ev_uint64_t abstime, usec; + if (base->mach_timebase_units.numer == 0) { + if (evutil_gettimeofday(tp, NULL) < 0) + return -1; + adjust_monotonic_time(base, tp); + return 0; + } + + abstime = mach_absolute_time(); + usec = (abstime * base->mach_timebase_units.numer) + / (base->mach_timebase_units.denom); + tp->tv_sec = usec / 1000000; + tp->tv_usec = usec % 1000000; + + return 0; +} +#endif + +#if defined(HAVE_WIN32_MONOTONIC) +/* ===== + Turn we now to Windows. Want monontonic time on Windows? + + Windows has QueryPerformanceCounter(), which gives time most high- + resolution time. It's a pity it's not so monotonic in practice; it's + also got some fun bugs, especially: with older Windowses, under + virtualizations, with funny hardware, on multiprocessor systems, and so + on. PEP418 [1] has a nice roundup of the issues here. + + There's GetTickCount64() on Vista and later, which gives a number of 1-msec + ticks since startup. 
The accuracy here might be as bad as 10-20 msec, I + hear. There's an undocumented function (NtSetTimerResolution) that + allegedly increases the accuracy. Good luck! + + There's also GetTickCount(), which is only 32 bits, but seems to be + supported on pre-Vista versions of Windows. Apparently, you can coax + another 14 bits out of it, giving you 2231 years before rollover. + + The less said about timeGetTime() the better. + + "We don't care. We don't have to. We're the Phone Company." + -- Lily Tomlin, SNL + + Our strategy, if precise timers are turned off, is to just use the best + GetTickCount equivalent available. If we've been asked for precise timing, + then we mostly[2] assume that GetTickCount is monotonic, and correct + GetPerformanceCounter to approximate it. + + [1] http://www.python.org/dev/peps/pep-0418 + [2] Of course, we feed the Windows stuff into adjust_monotonic_time() + anyway, just in case it isn't. + + */ +/* + Parts of our logic in the win32 timer code here are closely based on + BitTorrent's libUTP library. That code is subject to the following + license: + + Copyright (c) 2010 BitTorrent, Inc. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +static ev_uint64_t +evutil_GetTickCount_(struct evutil_monotonic_timer *base) +{ + if (base->GetTickCount64_fn) { + /* Let's just use GetTickCount64 if we can. */ + return base->GetTickCount64_fn(); + } else if (base->GetTickCount_fn) { + /* Greg Hazel assures me that this works, that BitTorrent has + * done it for years, and this it won't turn around and + * bite us. He says they found it on some game programmers' + * forum some time around 2007. + */ + ev_uint64_t v = base->GetTickCount_fn(); + return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000); + } else { + /* Here's the fallback implementation. We have to use + * GetTickCount() with its given signature, so we only get + * 32 bits worth of milliseconds, which will roll ove every + * 49 days or so. 
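evutil_GetTickCount_() above widens the 32-bit GetTickCount() to 64 bits by bumping an accumulator each time the counter wraps (roughly every 49.7 days). A platform-neutral sketch of the wrap detection (module-static state purely for brevity):

#include <stdint.h>

static uint32_t last_ticks;     /* previous 32-bit reading */
static uint64_t high_part;      /* accumulated wrap-arounds, in ms */

/* Extend a 32-bit millisecond tick counter to 64 bits. */
static uint64_t extend_ticks(uint32_t ticks)
{
	if (ticks < last_ticks)                 /* counter wrapped past 2^32 ms */
		high_part += (uint64_t)1 << 32;
	last_ticks = ticks;
	return high_part + ticks;
}

int main(void)
{
	uint64_t a = extend_ticks(0xFFFFFFF0u); /* just before the wrap */
	uint64_t b = extend_ticks(0x00000010u); /* just after the wrap  */
	return (b > a) ? 0 : 1;                 /* stays monotonic across the wrap */
}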
*/ + DWORD ticks = GetTickCount(); + if (ticks < base->last_tick_count) { + base->adjust_tick_count += ((ev_uint64_t)1) << 32; + } + base->last_tick_count = ticks; + return ticks + base->adjust_tick_count; + } +} + +int +evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, + int flags) +{ + const int precise = flags & EV_MONOT_PRECISE; + const int fallback = flags & EV_MONOT_FALLBACK; + HANDLE h; + memset(base, 0, sizeof(*base)); + + h = evutil_load_windows_system_library_(TEXT("kernel32.dll")); + if (h != NULL && !fallback) { + base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64"); + base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount"); + } + + base->first_tick = base->last_tick_count = evutil_GetTickCount_(base); + if (precise && !fallback) { + LARGE_INTEGER freq; + if (QueryPerformanceFrequency(&freq)) { + LARGE_INTEGER counter; + QueryPerformanceCounter(&counter); + base->first_counter = counter.QuadPart; + base->usec_per_count = 1.0e6 / freq.QuadPart; + base->use_performance_counter = 1; + } + } + + return 0; +} + +static inline ev_int64_t +abs64(ev_int64_t i) +{ + return i < 0 ? -i : i; +} + + +int +evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, + struct timeval *tp) +{ + ev_uint64_t ticks = evutil_GetTickCount_(base); + if (base->use_performance_counter) { + /* Here's a trick we took from BitTorrent's libutp, at Greg + * Hazel's recommendation. We use QueryPerformanceCounter for + * our high-resolution timer, but use GetTickCount*() to keep + * it sane, and adjust_monotonic_time() to keep it monotonic. + */ + LARGE_INTEGER counter; + ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed; + QueryPerformanceCounter(&counter); + counter_elapsed = (ev_int64_t) + (counter.QuadPart - base->first_counter); + ticks_elapsed = ticks - base->first_tick; + /* TODO: This may upset VC6. If you need this to work with + * VC6, please supply an appropriate patch. */ + counter_usec_elapsed = (ev_int64_t) + (counter_elapsed * base->usec_per_count); + + if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) { + /* It appears that the QueryPerformanceCounter() + * result is more than 1 second away from + * GetTickCount() result. Let's adjust it to be as + * accurate as we can; adjust_monotnonic_time() below + * will keep it monotonic. */ + counter_usec_elapsed = ticks_elapsed * 1000; + base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count); + } + tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000); + tp->tv_usec = counter_usec_elapsed % 1000000; + + } else { + /* We're just using GetTickCount(). */ + tp->tv_sec = (time_t) (ticks / 1000); + tp->tv_usec = (ticks % 1000) * 1000; + } + adjust_monotonic_time(base, tp); + + return 0; +} +#endif + +#if defined(HAVE_FALLBACK_MONOTONIC) +/* ===== + And if none of the other options work, let's just use gettimeofday(), and + ratchet it forward so that it acts like a monotonic timer, whether it + wants to or not. 
+ */ + +int +evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, + int precise) +{ + memset(base, 0, sizeof(*base)); + return 0; +} + +int +evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, + struct timeval *tp) +{ + if (evutil_gettimeofday(tp, NULL) < 0) + return -1; + adjust_monotonic_time(base, tp); + return 0; + +} +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/ht-internal.h b/probe-busybox/libevent-2.1.11-stable/ht-internal.h new file mode 100644 index 00000000..50375bba --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/ht-internal.h @@ -0,0 +1,487 @@ +/* Copyright 2002 Christopher Clark */ +/* Copyright 2005-2012 Nick Mathewson */ +/* Copyright 2009-2012 Niels Provos and Nick Mathewson */ +/* See license at end. */ + +/* Based on ideas by Christopher Clark and interfaces from Niels Provos. */ + +#ifndef HT_INTERNAL_H_INCLUDED_ +#define HT_INTERNAL_H_INCLUDED_ + +#define HT_HEAD(name, type) \ + struct name { \ + /* The hash table itself. */ \ + struct type **hth_table; \ + /* How long is the hash table? */ \ + unsigned hth_table_length; \ + /* How many elements does the table contain? */ \ + unsigned hth_n_entries; \ + /* How many elements will we allow in the table before resizing it? */ \ + unsigned hth_load_limit; \ + /* Position of hth_table_length in the primes table. */ \ + int hth_prime_idx; \ + } + +#define HT_INITIALIZER() \ + { NULL, 0, 0, 0, -1 } + +#ifdef HT_NO_CACHE_HASH_VALUES +#define HT_ENTRY(type) \ + struct { \ + struct type *hte_next; \ + } +#else +#define HT_ENTRY(type) \ + struct { \ + struct type *hte_next; \ + unsigned hte_hash; \ + } +#endif + +#define HT_EMPTY(head) \ + ((head)->hth_n_entries == 0) + +/* How many elements in 'head'? */ +#define HT_SIZE(head) \ + ((head)->hth_n_entries) + +/* Return memory usage for a hashtable (not counting the entries themselves) */ +#define HT_MEM_USAGE(head) \ + (sizeof(*head) + (head)->hth_table_length * sizeof(void*)) + +#define HT_FIND(name, head, elm) name##_HT_FIND((head), (elm)) +#define HT_INSERT(name, head, elm) name##_HT_INSERT((head), (elm)) +#define HT_REPLACE(name, head, elm) name##_HT_REPLACE((head), (elm)) +#define HT_REMOVE(name, head, elm) name##_HT_REMOVE((head), (elm)) +#define HT_START(name, head) name##_HT_START(head) +#define HT_NEXT(name, head, elm) name##_HT_NEXT((head), (elm)) +#define HT_NEXT_RMV(name, head, elm) name##_HT_NEXT_RMV((head), (elm)) +#define HT_CLEAR(name, head) name##_HT_CLEAR(head) +#define HT_INIT(name, head) name##_HT_INIT(head) +/* Helper: */ +static inline unsigned +ht_improve_hash_(unsigned h) +{ + /* Aim to protect against poor hash functions by adding logic here + * - logic taken from java 1.4 hashtable source */ + h += ~(h << 9); + h ^= ((h >> 14) | (h << 18)); /* >>> */ + h += (h << 4); + h ^= ((h >> 10) | (h << 22)); /* >>> */ + return h; +} + +#if 0 +/** Basic string hash function, from Java standard String.hashCode(). */ +static inline unsigned +ht_string_hash_(const char *s) +{ + unsigned h = 0; + int m = 1; + while (*s) { + h += ((signed char)*s++)*m; + m = (m<<5)-1; /* m *= 31 */ + } + return h; +} +#endif + +/** Basic string hash function, from Python's str.__hash__() */ +static inline unsigned +ht_string_hash_(const char *s) +{ + unsigned h; + const unsigned char *cp = (const unsigned char *)s; + h = *cp << 7; + while (*cp) { + h = (1000003*h) ^ *cp++; + } + /* This conversion truncates the length of the string, but that's ok. 
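ht_string_hash_() above is the old Python string hash: shift the first byte, multiply-and-xor across the rest, then fold in the length; ht_improve_hash_() adds extra mixing for weak hash functions. A standalone copy of the string hash for experimentation (same arithmetic, plain unsigned):

#include <stdio.h>

/* Same scheme as ht_string_hash_ above (Python 2's str.__hash__). */
static unsigned str_hash(const char *s)
{
	const unsigned char *cp = (const unsigned char *)s;
	unsigned h = *cp << 7;
	while (*cp)
		h = (1000003 * h) ^ *cp++;
	h ^= (unsigned)(cp - (const unsigned char *)s);   /* fold in the length */
	return h;
}

int main(void)
{
	printf("%u\n", str_hash("Connection"));
	printf("%u\n", str_hash("connection"));   /* different input, (almost certainly) different hash */
	return 0;
}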
*/ + h ^= (unsigned)(cp-(const unsigned char*)s); + return h; +} + +#ifndef HT_NO_CACHE_HASH_VALUES +#define HT_SET_HASH_(elm, field, hashfn) \ + do { (elm)->field.hte_hash = hashfn(elm); } while (0) +#define HT_SET_HASHVAL_(elm, field, val) \ + do { (elm)->field.hte_hash = (val); } while (0) +#define HT_ELT_HASH_(elm, field, hashfn) \ + ((elm)->field.hte_hash) +#else +#define HT_SET_HASH_(elm, field, hashfn) \ + ((void)0) +#define HT_ELT_HASH_(elm, field, hashfn) \ + (hashfn(elm)) +#define HT_SET_HASHVAL_(elm, field, val) \ + ((void)0) +#endif + +/* Helper: alias for the bucket containing 'elm'. */ +#define HT_BUCKET_(head, field, elm, hashfn) \ + ((head)->hth_table[HT_ELT_HASH_(elm,field,hashfn) % head->hth_table_length]) + +#define HT_FOREACH(x, name, head) \ + for ((x) = HT_START(name, head); \ + (x) != NULL; \ + (x) = HT_NEXT(name, head, x)) + +#define HT_PROTOTYPE(name, type, field, hashfn, eqfn) \ + int name##_HT_GROW(struct name *ht, unsigned min_capacity); \ + void name##_HT_CLEAR(struct name *ht); \ + int name##_HT_REP_IS_BAD_(const struct name *ht); \ + static inline void \ + name##_HT_INIT(struct name *head) { \ + head->hth_table_length = 0; \ + head->hth_table = NULL; \ + head->hth_n_entries = 0; \ + head->hth_load_limit = 0; \ + head->hth_prime_idx = -1; \ + } \ + /* Helper: returns a pointer to the right location in the table \ + * 'head' to find or insert the element 'elm'. */ \ + static inline struct type ** \ + name##_HT_FIND_P_(struct name *head, struct type *elm) \ + { \ + struct type **p; \ + if (!head->hth_table) \ + return NULL; \ + p = &HT_BUCKET_(head, field, elm, hashfn); \ + while (*p) { \ + if (eqfn(*p, elm)) \ + return p; \ + p = &(*p)->field.hte_next; \ + } \ + return p; \ + } \ + /* Return a pointer to the element in the table 'head' matching 'elm', \ + * or NULL if no such element exists */ \ + static inline struct type * \ + name##_HT_FIND(const struct name *head, struct type *elm) \ + { \ + struct type **p; \ + struct name *h = (struct name *) head; \ + HT_SET_HASH_(elm, field, hashfn); \ + p = name##_HT_FIND_P_(h, elm); \ + return p ? *p : NULL; \ + } \ + /* Insert the element 'elm' into the table 'head'. Do not call this \ + * function if the table might already contain a matching element. */ \ + static inline void \ + name##_HT_INSERT(struct name *head, struct type *elm) \ + { \ + struct type **p; \ + if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \ + name##_HT_GROW(head, head->hth_n_entries+1); \ + ++head->hth_n_entries; \ + HT_SET_HASH_(elm, field, hashfn); \ + p = &HT_BUCKET_(head, field, elm, hashfn); \ + elm->field.hte_next = *p; \ + *p = elm; \ + } \ + /* Insert the element 'elm' into the table 'head'. If there already \ + * a matching element in the table, replace that element and return \ + * it. */ \ + static inline struct type * \ + name##_HT_REPLACE(struct name *head, struct type *elm) \ + { \ + struct type **p, *r; \ + if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \ + name##_HT_GROW(head, head->hth_n_entries+1); \ + HT_SET_HASH_(elm, field, hashfn); \ + p = name##_HT_FIND_P_(head, elm); \ + r = *p; \ + *p = elm; \ + if (r && (r!=elm)) { \ + elm->field.hte_next = r->field.hte_next; \ + r->field.hte_next = NULL; \ + return r; \ + } else { \ + ++head->hth_n_entries; \ + return NULL; \ + } \ + } \ + /* Remove any element matching 'elm' from the table 'head'. If such \ + * an element is found, return it; otherwise return NULL. 
*/ \ + static inline struct type * \ + name##_HT_REMOVE(struct name *head, struct type *elm) \ + { \ + struct type **p, *r; \ + HT_SET_HASH_(elm, field, hashfn); \ + p = name##_HT_FIND_P_(head,elm); \ + if (!p || !*p) \ + return NULL; \ + r = *p; \ + *p = r->field.hte_next; \ + r->field.hte_next = NULL; \ + --head->hth_n_entries; \ + return r; \ + } \ + /* Invoke the function 'fn' on every element of the table 'head', \ + * using 'data' as its second argument. If the function returns \ + * nonzero, remove the most recently examined element before invoking \ + * the function again. */ \ + static inline void \ + name##_HT_FOREACH_FN(struct name *head, \ + int (*fn)(struct type *, void *), \ + void *data) \ + { \ + unsigned idx; \ + struct type **p, **nextp, *next; \ + if (!head->hth_table) \ + return; \ + for (idx=0; idx < head->hth_table_length; ++idx) { \ + p = &head->hth_table[idx]; \ + while (*p) { \ + nextp = &(*p)->field.hte_next; \ + next = *nextp; \ + if (fn(*p, data)) { \ + --head->hth_n_entries; \ + *p = next; \ + } else { \ + p = nextp; \ + } \ + } \ + } \ + } \ + /* Return a pointer to the first element in the table 'head', under \ + * an arbitrary order. This order is stable under remove operations, \ + * but not under others. If the table is empty, return NULL. */ \ + static inline struct type ** \ + name##_HT_START(struct name *head) \ + { \ + unsigned b = 0; \ + while (b < head->hth_table_length) { \ + if (head->hth_table[b]) \ + return &head->hth_table[b]; \ + ++b; \ + } \ + return NULL; \ + } \ + /* Return the next element in 'head' after 'elm', under the arbitrary \ + * order used by HT_START. If there are no more elements, return \ + * NULL. If 'elm' is to be removed from the table, you must call \ + * this function for the next value before you remove it. \ + */ \ + static inline struct type ** \ + name##_HT_NEXT(struct name *head, struct type **elm) \ + { \ + if ((*elm)->field.hte_next) { \ + return &(*elm)->field.hte_next; \ + } else { \ + unsigned b = (HT_ELT_HASH_(*elm, field, hashfn) % head->hth_table_length)+1; \ + while (b < head->hth_table_length) { \ + if (head->hth_table[b]) \ + return &head->hth_table[b]; \ + ++b; \ + } \ + return NULL; \ + } \ + } \ + static inline struct type ** \ + name##_HT_NEXT_RMV(struct name *head, struct type **elm) \ + { \ + unsigned h = HT_ELT_HASH_(*elm, field, hashfn); \ + *elm = (*elm)->field.hte_next; \ + --head->hth_n_entries; \ + if (*elm) { \ + return elm; \ + } else { \ + unsigned b = (h % head->hth_table_length)+1; \ + while (b < head->hth_table_length) { \ + if (head->hth_table[b]) \ + return &head->hth_table[b]; \ + ++b; \ + } \ + return NULL; \ + } \ + } + +#define HT_GENERATE(name, type, field, hashfn, eqfn, load, mallocfn, \ + reallocfn, freefn) \ + static unsigned name##_PRIMES[] = { \ + 53, 97, 193, 389, \ + 769, 1543, 3079, 6151, \ + 12289, 24593, 49157, 98317, \ + 196613, 393241, 786433, 1572869, \ + 3145739, 6291469, 12582917, 25165843, \ + 50331653, 100663319, 201326611, 402653189, \ + 805306457, 1610612741 \ + }; \ + static unsigned name##_N_PRIMES = \ + (unsigned)(sizeof(name##_PRIMES)/sizeof(name##_PRIMES[0])); \ + /* Expand the internal table of 'head' until it is large enough to \ + * hold 'size' elements. Return 0 on success, -1 on allocation \ + * failure. 
*/ \ + int \ + name##_HT_GROW(struct name *head, unsigned size) \ + { \ + unsigned new_len, new_load_limit; \ + int prime_idx; \ + struct type **new_table; \ + if (head->hth_prime_idx == (int)name##_N_PRIMES - 1) \ + return 0; \ + if (head->hth_load_limit > size) \ + return 0; \ + prime_idx = head->hth_prime_idx; \ + do { \ + new_len = name##_PRIMES[++prime_idx]; \ + new_load_limit = (unsigned)(load*new_len); \ + } while (new_load_limit <= size && \ + prime_idx < (int)name##_N_PRIMES); \ + if ((new_table = mallocfn(new_len*sizeof(struct type*)))) { \ + unsigned b; \ + memset(new_table, 0, new_len*sizeof(struct type*)); \ + for (b = 0; b < head->hth_table_length; ++b) { \ + struct type *elm, *next; \ + unsigned b2; \ + elm = head->hth_table[b]; \ + while (elm) { \ + next = elm->field.hte_next; \ + b2 = HT_ELT_HASH_(elm, field, hashfn) % new_len; \ + elm->field.hte_next = new_table[b2]; \ + new_table[b2] = elm; \ + elm = next; \ + } \ + } \ + if (head->hth_table) \ + freefn(head->hth_table); \ + head->hth_table = new_table; \ + } else { \ + unsigned b, b2; \ + new_table = reallocfn(head->hth_table, new_len*sizeof(struct type*)); \ + if (!new_table) return -1; \ + memset(new_table + head->hth_table_length, 0, \ + (new_len - head->hth_table_length)*sizeof(struct type*)); \ + for (b=0; b < head->hth_table_length; ++b) { \ + struct type *e, **pE; \ + for (pE = &new_table[b], e = *pE; e != NULL; e = *pE) { \ + b2 = HT_ELT_HASH_(e, field, hashfn) % new_len; \ + if (b2 == b) { \ + pE = &e->field.hte_next; \ + } else { \ + *pE = e->field.hte_next; \ + e->field.hte_next = new_table[b2]; \ + new_table[b2] = e; \ + } \ + } \ + } \ + head->hth_table = new_table; \ + } \ + head->hth_table_length = new_len; \ + head->hth_prime_idx = prime_idx; \ + head->hth_load_limit = new_load_limit; \ + return 0; \ + } \ + /* Free all storage held by 'head'. Does not free 'head' itself, or \ + * individual elements. */ \ + void \ + name##_HT_CLEAR(struct name *head) \ + { \ + if (head->hth_table) \ + freefn(head->hth_table); \ + name##_HT_INIT(head); \ + } \ + /* Debugging helper: return false iff the representation of 'head' is \ + * internally consistent. 
*/ \ + int \ + name##_HT_REP_IS_BAD_(const struct name *head) \ + { \ + unsigned n, i; \ + struct type *elm; \ + if (!head->hth_table_length) { \ + if (!head->hth_table && !head->hth_n_entries && \ + !head->hth_load_limit && head->hth_prime_idx == -1) \ + return 0; \ + else \ + return 1; \ + } \ + if (!head->hth_table || head->hth_prime_idx < 0 || \ + !head->hth_load_limit) \ + return 2; \ + if (head->hth_n_entries > head->hth_load_limit) \ + return 3; \ + if (head->hth_table_length != name##_PRIMES[head->hth_prime_idx]) \ + return 4; \ + if (head->hth_load_limit != (unsigned)(load*head->hth_table_length)) \ + return 5; \ + for (n = i = 0; i < head->hth_table_length; ++i) { \ + for (elm = head->hth_table[i]; elm; elm = elm->field.hte_next) { \ + if (HT_ELT_HASH_(elm, field, hashfn) != hashfn(elm)) \ + return 1000 + i; \ + if ((HT_ELT_HASH_(elm, field, hashfn) % head->hth_table_length) != i) \ + return 10000 + i; \ + ++n; \ + } \ + } \ + if (n != head->hth_n_entries) \ + return 6; \ + return 0; \ + } + +/** Implements an over-optimized "find and insert if absent" block; + * not meant for direct usage by typical code, or usage outside the critical + * path.*/ +#define HT_FIND_OR_INSERT_(name, field, hashfn, head, eltype, elm, var, y, n) \ + { \ + struct name *var##_head_ = head; \ + struct eltype **var; \ + if (!var##_head_->hth_table || \ + var##_head_->hth_n_entries >= var##_head_->hth_load_limit) \ + name##_HT_GROW(var##_head_, var##_head_->hth_n_entries+1); \ + HT_SET_HASH_((elm), field, hashfn); \ + var = name##_HT_FIND_P_(var##_head_, (elm)); \ + if (*var) { \ + y; \ + } else { \ + n; \ + } \ + } +#define HT_FOI_INSERT_(field, head, elm, newent, var) \ + { \ + HT_SET_HASHVAL_(newent, field, (elm)->field.hte_hash); \ + newent->field.hte_next = NULL; \ + *var = newent; \ + ++((head)->hth_n_entries); \ + } + +/* + * Copyright 2005, Nick Mathewson. Implementation logic is adapted from code + * by Christopher Clark, retrofit to allow drop-in memory management, and to + * use the same interface as Niels Provos's tree.h. This is probably still + * a derived work, so the original license below still applies. + * + * Copyright (c) 2002, Christopher Clark + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
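The HT_* macros above generate a complete intrusive hash table per (name, type) pair: callers embed an HT_ENTRY in their struct and then instantiate the functions with HT_PROTOTYPE and HT_GENERATE. A rough usage sketch; the struct, names and 0.5 load factor are illustrative, and the header is assumed to be reachable as "ht-internal.h":

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "ht-internal.h"

struct item {
	char key[32];
	int value;
	HT_ENTRY(item) node;            /* hook used by the generated table */
};

static unsigned item_hash(const struct item *it) { return ht_string_hash_(it->key); }
static int item_eq(const struct item *a, const struct item *b)
{ return strcmp(a->key, b->key) == 0; }

HT_HEAD(item_map, item);
HT_PROTOTYPE(item_map, item, node, item_hash, item_eq)
HT_GENERATE(item_map, item, node, item_hash, item_eq, 0.5, malloc, realloc, free)

int main(void)
{
	struct item_map map = HT_INITIALIZER();
	struct item a = { "retry_max", 3, { NULL } }, probe, *found;

	HT_INSERT(item_map, &map, &a);
	strcpy(probe.key, "retry_max");
	found = HT_FIND(item_map, &map, &probe);
	printf("%d\n", found ? found->value : -1);   /* 3 */
	HT_CLEAR(item_map, &map);
	return 0;
}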
IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#endif + diff --git a/probe-busybox/libevent-2.1.11-stable/http-internal.h b/probe-busybox/libevent-2.1.11-stable/http-internal.h new file mode 100644 index 00000000..feaf436d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/http-internal.h @@ -0,0 +1,206 @@ +/* + * Copyright 2001-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * This header file contains definitions for dealing with HTTP requests + * that are internal to libevent. As user of the library, you should not + * need to know about these. + */ + +#ifndef HTTP_INTERNAL_H_INCLUDED_ +#define HTTP_INTERNAL_H_INCLUDED_ + +#include "event2/event_struct.h" +#include "util-internal.h" +#include "defer-internal.h" + +#define HTTP_CONNECT_TIMEOUT 45 +#define HTTP_WRITE_TIMEOUT 50 +#define HTTP_READ_TIMEOUT 50 + +enum message_read_status { + ALL_DATA_READ = 1, + MORE_DATA_EXPECTED = 0, + DATA_CORRUPTED = -1, + REQUEST_CANCELED = -2, + DATA_TOO_LONG = -3 +}; + +struct evbuffer; +struct addrinfo; +struct evhttp_request; + +/* Indicates an unknown request method. */ +#define EVHTTP_REQ_UNKNOWN_ (1<<15) + +enum evhttp_connection_state { + EVCON_DISCONNECTED, /**< not currently connected not trying either*/ + EVCON_CONNECTING, /**< tries to currently connect */ + EVCON_IDLE, /**< connection is established */ + EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or + **< Status-Line (outgoing conn) */ + EVCON_READING_HEADERS, /**< reading request/response headers */ + EVCON_READING_BODY, /**< reading request/response body */ + EVCON_READING_TRAILER, /**< reading request/response chunked trailer */ + EVCON_WRITING /**< writing request/response headers/body */ +}; + +struct event_base; + +/* A client or server connection. 
*/ +struct evhttp_connection { + /* we use this tailq only if this connection was created for an http + * server */ + TAILQ_ENTRY(evhttp_connection) next; + + evutil_socket_t fd; + struct bufferevent *bufev; + + struct event retry_ev; /* for retrying connects */ + + char *bind_address; /* address to use for binding the src */ + ev_uint16_t bind_port; /* local port for binding the src */ + + char *address; /* address to connect to */ + ev_uint16_t port; + + size_t max_headers_size; + ev_uint64_t max_body_size; + + int flags; +#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */ +#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */ +#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */ +/* set when we want to auto free the connection */ +#define EVHTTP_CON_AUTOFREE EVHTTP_CON_PUBLIC_FLAGS_END +/* Installed when attempt to read HTTP error after write failed, see + * EVHTTP_CON_READ_ON_WRITE_ERROR */ +#define EVHTTP_CON_READING_ERROR (EVHTTP_CON_AUTOFREE << 1) + + struct timeval timeout; /* timeout for events */ + int retry_cnt; /* retry count */ + int retry_max; /* maximum number of retries */ + struct timeval initial_retry_timeout; /* Timeout for low long to wait + * after first failing attempt + * before retry */ + + enum evhttp_connection_state state; + + /* for server connections, the http server they are connected with */ + struct evhttp *http_server; + + TAILQ_HEAD(evcon_requestq, evhttp_request) requests; + + void (*cb)(struct evhttp_connection *, void *); + void *cb_arg; + + void (*closecb)(struct evhttp_connection *, void *); + void *closecb_arg; + + struct event_callback read_more_deferred_cb; + + struct event_base *base; + struct evdns_base *dns_base; + int ai_family; +}; + +/* A callback for an http server */ +struct evhttp_cb { + TAILQ_ENTRY(evhttp_cb) next; + + char *what; + + void (*cb)(struct evhttp_request *req, void *); + void *cbarg; +}; + +/* both the http server as well as the rpc system need to queue connections */ +TAILQ_HEAD(evconq, evhttp_connection); + +/* each bound socket is stored in one of these */ +struct evhttp_bound_socket { + TAILQ_ENTRY(evhttp_bound_socket) next; + + struct evconnlistener *listener; +}; + +/* server alias list item. */ +struct evhttp_server_alias { + TAILQ_ENTRY(evhttp_server_alias) next; + + char *alias; /* the server alias. */ +}; + +struct evhttp { + /* Next vhost, if this is a vhost. */ + TAILQ_ENTRY(evhttp) next_vhost; + + /* All listeners for this host */ + TAILQ_HEAD(boundq, evhttp_bound_socket) sockets; + + TAILQ_HEAD(httpcbq, evhttp_cb) callbacks; + + /* All live connections on this host. */ + struct evconq connections; + + TAILQ_HEAD(vhostsq, evhttp) virtualhosts; + + TAILQ_HEAD(aliasq, evhttp_server_alias) aliases; + + /* NULL if this server is not a vhost */ + char *vhost_pattern; + + struct timeval timeout; + + size_t default_max_headers_size; + ev_uint64_t default_max_body_size; + int flags; + const char *default_content_type; + + /* Bitmask of all HTTP methods that we accept and pass to user + * callbacks. */ + ev_uint16_t allowed_methods; + + /* Fallback callback if all the other callbacks for this connection + don't match. */ + void (*gencb)(struct evhttp_request *req, void *); + void *gencbarg; + struct bufferevent* (*bevcb)(struct event_base *, void *); + void *bevcbarg; + + struct event_base *base; +}; + +/* XXX most of these functions could be static. 
*/ + +/* resets the connection; can be reused for more requests */ +void evhttp_connection_reset_(struct evhttp_connection *); + +/* connects if necessary */ +int evhttp_connection_connect_(struct evhttp_connection *); + +enum evhttp_request_error; +/* notifies the current request that it failed; resets connection */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_fail_(struct evhttp_connection *, + enum evhttp_request_error error); + +enum message_read_status; + +EVENT2_EXPORT_SYMBOL +enum message_read_status evhttp_parse_firstline_(struct evhttp_request *, struct evbuffer*); +EVENT2_EXPORT_SYMBOL +enum message_read_status evhttp_parse_headers_(struct evhttp_request *, struct evbuffer*); + +void evhttp_start_read_(struct evhttp_connection *); +void evhttp_start_write_(struct evhttp_connection *); + +/* response sending HTML the data in the buffer */ +void evhttp_response_code_(struct evhttp_request *, int, const char *); +void evhttp_send_page_(struct evhttp_request *, struct evbuffer *); + +EVENT2_EXPORT_SYMBOL +int evhttp_decode_uri_internal(const char *uri, size_t length, + char *ret, int decode_plus); + +#endif /* HTTP_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/http.c b/probe-busybox/libevent-2.1.11-stable/http.c new file mode 100644 index 00000000..5331602a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/http.c @@ -0,0 +1,5117 @@ +/* + * Copyright (c) 2002-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_SYS_PARAM_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif + +#ifdef HAVE_SYS_IOCCOM_H +#include +#endif +#ifdef EVENT__HAVE_SYS_RESOURCE_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef EVENT__HAVE_SYS_WAIT_H +#include +#endif + +#ifndef _WIN32 +#include +#include +#else /* _WIN32 */ +#include +#include +#endif /* _WIN32 */ + +#ifdef EVENT__HAVE_SYS_UN_H +#include +#endif +#ifdef EVENT__HAVE_AFUNIX_H +#include +#endif + +#include + +#ifdef EVENT__HAVE_NETINET_IN_H +#include +#endif +#ifdef EVENT__HAVE_ARPA_INET_H +#include +#endif +#ifdef EVENT__HAVE_NETDB_H +#include +#endif + +#ifdef _WIN32 +#include +#endif + +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif /* !_WIN32 */ +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif + +#undef timeout_pending +#undef timeout_initialized + +#include "strlcpy-internal.h" +#include "event2/http.h" +#include "event2/event.h" +#include "event2/buffer.h" +#include "event2/bufferevent.h" +#include "event2/http_struct.h" +#include "event2/http_compat.h" +#include "event2/util.h" +#include "event2/listener.h" +#include "log-internal.h" +#include "util-internal.h" +#include "http-internal.h" +#include "mm-internal.h" +#include "bufferevent-internal.h" + +#ifndef EVENT__HAVE_GETNAMEINFO +#define NI_MAXSERV 32 +#define NI_MAXHOST 1025 + +#ifndef NI_NUMERICHOST +#define NI_NUMERICHOST 1 +#endif + +#ifndef NI_NUMERICSERV +#define NI_NUMERICSERV 2 +#endif + +static int +fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host, + size_t hostlen, char *serv, size_t servlen, int flags) +{ + struct sockaddr_in *sin = (struct sockaddr_in *)sa; + + if (serv != NULL) { + char tmpserv[16]; + evutil_snprintf(tmpserv, sizeof(tmpserv), + "%d", ntohs(sin->sin_port)); + if (strlcpy(serv, tmpserv, servlen) >= servlen) + return (-1); + } + + if (host != NULL) { + if (flags & NI_NUMERICHOST) { + if (strlcpy(host, inet_ntoa(sin->sin_addr), + hostlen) >= hostlen) + return (-1); + else + return (0); + } else { + struct hostent *hp; + hp = gethostbyaddr((char *)&sin->sin_addr, + sizeof(struct in_addr), AF_INET); + if (hp == NULL) + return (-2); + + if (strlcpy(host, hp->h_name, hostlen) >= hostlen) + return (-1); + else + return (0); + } + } + return (0); +} + +#endif + +#define REQ_VERSION_BEFORE(req, major_v, minor_v) \ + ((req)->major < (major_v) || \ + ((req)->major == (major_v) && (req)->minor < (minor_v))) + +#define REQ_VERSION_ATLEAST(req, major_v, minor_v) \ + ((req)->major > (major_v) || \ + ((req)->major == (major_v) && (req)->minor >= (minor_v))) + +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +extern int debug; + +static evutil_socket_t bind_socket_ai(struct evutil_addrinfo *, int reuse); +static evutil_socket_t bind_socket(const char *, ev_uint16_t, int reuse); +static void name_from_addr(struct sockaddr *, ev_socklen_t, char **, char **); +static struct evhttp_uri *evhttp_uri_parse_authority(char *source_uri); +static int evhttp_associate_new_request_with_connection( + struct evhttp_connection *evcon); +static void evhttp_connection_start_detectclose( + struct evhttp_connection *evcon); +static void evhttp_connection_stop_detectclose( + struct evhttp_connection *evcon); +static void evhttp_request_dispatch(struct evhttp_connection* evcon); +static void evhttp_read_firstline(struct evhttp_connection *evcon, + struct 
evhttp_request *req); +static void evhttp_read_header(struct evhttp_connection *evcon, + struct evhttp_request *req); +static int evhttp_add_header_internal(struct evkeyvalq *headers, + const char *key, const char *value); +static const char *evhttp_response_phrase_internal(int code); +static void evhttp_get_request(struct evhttp *, evutil_socket_t, struct sockaddr *, ev_socklen_t); +static void evhttp_write_buffer(struct evhttp_connection *, + void (*)(struct evhttp_connection *, void *), void *); +static void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *); + +/* callbacks for bufferevent */ +static void evhttp_read_cb(struct bufferevent *, void *); +static void evhttp_write_cb(struct bufferevent *, void *); +static void evhttp_error_cb(struct bufferevent *bufev, short what, void *arg); +static int evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp, + const char *hostname); + +#ifndef EVENT__HAVE_STRSEP +/* strsep replacement for platforms that lack it. Only works if + * del is one character long. */ +static char * +strsep(char **s, const char *del) +{ + char *d, *tok; + EVUTIL_ASSERT(strlen(del) == 1); + if (!s || !*s) + return NULL; + tok = *s; + d = strstr(tok, del); + if (d) { + *d = '\0'; + *s = d + 1; + } else + *s = NULL; + return tok; +} +#endif + +static size_t +html_replace(const char ch, const char **escaped) +{ + switch (ch) { + case '<': + *escaped = "&lt;"; + return 4; + case '>': + *escaped = "&gt;"; + return 4; + case '"': + *escaped = "&quot;"; + return 6; + case '\'': + *escaped = "&#039;"; + return 6; + case '&': + *escaped = "&amp;"; + return 5; + default: + break; + } + + return 1; +} + +/* + * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;, + * &#039; and &amp; correspondingly. + * + * The returned string needs to be freed by the caller. + */ + +char * +evhttp_htmlescape(const char *html) +{ + size_t i; + size_t new_size = 0, old_size = 0; + char *escaped_html, *p; + + if (html == NULL) + return (NULL); + + old_size = strlen(html); + for (i = 0; i < old_size; ++i) { + const char *replaced = NULL; + const size_t replace_size = html_replace(html[i], &replaced); + if (replace_size > EV_SIZE_MAX - new_size) { + event_warn("%s: html_replace overflow", __func__); + return (NULL); + } + new_size += replace_size; + } + + if (new_size == EV_SIZE_MAX) + return (NULL); + p = escaped_html = mm_malloc(new_size + 1); + if (escaped_html == NULL) { + event_warn("%s: malloc(%lu)", __func__, + (unsigned long)(new_size + 1)); + return (NULL); + } + for (i = 0; i < old_size; ++i) { + const char *replaced = &html[i]; + const size_t len = html_replace(html[i], &replaced); + memcpy(p, replaced, len); + p += len; + } + + *p = '\0'; + + return (escaped_html); +} + +/** Given an evhttp_cmd_type, returns a constant string containing the + * equivalent HTTP command, or NULL if the evhttp_command_type is + * unrecognized.
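evhttp_htmlescape() above sizes the escaped string in a first pass and copies in a second, so a single allocation suffices. A small caller sketch (assumes event2/http.h is on the include path, the program links against libevent, and the default allocator is in use so free() releases the result):

#include <stdio.h>
#include <stdlib.h>
#include <event2/http.h>

int main(void)
{
	/* Escape user-controlled text before embedding it in an HTML error page. */
	char *safe = evhttp_htmlescape("<script>alert('x & y')</script>");
	if (safe == NULL)
		return 1;
	printf("%s\n", safe);   /* &lt;script&gt;alert(&#039;x &amp; y&#039;)&lt;/script&gt; */
	free(safe);
	return 0;
}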
*/ +static const char * +evhttp_method(enum evhttp_cmd_type type) +{ + const char *method; + + switch (type) { + case EVHTTP_REQ_GET: + method = "GET"; + break; + case EVHTTP_REQ_POST: + method = "POST"; + break; + case EVHTTP_REQ_HEAD: + method = "HEAD"; + break; + case EVHTTP_REQ_PUT: + method = "PUT"; + break; + case EVHTTP_REQ_DELETE: + method = "DELETE"; + break; + case EVHTTP_REQ_OPTIONS: + method = "OPTIONS"; + break; + case EVHTTP_REQ_TRACE: + method = "TRACE"; + break; + case EVHTTP_REQ_CONNECT: + method = "CONNECT"; + break; + case EVHTTP_REQ_PATCH: + method = "PATCH"; + break; + default: + method = NULL; + break; + } + + return (method); +} + +/** + * Determines if a response should have a body. + * Follows the rules in RFC 2616 section 4.3. + * @return 1 if the response MUST have a body; 0 if the response MUST NOT have + * a body. + */ +static int +evhttp_response_needs_body(struct evhttp_request *req) +{ + return (req->response_code != HTTP_NOCONTENT && + req->response_code != HTTP_NOTMODIFIED && + (req->response_code < 100 || req->response_code >= 200) && + req->type != EVHTTP_REQ_HEAD); +} + +/** Helper: called after we've added some data to an evcon's bufferevent's + * output buffer. Sets the evconn's writing-is-done callback, and puts + * the bufferevent into writing mode. + */ +static void +evhttp_write_buffer(struct evhttp_connection *evcon, + void (*cb)(struct evhttp_connection *, void *), void *arg) +{ + event_debug(("%s: preparing to write buffer\n", __func__)); + + /* Set call back */ + evcon->cb = cb; + evcon->cb_arg = arg; + + /* Disable the read callback: we don't actually care about data; + * we only care about close detection. (We don't disable reading -- + * EV_READ, since we *do* want to learn about any close events.) */ + bufferevent_setcb(evcon->bufev, + NULL, /*read*/ + evhttp_write_cb, + evhttp_error_cb, + evcon); + + bufferevent_enable(evcon->bufev, EV_READ|EV_WRITE); +} + +static void +evhttp_send_continue_done(struct evhttp_connection *evcon, void *arg) +{ + bufferevent_disable(evcon->bufev, EV_WRITE); +} + +static void +evhttp_send_continue(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + bufferevent_enable(evcon->bufev, EV_WRITE); + evbuffer_add_printf(bufferevent_get_output(evcon->bufev), + "HTTP/%d.%d 100 Continue\r\n\r\n", + req->major, req->minor); + evcon->cb = evhttp_send_continue_done; + evcon->cb_arg = NULL; + bufferevent_setcb(evcon->bufev, + evhttp_read_cb, + evhttp_write_cb, + evhttp_error_cb, + evcon); +} + +/** Helper: returns true iff evconn is in any connected state. */ +static int +evhttp_connected(struct evhttp_connection *evcon) +{ + switch (evcon->state) { + case EVCON_DISCONNECTED: + case EVCON_CONNECTING: + return (0); + case EVCON_IDLE: + case EVCON_READING_FIRSTLINE: + case EVCON_READING_HEADERS: + case EVCON_READING_BODY: + case EVCON_READING_TRAILER: + case EVCON_WRITING: + default: + return (1); + } +} + +/* Create the headers needed for an outgoing HTTP request, adds them to + * the request's header list, and writes the request line to the + * connection's output buffer. 
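evhttp_response_needs_body() above encodes the RFC 2616 rule that 1xx, 204 and 304 responses, and any reply to a HEAD request, must not carry a body. A minimal server-side sketch under that rule; the handler name and path are illustrative and would be registered with evhttp_set_cb():

#include <event2/http.h>

/* Replies 204 No Content: evhttp then skips the body and Content-Length,
 * per the evhttp_response_needs_body() rules. */
static void
ping_handler(struct evhttp_request *req, void *arg)
{
	(void)arg;
	evhttp_send_reply(req, HTTP_NOCONTENT, "No Content", NULL);
}

/* e.g. evhttp_set_cb(http, "/ping", ping_handler, NULL); */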
+ */ +static void +evhttp_make_header_request(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + const char *method; + + evhttp_remove_header(req->output_headers, "Proxy-Connection"); + + /* Generate request line */ + if (!(method = evhttp_method(req->type))) { + method = "NULL"; + } + + evbuffer_add_printf(bufferevent_get_output(evcon->bufev), + "%s %s HTTP/%d.%d\r\n", + method, req->uri, req->major, req->minor); + + /* Add the content length on a post or put request if missing */ + if ((req->type == EVHTTP_REQ_POST || req->type == EVHTTP_REQ_PUT) && + evhttp_find_header(req->output_headers, "Content-Length") == NULL){ + char size[22]; + evutil_snprintf(size, sizeof(size), EV_SIZE_FMT, + EV_SIZE_ARG(evbuffer_get_length(req->output_buffer))); + evhttp_add_header(req->output_headers, "Content-Length", size); + } +} + +/** Return true if the list of headers in 'headers', intepreted with respect + * to flags, means that we should send a "connection: close" when the request + * is done. */ +static int +evhttp_is_connection_close(int flags, struct evkeyvalq* headers) +{ + if (flags & EVHTTP_PROXY_REQUEST) { + /* proxy connection */ + const char *connection = evhttp_find_header(headers, "Proxy-Connection"); + return (connection == NULL || evutil_ascii_strcasecmp(connection, "keep-alive") != 0); + } else { + const char *connection = evhttp_find_header(headers, "Connection"); + return (connection != NULL && evutil_ascii_strcasecmp(connection, "close") == 0); + } +} +static int +evhttp_is_request_connection_close(struct evhttp_request *req) +{ + return + evhttp_is_connection_close(req->flags, req->input_headers) || + evhttp_is_connection_close(req->flags, req->output_headers); +} + +/* Return true iff 'headers' contains 'Connection: keep-alive' */ +static int +evhttp_is_connection_keepalive(struct evkeyvalq* headers) +{ + const char *connection = evhttp_find_header(headers, "Connection"); + return (connection != NULL + && evutil_ascii_strncasecmp(connection, "keep-alive", 10) == 0); +} + +/* Add a correct "Date" header to headers, unless it already has one. */ +static void +evhttp_maybe_add_date_header(struct evkeyvalq *headers) +{ + if (evhttp_find_header(headers, "Date") == NULL) { + char date[50]; + if (sizeof(date) - evutil_date_rfc1123(date, sizeof(date), NULL) > 0) { + evhttp_add_header(headers, "Date", date); + } + } +} + +/* Add a "Content-Length" header with value 'content_length' to headers, + * unless it already has a content-length or transfer-encoding header. */ +static void +evhttp_maybe_add_content_length_header(struct evkeyvalq *headers, + size_t content_length) +{ + if (evhttp_find_header(headers, "Transfer-Encoding") == NULL && + evhttp_find_header(headers, "Content-Length") == NULL) { + char len[22]; + evutil_snprintf(len, sizeof(len), EV_SIZE_FMT, + EV_SIZE_ARG(content_length)); + evhttp_add_header(headers, "Content-Length", len); + } +} + +/* + * Create the headers needed for an HTTP reply in req->output_headers, + * and write the first HTTP response for req line to evcon. 
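The Connection/Proxy-Connection checks above decide whether a finished exchange leaves the connection idle or tears it down. A client can force the close path simply by adding the header itself; a small sketch, with the URI and helper name being illustrative:

#include <event2/http.h>

/* Ask for a one-shot exchange: with "Connection: close" present,
 * evhttp_is_request_connection_close() returns true and the connection
 * is reset instead of being kept idle after the response. */
static int
make_one_shot_get(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	evhttp_add_header(evhttp_request_get_output_headers(req),
	    "Connection", "close");
	return evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/");
}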
+ */ +static void +evhttp_make_header_response(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + int is_keepalive = evhttp_is_connection_keepalive(req->input_headers); + evbuffer_add_printf(bufferevent_get_output(evcon->bufev), + "HTTP/%d.%d %d %s\r\n", + req->major, req->minor, req->response_code, + req->response_code_line); + + if (req->major == 1) { + if (req->minor >= 1) + evhttp_maybe_add_date_header(req->output_headers); + + /* + * if the protocol is 1.0; and the connection was keep-alive + * we need to add a keep-alive header, too. + */ + if (req->minor == 0 && is_keepalive) + evhttp_add_header(req->output_headers, + "Connection", "keep-alive"); + + if ((req->minor >= 1 || is_keepalive) && + evhttp_response_needs_body(req)) { + /* + * we need to add the content length if the + * user did not give it, this is required for + * persistent connections to work. + */ + evhttp_maybe_add_content_length_header( + req->output_headers, + evbuffer_get_length(req->output_buffer)); + } + } + + /* Potentially add headers for unidentified content. */ + if (evhttp_response_needs_body(req)) { + if (evhttp_find_header(req->output_headers, + "Content-Type") == NULL + && evcon->http_server->default_content_type) { + evhttp_add_header(req->output_headers, + "Content-Type", + evcon->http_server->default_content_type); + } + } + + /* if the request asked for a close, we send a close, too */ + if (evhttp_is_connection_close(req->flags, req->input_headers)) { + evhttp_remove_header(req->output_headers, "Connection"); + if (!(req->flags & EVHTTP_PROXY_REQUEST)) + evhttp_add_header(req->output_headers, "Connection", "close"); + evhttp_remove_header(req->output_headers, "Proxy-Connection"); + } +} + +enum expect { NO, CONTINUE, OTHER }; +static enum expect evhttp_have_expect(struct evhttp_request *req, int input) +{ + const char *expect; + struct evkeyvalq *h = input ? req->input_headers : req->output_headers; + + if (!(req->kind == EVHTTP_REQUEST) || !REQ_VERSION_ATLEAST(req, 1, 1)) + return NO; + + expect = evhttp_find_header(h, "Expect"); + if (!expect) + return NO; + + return !evutil_ascii_strcasecmp(expect, "100-continue") ? CONTINUE : OTHER; +} + + +/** Generate all headers appropriate for sending the http request in req (or + * the response, if we're sending a response), and write them to evcon's + * bufferevent. Also writes all data from req->output_buffer */ +static void +evhttp_make_header(struct evhttp_connection *evcon, struct evhttp_request *req) +{ + struct evkeyval *header; + struct evbuffer *output = bufferevent_get_output(evcon->bufev); + + /* + * Depending if this is a HTTP request or response, we might need to + * add some new headers or remove existing headers. + */ + if (req->kind == EVHTTP_REQUEST) { + evhttp_make_header_request(evcon, req); + } else { + evhttp_make_header_response(evcon, req); + } + + TAILQ_FOREACH(header, req->output_headers, next) { + evbuffer_add_printf(output, "%s: %s\r\n", + header->key, header->value); + } + evbuffer_add(output, "\r\n", 2); + + if (evhttp_have_expect(req, 0) != CONTINUE && + evbuffer_get_length(req->output_buffer)) { + /* + * For a request, we add the POST data, for a reply, this + * is the regular data. 
+ */ + evbuffer_add_buffer(output, req->output_buffer); + } +} + +void +evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon, + ev_ssize_t new_max_headers_size) +{ + if (new_max_headers_size<0) + evcon->max_headers_size = EV_SIZE_MAX; + else + evcon->max_headers_size = new_max_headers_size; +} +void +evhttp_connection_set_max_body_size(struct evhttp_connection* evcon, + ev_ssize_t new_max_body_size) +{ + if (new_max_body_size<0) + evcon->max_body_size = EV_UINT64_MAX; + else + evcon->max_body_size = new_max_body_size; +} + +static int +evhttp_connection_incoming_fail(struct evhttp_request *req, + enum evhttp_request_error error) +{ + switch (error) { + case EVREQ_HTTP_DATA_TOO_LONG: + req->response_code = HTTP_ENTITYTOOLARGE; + break; + default: + req->response_code = HTTP_BADREQUEST; + } + + switch (error) { + case EVREQ_HTTP_TIMEOUT: + case EVREQ_HTTP_EOF: + /* + * these are cases in which we probably should just + * close the connection and not send a reply. this + * case may happen when a browser keeps a persistent + * connection open and we timeout on the read. when + * the request is still being used for sending, we + * need to disassociated it from the connection here. + */ + if (!req->userdone) { + /* remove it so that it will not be freed */ + TAILQ_REMOVE(&req->evcon->requests, req, next); + /* indicate that this request no longer has a + * connection object + */ + req->evcon = NULL; + } + return (-1); + case EVREQ_HTTP_INVALID_HEADER: + case EVREQ_HTTP_BUFFER_ERROR: + case EVREQ_HTTP_REQUEST_CANCEL: + case EVREQ_HTTP_DATA_TOO_LONG: + default: /* xxx: probably should just error on default */ + /* the callback looks at the uri to determine errors */ + if (req->uri) { + mm_free(req->uri); + req->uri = NULL; + } + if (req->uri_elems) { + evhttp_uri_free(req->uri_elems); + req->uri_elems = NULL; + } + + /* + * the callback needs to send a reply, once the reply has + * been send, the connection should get freed. + */ + (*req->cb)(req, req->cb_arg); + } + + return (0); +} + +/* Free connection ownership of which can be acquired by user using + * evhttp_request_own(). */ +static inline void +evhttp_request_free_auto(struct evhttp_request *req) +{ + if (!(req->flags & EVHTTP_USER_OWNED)) + evhttp_request_free(req); +} + +static void +evhttp_request_free_(struct evhttp_connection *evcon, struct evhttp_request *req) +{ + TAILQ_REMOVE(&evcon->requests, req, next); + evhttp_request_free_auto(req); +} + +/* Called when evcon has experienced a (non-recoverable? -NM) error, as + * given in error. If it's an outgoing connection, reset the connection, + * retry any pending requests, and inform the user. If it's incoming, + * delegates to evhttp_connection_incoming_fail(). */ +void +evhttp_connection_fail_(struct evhttp_connection *evcon, + enum evhttp_request_error error) +{ + const int errsave = EVUTIL_SOCKET_ERROR(); + struct evhttp_request* req = TAILQ_FIRST(&evcon->requests); + void (*cb)(struct evhttp_request *, void *); + void *cb_arg; + void (*error_cb)(enum evhttp_request_error, void *); + void *error_cb_arg; + EVUTIL_ASSERT(req != NULL); + + bufferevent_disable(evcon->bufev, EV_READ|EV_WRITE); + + if (evcon->flags & EVHTTP_CON_INCOMING) { + /* + * for incoming requests, there are two different + * failure cases. it's either a network level error + * or an http layer error. for problems on the network + * layer like timeouts we just drop the connections. + * For HTTP problems, we might have to send back a + * reply before the connection can be freed. 
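The two setters above cap how much header and body data a single connection will accept; negative values mean unlimited. A short sketch with illustrative limits (the analogous evhttp_set_max_headers_size()/evhttp_set_max_body_size() setters exist for a listening struct evhttp):

#include <event2/http.h>

static void
apply_limits(struct evhttp_connection *evcon)
{
	evhttp_connection_set_max_headers_size(evcon, 16 * 1024); /* 16 KiB of headers */
	evhttp_connection_set_max_body_size(evcon, 1024 * 1024);  /* 1 MiB of body */
}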
+ */ + if (evhttp_connection_incoming_fail(req, error) == -1) + evhttp_connection_free(evcon); + return; + } + + error_cb = req->error_cb; + error_cb_arg = req->cb_arg; + /* when the request was canceled, the callback is not executed */ + if (error != EVREQ_HTTP_REQUEST_CANCEL) { + /* save the callback for later; the cb might free our object */ + cb = req->cb; + cb_arg = req->cb_arg; + } else { + cb = NULL; + cb_arg = NULL; + } + + /* do not fail all requests; the next request is going to get + * send over a new connection. when a user cancels a request, + * all other pending requests should be processed as normal + */ + evhttp_request_free_(evcon, req); + + /* reset the connection */ + evhttp_connection_reset_(evcon); + + /* We are trying the next request that was queued on us */ + if (TAILQ_FIRST(&evcon->requests) != NULL) + evhttp_connection_connect_(evcon); + + /* The call to evhttp_connection_reset_ overwrote errno. + * Let's restore the original errno, so that the user's + * callback can have a better idea of what the error was. + */ + EVUTIL_SET_SOCKET_ERROR(errsave); + + /* inform the user */ + if (error_cb != NULL) + error_cb(error, error_cb_arg); + if (cb != NULL) + (*cb)(NULL, cb_arg); +} + +/* Bufferevent callback: invoked when any data has been written from an + * http connection's bufferevent */ +static void +evhttp_write_cb(struct bufferevent *bufev, void *arg) +{ + struct evhttp_connection *evcon = arg; + + /* Activate our call back */ + if (evcon->cb != NULL) + (*evcon->cb)(evcon, evcon->cb_arg); +} + +/** + * Advance the connection state. + * - If this is an outgoing connection, we've just processed the response; + * idle or close the connection. + * - If this is an incoming connection, we've just processed the request; + * respond. + */ +static void +evhttp_connection_done(struct evhttp_connection *evcon) +{ + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + int con_outgoing = evcon->flags & EVHTTP_CON_OUTGOING; + int free_evcon = 0; + + if (con_outgoing) { + /* idle or close the connection */ + int need_close = evhttp_is_request_connection_close(req); + TAILQ_REMOVE(&evcon->requests, req, next); + req->evcon = NULL; + + evcon->state = EVCON_IDLE; + + /* check if we got asked to close the connection */ + if (need_close) + evhttp_connection_reset_(evcon); + + if (TAILQ_FIRST(&evcon->requests) != NULL) { + /* + * We have more requests; reset the connection + * and deal with the next request. + */ + if (!evhttp_connected(evcon)) + evhttp_connection_connect_(evcon); + else + evhttp_request_dispatch(evcon); + } else if (!need_close) { + /* + * The connection is going to be persistent, but we + * need to detect if the other side closes it. + */ + evhttp_connection_start_detectclose(evcon); + } else if ((evcon->flags & EVHTTP_CON_AUTOFREE)) { + /* + * If we have no more requests that need completion + * and we're not waiting for the connection to close + */ + free_evcon = 1; + } + } else { + /* + * incoming connection - we need to leave the request on the + * connection so that we can reply to it. + */ + evcon->state = EVCON_WRITING; + } + + /* notify the user of the request */ + (*req->cb)(req, req->cb_arg); + + /* if this was an outgoing request, we own and it's done. so free it. */ + if (con_outgoing) { + evhttp_request_free_auto(req); + } + + /* If this was the last request of an outgoing connection and we're + * not waiting to receive a connection close event and we want to + * automatically free the connection. 
We check to ensure our request + * list is empty one last time just in case our callback added a + * new request. + */ + if (free_evcon && TAILQ_FIRST(&evcon->requests) == NULL) { + evhttp_connection_free(evcon); + } +} + +/* + * Handles reading from a chunked request. + * return ALL_DATA_READ: + * all data has been read + * return MORE_DATA_EXPECTED: + * more data is expected + * return DATA_CORRUPTED: + * data is corrupted + * return REQUEST_CANCELED: + * request was canceled by the user calling evhttp_cancel_request + * return DATA_TOO_LONG: + * ran over the maximum limit + */ + +static enum message_read_status +evhttp_handle_chunked_read(struct evhttp_request *req, struct evbuffer *buf) +{ + if (req == NULL || buf == NULL) { + return DATA_CORRUPTED; + } + + while (1) { + size_t buflen; + + if ((buflen = evbuffer_get_length(buf)) == 0) { + break; + } + + /* evbuffer_get_length returns size_t, but len variable is ssize_t, + * check for overflow conditions */ + if (buflen > EV_SSIZE_MAX) { + return DATA_CORRUPTED; + } + + if (req->ntoread < 0) { + /* Read chunk size */ + ev_int64_t ntoread; + char *p = evbuffer_readln(buf, NULL, EVBUFFER_EOL_CRLF); + char *endp; + int error; + if (p == NULL) + break; + /* the last chunk is on a new line? */ + if (strlen(p) == 0) { + mm_free(p); + continue; + } + ntoread = evutil_strtoll(p, &endp, 16); + error = (*p == '\0' || + (*endp != '\0' && *endp != ' ') || + ntoread < 0); + mm_free(p); + if (error) { + /* could not get chunk size */ + return (DATA_CORRUPTED); + } + + /* ntoread is signed int64, body_size is unsigned size_t, check for under/overflow conditions */ + if ((ev_uint64_t)ntoread > EV_SIZE_MAX - req->body_size) { + return DATA_CORRUPTED; + } + + if (req->body_size + (size_t)ntoread > req->evcon->max_body_size) { + /* failed body length test */ + event_debug(("Request body is too long")); + return (DATA_TOO_LONG); + } + + req->body_size += (size_t)ntoread; + req->ntoread = ntoread; + if (req->ntoread == 0) { + /* Last chunk */ + return (ALL_DATA_READ); + } + continue; + } + + /* req->ntoread is signed int64, len is ssize_t, based on arch, + * ssize_t could only be 32b, check for these conditions */ + if (req->ntoread > EV_SSIZE_MAX) { + return DATA_CORRUPTED; + } + + /* don't have enough to complete a chunk; wait for more */ + if (req->ntoread > 0 && buflen < (ev_uint64_t)req->ntoread) + return (MORE_DATA_EXPECTED); + + /* Completed chunk */ + evbuffer_remove_buffer(buf, req->input_buffer, (size_t)req->ntoread); + req->ntoread = -1; + if (req->chunk_cb != NULL) { + req->flags |= EVHTTP_REQ_DEFER_FREE; + (*req->chunk_cb)(req, req->cb_arg); + evbuffer_drain(req->input_buffer, + evbuffer_get_length(req->input_buffer)); + req->flags &= ~EVHTTP_REQ_DEFER_FREE; + if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) { + return (REQUEST_CANCELED); + } + } + } + + return (MORE_DATA_EXPECTED); +} + +static void +evhttp_read_trailer(struct evhttp_connection *evcon, struct evhttp_request *req) +{ + struct evbuffer *buf = bufferevent_get_input(evcon->bufev); + + switch (evhttp_parse_headers_(req, buf)) { + case DATA_CORRUPTED: + case DATA_TOO_LONG: + evhttp_connection_fail_(evcon, EVREQ_HTTP_DATA_TOO_LONG); + break; + case ALL_DATA_READ: + bufferevent_disable(evcon->bufev, EV_READ); + evhttp_connection_done(evcon); + break; + case MORE_DATA_EXPECTED: + case REQUEST_CANCELED: /* ??? 
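evhttp_handle_chunked_read() above hands each decoded chunk to req->chunk_cb and then drains req->input_buffer, so a client can stream large or chunked responses instead of buffering them whole. A sketch of such a callback; the FILE * argument is an illustrative sink, and the callback would be registered with evhttp_request_set_chunked_cb() before the request is made:

#include <stdio.h>
#include <event2/buffer.h>
#include <event2/http.h>

/* Called once per decoded chunk; whatever is left in the input buffer
 * when it returns is discarded by the chunked reader above. */
static void
on_chunk(struct evhttp_request *req, void *arg)
{
	char buf[4096];
	struct evbuffer *in = evhttp_request_get_input_buffer(req);
	int n;

	while ((n = evbuffer_remove(in, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, (FILE *)arg);
}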
*/ + default: + break; + } +} + +static void +evhttp_lingering_close(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + struct evbuffer *buf = bufferevent_get_input(evcon->bufev); + + size_t n = evbuffer_get_length(buf); + if (n > (size_t) req->ntoread) + n = (size_t) req->ntoread; + req->ntoread -= n; + req->body_size += n; + + event_debug(("Request body is too long, left " EV_I64_FMT, + EV_I64_ARG(req->ntoread))); + + evbuffer_drain(buf, n); + if (!req->ntoread) + evhttp_connection_fail_(evcon, EVREQ_HTTP_DATA_TOO_LONG); +} +static void +evhttp_lingering_fail(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + if (evcon->flags & EVHTTP_CON_LINGERING_CLOSE) + evhttp_lingering_close(evcon, req); + else + evhttp_connection_fail_(evcon, EVREQ_HTTP_DATA_TOO_LONG); +} + +static void +evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req) +{ + struct evbuffer *buf = bufferevent_get_input(evcon->bufev); + + if (req->chunked) { + switch (evhttp_handle_chunked_read(req, buf)) { + case ALL_DATA_READ: + /* finished last chunk */ + evcon->state = EVCON_READING_TRAILER; + evhttp_read_trailer(evcon, req); + return; + case DATA_CORRUPTED: + case DATA_TOO_LONG: + /* corrupted data */ + evhttp_connection_fail_(evcon, + EVREQ_HTTP_DATA_TOO_LONG); + return; + case REQUEST_CANCELED: + /* request canceled */ + evhttp_request_free_auto(req); + return; + case MORE_DATA_EXPECTED: + default: + break; + } + } else if (req->ntoread < 0) { + /* Read until connection close. */ + if ((size_t)(req->body_size + evbuffer_get_length(buf)) < req->body_size) { + evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER); + return; + } + + req->body_size += evbuffer_get_length(buf); + evbuffer_add_buffer(req->input_buffer, buf); + } else if (req->chunk_cb != NULL || evbuffer_get_length(buf) >= (size_t)req->ntoread) { + /* XXX: the above get_length comparison has to be fixed for overflow conditions! */ + /* We've postponed moving the data until now, but we're + * about to use it. */ + size_t n = evbuffer_get_length(buf); + + if (n > (size_t) req->ntoread) + n = (size_t) req->ntoread; + req->ntoread -= n; + req->body_size += n; + evbuffer_remove_buffer(buf, req->input_buffer, n); + } + + if (req->body_size > req->evcon->max_body_size || + (!req->chunked && req->ntoread >= 0 && + (size_t)req->ntoread > req->evcon->max_body_size)) { + /* XXX: The above casted comparison must checked for overflow */ + /* failed body length test */ + + evhttp_lingering_fail(evcon, req); + return; + } + + if (evbuffer_get_length(req->input_buffer) > 0 && req->chunk_cb != NULL) { + req->flags |= EVHTTP_REQ_DEFER_FREE; + (*req->chunk_cb)(req, req->cb_arg); + req->flags &= ~EVHTTP_REQ_DEFER_FREE; + evbuffer_drain(req->input_buffer, + evbuffer_get_length(req->input_buffer)); + if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) { + evhttp_request_free_auto(req); + return; + } + } + + if (!req->ntoread) { + bufferevent_disable(evcon->bufev, EV_READ); + /* Completed content length */ + evhttp_connection_done(evcon); + return; + } +} + +#define get_deferred_queue(evcon) \ + ((evcon)->base) + +/* + * Gets called when more data becomes available + */ + +static void +evhttp_read_cb(struct bufferevent *bufev, void *arg) +{ + struct evhttp_connection *evcon = arg; + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + + /* Cancel if it's pending. 
*/ + event_deferred_cb_cancel_(get_deferred_queue(evcon), + &evcon->read_more_deferred_cb); + + switch (evcon->state) { + case EVCON_READING_FIRSTLINE: + evhttp_read_firstline(evcon, req); + /* note the request may have been freed in + * evhttp_read_body */ + break; + case EVCON_READING_HEADERS: + evhttp_read_header(evcon, req); + /* note the request may have been freed in + * evhttp_read_body */ + break; + case EVCON_READING_BODY: + evhttp_read_body(evcon, req); + /* note the request may have been freed in + * evhttp_read_body */ + break; + case EVCON_READING_TRAILER: + evhttp_read_trailer(evcon, req); + break; + case EVCON_IDLE: + { +#ifdef USE_DEBUG + struct evbuffer *input; + size_t total_len; + + input = bufferevent_get_input(evcon->bufev); + total_len = evbuffer_get_length(input); + event_debug(("%s: read "EV_SIZE_FMT + " bytes in EVCON_IDLE state," + " resetting connection", + __func__, EV_SIZE_ARG(total_len))); +#endif + + evhttp_connection_reset_(evcon); + } + break; + case EVCON_DISCONNECTED: + case EVCON_CONNECTING: + case EVCON_WRITING: + default: + event_errx(1, "%s: illegal connection state %d", + __func__, evcon->state); + } +} + +static void +evhttp_deferred_read_cb(struct event_callback *cb, void *data) +{ + struct evhttp_connection *evcon = data; + struct bufferevent *bev = evcon->bufev; + if (bev->readcb) + (bev->readcb)(evcon->bufev, evcon); +} + +static void +evhttp_write_connectioncb(struct evhttp_connection *evcon, void *arg) +{ + /* This is after writing the request to the server */ + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + struct evbuffer *output = bufferevent_get_output(evcon->bufev); + EVUTIL_ASSERT(req != NULL); + + EVUTIL_ASSERT(evcon->state == EVCON_WRITING); + + /* We need to wait until we've written all of our output data before we can + * continue */ + if (evbuffer_get_length(output) > 0) + return; + + /* We are done writing our header and are now expecting the response */ + req->kind = EVHTTP_RESPONSE; + + evhttp_start_read_(evcon); +} + +/* + * Clean up a connection object + */ + +void +evhttp_connection_free(struct evhttp_connection *evcon) +{ + struct evhttp_request *req; + int need_close = 0; + + /* notify interested parties that this connection is going down */ + if (evcon->fd != -1) { + if (evhttp_connected(evcon) && evcon->closecb != NULL) + (*evcon->closecb)(evcon, evcon->closecb_arg); + } + + /* remove all requests that might be queued on this + * connection. for server connections, this should be empty. + * because it gets dequeued either in evhttp_connection_done or + * evhttp_connection_fail_. 
+ */ + while ((req = TAILQ_FIRST(&evcon->requests)) != NULL) { + evhttp_request_free_(evcon, req); + } + + if (evcon->http_server != NULL) { + struct evhttp *http = evcon->http_server; + TAILQ_REMOVE(&http->connections, evcon, next); + } + + if (event_initialized(&evcon->retry_ev)) { + event_del(&evcon->retry_ev); + event_debug_unassign(&evcon->retry_ev); + } + + event_deferred_cb_cancel_(get_deferred_queue(evcon), + &evcon->read_more_deferred_cb); + + if (evcon->bufev != NULL) { + need_close = + !(bufferevent_get_options_(evcon->bufev) & BEV_OPT_CLOSE_ON_FREE); + if (evcon->fd == -1) + evcon->fd = bufferevent_getfd(evcon->bufev); + + bufferevent_free(evcon->bufev); + } + + if (evcon->fd != -1) { + shutdown(evcon->fd, EVUTIL_SHUT_WR); + if (need_close) + evutil_closesocket(evcon->fd); + } + + if (evcon->bind_address != NULL) + mm_free(evcon->bind_address); + + if (evcon->address != NULL) + mm_free(evcon->address); + + mm_free(evcon); +} + +void +evhttp_connection_free_on_completion(struct evhttp_connection *evcon) { + evcon->flags |= EVHTTP_CON_AUTOFREE; +} + +void +evhttp_connection_set_local_address(struct evhttp_connection *evcon, + const char *address) +{ + EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED); + if (evcon->bind_address) + mm_free(evcon->bind_address); + if ((evcon->bind_address = mm_strdup(address)) == NULL) + event_warn("%s: strdup", __func__); +} + +void +evhttp_connection_set_local_port(struct evhttp_connection *evcon, + ev_uint16_t port) +{ + EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED); + evcon->bind_port = port; +} + +static void +evhttp_request_dispatch(struct evhttp_connection* evcon) +{ + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + + /* this should not usually happy but it's possible */ + if (req == NULL) + return; + + EVUTIL_ASSERT(req->kind == EVHTTP_REQUEST); + + /* delete possible close detection events */ + evhttp_connection_stop_detectclose(evcon); + + /* we assume that the connection is connected already */ + EVUTIL_ASSERT(evcon->state == EVCON_IDLE); + + evcon->state = EVCON_WRITING; + + /* Create the header from the store arguments */ + evhttp_make_header(evcon, req); + + evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL); +} + +/* Reset our connection state: disables reading/writing, closes our fd (if +* any), clears out buffers, and puts us in state DISCONNECTED. */ +void +evhttp_connection_reset_(struct evhttp_connection *evcon) +{ + struct evbuffer *tmp; + int err; + + bufferevent_setcb(evcon->bufev, NULL, NULL, NULL, NULL); + + /* XXXX This is not actually an optimal fix. Instead we ought to have + an API for "stop connecting", or use bufferevent_setfd to turn off + connecting. But for Libevent 2.0, this seems like a minimal change + least likely to disrupt the rest of the bufferevent and http code. + + Why is this here? If the fd is set in the bufferevent, and the + bufferevent is connecting, then you can't actually stop the + bufferevent from trying to connect with bufferevent_disable(). The + connect will never trigger, since we close the fd, but the timeout + might. That caused an assertion failure in evhttp_connection_fail_. 
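evhttp_connection_set_local_address()/_port() above must be called while the connection is still in EVCON_DISCONNECTED (both assert on it); the values are later consumed by bind_socket() when the outgoing connection is made. A sketch with an illustrative source address:

#include <event2/http.h>

static void
pin_source_address(struct evhttp_connection *evcon)
{
	evhttp_connection_set_local_address(evcon, "192.0.2.10"); /* illustrative */
	evhttp_connection_set_local_port(evcon, 0);               /* 0: any free port */
}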
+ */ + bufferevent_disable_hard_(evcon->bufev, EV_READ|EV_WRITE); + + if (evcon->fd == -1) + evcon->fd = bufferevent_getfd(evcon->bufev); + + if (evcon->fd != -1) { + /* inform interested parties about connection close */ + if (evhttp_connected(evcon) && evcon->closecb != NULL) + (*evcon->closecb)(evcon, evcon->closecb_arg); + + shutdown(evcon->fd, EVUTIL_SHUT_WR); + evutil_closesocket(evcon->fd); + evcon->fd = -1; + } + err = bufferevent_setfd(evcon->bufev, -1); + EVUTIL_ASSERT(!err && "setfd"); + + /* we need to clean up any buffered data */ + tmp = bufferevent_get_output(evcon->bufev); + err = evbuffer_drain(tmp, -1); + EVUTIL_ASSERT(!err && "drain output"); + tmp = bufferevent_get_input(evcon->bufev); + err = evbuffer_drain(tmp, -1); + EVUTIL_ASSERT(!err && "drain input"); + + evcon->flags &= ~EVHTTP_CON_READING_ERROR; + + evcon->state = EVCON_DISCONNECTED; +} + +static void +evhttp_connection_start_detectclose(struct evhttp_connection *evcon) +{ + evcon->flags |= EVHTTP_CON_CLOSEDETECT; + bufferevent_enable(evcon->bufev, EV_READ); +} + +static void +evhttp_connection_stop_detectclose(struct evhttp_connection *evcon) +{ + evcon->flags &= ~EVHTTP_CON_CLOSEDETECT; + bufferevent_disable(evcon->bufev, EV_READ); +} + +static void +evhttp_connection_retry(evutil_socket_t fd, short what, void *arg) +{ + struct evhttp_connection *evcon = arg; + + evcon->state = EVCON_DISCONNECTED; + evhttp_connection_connect_(evcon); +} + +static void +evhttp_connection_cb_cleanup(struct evhttp_connection *evcon) +{ + struct evcon_requestq requests; + + evhttp_connection_reset_(evcon); + if (evcon->retry_max < 0 || evcon->retry_cnt < evcon->retry_max) { + struct timeval tv_retry = evcon->initial_retry_timeout; + int i; + evtimer_assign(&evcon->retry_ev, evcon->base, evhttp_connection_retry, evcon); + /* XXXX handle failure from evhttp_add_event */ + for (i=0; i < evcon->retry_cnt; ++i) { + tv_retry.tv_usec *= 2; + if (tv_retry.tv_usec > 1000000) { + tv_retry.tv_usec -= 1000000; + tv_retry.tv_sec += 1; + } + tv_retry.tv_sec *= 2; + if (tv_retry.tv_sec > 3600) { + tv_retry.tv_sec = 3600; + tv_retry.tv_usec = 0; + } + } + event_add(&evcon->retry_ev, &tv_retry); + evcon->retry_cnt++; + return; + } + + /* + * User callback can do evhttp_make_request() on the same + * evcon so new request will be added to evcon->requests. To + * avoid freeing it prematurely we iterate over the copy of + * the queue. 
+ */ + TAILQ_INIT(&requests); + while (TAILQ_FIRST(&evcon->requests) != NULL) { + struct evhttp_request *request = TAILQ_FIRST(&evcon->requests); + TAILQ_REMOVE(&evcon->requests, request, next); + TAILQ_INSERT_TAIL(&requests, request, next); + } + + /* for now, we just signal all requests by executing their callbacks */ + while (TAILQ_FIRST(&requests) != NULL) { + struct evhttp_request *request = TAILQ_FIRST(&requests); + TAILQ_REMOVE(&requests, request, next); + request->evcon = NULL; + + /* we might want to set an error here */ + request->cb(request, request->cb_arg); + evhttp_request_free_auto(request); + } +} + +static void +evhttp_connection_read_on_write_error(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + struct evbuffer *buf; + + /** Second time, we can't read anything */ + if (evcon->flags & EVHTTP_CON_READING_ERROR) { + evcon->flags &= ~EVHTTP_CON_READING_ERROR; + evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF); + return; + } + + req->kind = EVHTTP_RESPONSE; + + buf = bufferevent_get_output(evcon->bufev); + evbuffer_unfreeze(buf, 1); + evbuffer_drain(buf, evbuffer_get_length(buf)); + evbuffer_freeze(buf, 1); + + evhttp_start_read_(evcon); + evcon->flags |= EVHTTP_CON_READING_ERROR; +} + +static void +evhttp_error_cb(struct bufferevent *bufev, short what, void *arg) +{ + struct evhttp_connection *evcon = arg; + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + + if (evcon->fd == -1) + evcon->fd = bufferevent_getfd(bufev); + + switch (evcon->state) { + case EVCON_CONNECTING: + if (what & BEV_EVENT_TIMEOUT) { + event_debug(("%s: connection timeout for \"%s:%d\" on " + EV_SOCK_FMT, + __func__, evcon->address, evcon->port, + EV_SOCK_ARG(evcon->fd))); + evhttp_connection_cb_cleanup(evcon); + return; + } + break; + + case EVCON_READING_BODY: + if (!req->chunked && req->ntoread < 0 + && what == (BEV_EVENT_READING|BEV_EVENT_EOF)) { + /* EOF on read can be benign */ + evhttp_connection_done(evcon); + return; + } + break; + + case EVCON_DISCONNECTED: + case EVCON_IDLE: + case EVCON_READING_FIRSTLINE: + case EVCON_READING_HEADERS: + case EVCON_READING_TRAILER: + case EVCON_WRITING: + default: + break; + } + + /* when we are in close detect mode, a read error means that + * the other side closed their connection. + */ + if (evcon->flags & EVHTTP_CON_CLOSEDETECT) { + evcon->flags &= ~EVHTTP_CON_CLOSEDETECT; + EVUTIL_ASSERT(evcon->http_server == NULL); + /* For connections from the client, we just + * reset the connection so that it becomes + * disconnected. + */ + EVUTIL_ASSERT(evcon->state == EVCON_IDLE); + evhttp_connection_reset_(evcon); + + /* + * If we have no more requests that need completion + * and we want to auto-free the connection when all + * requests have been completed. 
+ */ + if (TAILQ_FIRST(&evcon->requests) == NULL + && (evcon->flags & EVHTTP_CON_OUTGOING) + && (evcon->flags & EVHTTP_CON_AUTOFREE)) { + evhttp_connection_free(evcon); + } + return; + } + + if (what & BEV_EVENT_TIMEOUT) { + evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT); + } else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) { + if (what & BEV_EVENT_WRITING && + evcon->flags & EVHTTP_CON_READ_ON_WRITE_ERROR) { + evhttp_connection_read_on_write_error(evcon, req); + return; + } + + if (what & BEV_EVENT_READING && + evcon->flags & EVHTTP_CON_READ_ON_WRITE_ERROR && + evbuffer_get_length(bufferevent_get_input(bufev))) { + event_deferred_cb_schedule_(get_deferred_queue(evcon), + &evcon->read_more_deferred_cb); + return; + } + + evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF); + } else if (what == BEV_EVENT_CONNECTED) { + } else { + evhttp_connection_fail_(evcon, EVREQ_HTTP_BUFFER_ERROR); + } +} + +/* + * Event callback for asynchronous connection attempt. + */ +static void +evhttp_connection_cb(struct bufferevent *bufev, short what, void *arg) +{ + struct evhttp_connection *evcon = arg; + int error; + ev_socklen_t errsz = sizeof(error); + + if (evcon->fd == -1) + evcon->fd = bufferevent_getfd(bufev); + + if (!(what & BEV_EVENT_CONNECTED)) { + /* some operating systems return ECONNREFUSED immediately + * when connecting to a local address. the cleanup is going + * to reschedule this function call. + */ +#ifndef _WIN32 + if (errno == ECONNREFUSED) + goto cleanup; +#endif + evhttp_error_cb(bufev, what, arg); + return; + } + + if (evcon->fd == -1) { + event_debug(("%s: bufferevent_getfd returned -1", + __func__)); + goto cleanup; + } + + /* Check if the connection completed */ + if (getsockopt(evcon->fd, SOL_SOCKET, SO_ERROR, (void*)&error, + &errsz) == -1) { + event_debug(("%s: getsockopt for \"%s:%d\" on "EV_SOCK_FMT, + __func__, evcon->address, evcon->port, + EV_SOCK_ARG(evcon->fd))); + goto cleanup; + } + + if (error) { + event_debug(("%s: connect failed for \"%s:%d\" on " + EV_SOCK_FMT": %s", + __func__, evcon->address, evcon->port, + EV_SOCK_ARG(evcon->fd), + evutil_socket_error_to_string(error))); + goto cleanup; + } + + /* We are connected to the server now */ + event_debug(("%s: connected to \"%s:%d\" on "EV_SOCK_FMT"\n", + __func__, evcon->address, evcon->port, + EV_SOCK_ARG(evcon->fd))); + + /* Reset the retry count as we were successful in connecting */ + evcon->retry_cnt = 0; + evcon->state = EVCON_IDLE; + + /* reset the bufferevent cbs */ + bufferevent_setcb(evcon->bufev, + evhttp_read_cb, + evhttp_write_cb, + evhttp_error_cb, + evcon); + + if (!evutil_timerisset(&evcon->timeout)) { + const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 }; + const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 }; + bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv); + } else { + bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout); + } + + /* try to start requests that have queued up on this connection */ + evhttp_request_dispatch(evcon); + return; + + cleanup: + evhttp_connection_cb_cleanup(evcon); +} + +/* + * Check if we got a valid response code. 
+ */ + +static int +evhttp_valid_response_code(int code) +{ + if (code == 0) + return (0); + + return (1); +} + +static int +evhttp_parse_http_version(const char *version, struct evhttp_request *req) +{ + int major, minor; + char ch; + int n = sscanf(version, "HTTP/%d.%d%c", &major, &minor, &ch); + if (n != 2 || major > 1) { + event_debug(("%s: bad version %s on message %p from %s", + __func__, version, req, req->remote_host)); + return (-1); + } + req->major = major; + req->minor = minor; + return (0); +} + +/* Parses the status line of a web server */ + +static int +evhttp_parse_response_line(struct evhttp_request *req, char *line) +{ + char *protocol; + char *number; + const char *readable = ""; + + protocol = strsep(&line, " "); + if (line == NULL) + return (-1); + number = strsep(&line, " "); + if (line != NULL) + readable = line; + + if (evhttp_parse_http_version(protocol, req) < 0) + return (-1); + + req->response_code = atoi(number); + if (!evhttp_valid_response_code(req->response_code)) { + event_debug(("%s: bad response code \"%s\"", + __func__, number)); + return (-1); + } + + if (req->response_code_line != NULL) + mm_free(req->response_code_line); + if ((req->response_code_line = mm_strdup(readable)) == NULL) { + event_warn("%s: strdup", __func__); + return (-1); + } + + return (0); +} + +/* Parse the first line of a HTTP request */ + +static int +evhttp_parse_request_line(struct evhttp_request *req, char *line, size_t len) +{ + char *eos = line + len; + char *method; + char *uri; + char *version; + const char *hostname; + const char *scheme; + size_t method_len; + enum evhttp_cmd_type type; + + while (eos > line && *(eos-1) == ' ') { + *(eos-1) = '\0'; + --eos; + --len; + } + if (len < strlen("GET / HTTP/1.0")) + return -1; + + /* Parse the request line */ + method = strsep(&line, " "); + if (!line) + return -1; + uri = line; + version = strrchr(uri, ' '); + if (!version || uri == version) + return -1; + *version = '\0'; + version++; + + method_len = (uri - method) - 1; + type = EVHTTP_REQ_UNKNOWN_; + + /* First line */ + switch (method_len) { + case 3: + /* The length of the method string is 3, meaning it can only be one of two methods: GET or PUT */ + + /* Since both GET and PUT share the same character 'T' at the end, + * if the string doesn't have 'T', we can immediately determine this + * is an invalid HTTP method */ + + if (method[2] != 'T') { + break; + } + + switch (*method) { + case 'G': + /* This first byte is 'G', so make sure the next byte is + * 'E', if it isn't then this isn't a valid method */ + + if (method[1] == 'E') { + type = EVHTTP_REQ_GET; + } + + break; + case 'P': + /* First byte is P, check second byte for 'U', if not, + * we know it's an invalid method */ + if (method[1] == 'U') { + type = EVHTTP_REQ_PUT; + } + break; + default: + break; + } + break; + case 4: + /* The method length is 4 bytes, leaving only the methods "POST" and "HEAD" */ + switch (*method) { + case 'P': + if (method[3] == 'T' && method[2] == 'S' && method[1] == 'O') { + type = EVHTTP_REQ_POST; + } + break; + case 'H': + if (method[3] == 'D' && method[2] == 'A' && method[1] == 'E') { + type = EVHTTP_REQ_HEAD; + } + break; + default: + break; + } + break; + case 5: + /* Method length is 5 bytes, which can only encompass PATCH and TRACE */ + switch (*method) { + case 'P': + if (method[4] == 'H' && method[3] == 'C' && method[2] == 'T' && method[1] == 'A') { + type = EVHTTP_REQ_PATCH; + } + break; + case 'T': + if (method[4] == 'E' && method[3] == 'C' && method[2] == 'A' && method[1] == 
'R') { + type = EVHTTP_REQ_TRACE; + } + + break; + default: + break; + } + break; + case 6: + /* Method length is 6, only valid method 6 bytes in length is DELEte */ + + /* If the first byte isn't 'D' then it's invalid */ + if (*method != 'D') { + break; + } + + if (method[5] == 'E' && method[4] == 'T' && method[3] == 'E' && method[2] == 'L' && method[1] == 'E') { + type = EVHTTP_REQ_DELETE; + } + + break; + case 7: + /* Method length is 7, only valid methods are "OPTIONS" and "CONNECT" */ + switch (*method) { + case 'O': + if (method[6] == 'S' && method[5] == 'N' && method[4] == 'O' && + method[3] == 'I' && method[2] == 'T' && method[1] == 'P') { + type = EVHTTP_REQ_OPTIONS; + } + + break; + case 'C': + if (method[6] == 'T' && method[5] == 'C' && method[4] == 'E' && + method[3] == 'N' && method[2] == 'N' && method[1] == 'O') { + type = EVHTTP_REQ_CONNECT; + } + + break; + default: + break; + } + break; + } /* switch */ + + if ((int)type == EVHTTP_REQ_UNKNOWN_) { + event_debug(("%s: bad method %s on request %p from %s", + __func__, method, req, req->remote_host)); + /* No error yet; we'll give a better error later when + * we see that req->type is unsupported. */ + } + + req->type = type; + + if (evhttp_parse_http_version(version, req) < 0) + return -1; + + if ((req->uri = mm_strdup(uri)) == NULL) { + event_debug(("%s: mm_strdup", __func__)); + return -1; + } + + if (type == EVHTTP_REQ_CONNECT) { + if ((req->uri_elems = evhttp_uri_parse_authority(req->uri)) == NULL) { + return -1; + } + } else { + if ((req->uri_elems = evhttp_uri_parse_with_flags(req->uri, + EVHTTP_URI_NONCONFORMANT)) == NULL) { + return -1; + } + } + + /* If we have an absolute-URI, check to see if it is an http request + for a known vhost or server alias. If we don't know about this + host, we consider it a proxy request. */ + scheme = evhttp_uri_get_scheme(req->uri_elems); + hostname = evhttp_uri_get_host(req->uri_elems); + if (scheme && (!evutil_ascii_strcasecmp(scheme, "http") || + !evutil_ascii_strcasecmp(scheme, "https")) && + hostname && + !evhttp_find_vhost(req->evcon->http_server, NULL, hostname)) + req->flags |= EVHTTP_PROXY_REQUEST; + + return 0; +} + +const char * +evhttp_find_header(const struct evkeyvalq *headers, const char *key) +{ + struct evkeyval *header; + + TAILQ_FOREACH(header, headers, next) { + if (evutil_ascii_strcasecmp(header->key, key) == 0) + return (header->value); + } + + return (NULL); +} + +void +evhttp_clear_headers(struct evkeyvalq *headers) +{ + struct evkeyval *header; + + for (header = TAILQ_FIRST(headers); + header != NULL; + header = TAILQ_FIRST(headers)) { + TAILQ_REMOVE(headers, header, next); + mm_free(header->key); + mm_free(header->value); + mm_free(header); + } +} + +/* + * Returns 0, if the header was successfully removed. + * Returns -1, if the header could not be found. 
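evhttp_find_header() above performs a case-insensitive lookup in an evkeyvalq, which is how request handlers normally read the parsed input headers. A minimal handler sketch; the handler name and response text are illustrative:

#include <event2/buffer.h>
#include <event2/http.h>

static void
echo_user_agent(struct evhttp_request *req, void *arg)
{
	const char *ua = evhttp_find_header(
	    evhttp_request_get_input_headers(req), "User-Agent");
	struct evbuffer *body = evbuffer_new();

	(void)arg;
	evbuffer_add_printf(body, "you are: %s\n", ua ? ua : "(unknown)");
	evhttp_send_reply(req, HTTP_OK, "OK", body);
	evbuffer_free(body);
}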
+ */ + +int +evhttp_remove_header(struct evkeyvalq *headers, const char *key) +{ + struct evkeyval *header; + + TAILQ_FOREACH(header, headers, next) { + if (evutil_ascii_strcasecmp(header->key, key) == 0) + break; + } + + if (header == NULL) + return (-1); + + /* Free and remove the header that we found */ + TAILQ_REMOVE(headers, header, next); + mm_free(header->key); + mm_free(header->value); + mm_free(header); + + return (0); +} + +static int +evhttp_header_is_valid_value(const char *value) +{ + const char *p = value; + + while ((p = strpbrk(p, "\r\n")) != NULL) { + /* we really expect only one new line */ + p += strspn(p, "\r\n"); + /* we expect a space or tab for continuation */ + if (*p != ' ' && *p != '\t') + return (0); + } + return (1); +} + +int +evhttp_add_header(struct evkeyvalq *headers, + const char *key, const char *value) +{ + event_debug(("%s: key: %s val: %s\n", __func__, key, value)); + + if (strchr(key, '\r') != NULL || strchr(key, '\n') != NULL) { + /* drop illegal headers */ + event_debug(("%s: dropping illegal header key\n", __func__)); + return (-1); + } + + if (!evhttp_header_is_valid_value(value)) { + event_debug(("%s: dropping illegal header value\n", __func__)); + return (-1); + } + + return (evhttp_add_header_internal(headers, key, value)); +} + +static int +evhttp_add_header_internal(struct evkeyvalq *headers, + const char *key, const char *value) +{ + struct evkeyval *header = mm_calloc(1, sizeof(struct evkeyval)); + if (header == NULL) { + event_warn("%s: calloc", __func__); + return (-1); + } + if ((header->key = mm_strdup(key)) == NULL) { + mm_free(header); + event_warn("%s: strdup", __func__); + return (-1); + } + if ((header->value = mm_strdup(value)) == NULL) { + mm_free(header->key); + mm_free(header); + event_warn("%s: strdup", __func__); + return (-1); + } + + TAILQ_INSERT_TAIL(headers, header, next); + + return (0); +} + +/* + * Parses header lines from a request or a response into the specified + * request object given an event buffer. + * + * Returns + * DATA_CORRUPTED on error + * MORE_DATA_EXPECTED when we need to read more headers + * ALL_DATA_READ when all headers have been read. + */ + +enum message_read_status +evhttp_parse_firstline_(struct evhttp_request *req, struct evbuffer *buffer) +{ + char *line; + enum message_read_status status = ALL_DATA_READ; + + size_t len; + /* XXX try */ + line = evbuffer_readln(buffer, &len, EVBUFFER_EOL_CRLF); + if (line == NULL) { + if (req->evcon != NULL && + evbuffer_get_length(buffer) > req->evcon->max_headers_size) + return (DATA_TOO_LONG); + else + return (MORE_DATA_EXPECTED); + } + + if (req->evcon != NULL && len > req->evcon->max_headers_size) { + mm_free(line); + return (DATA_TOO_LONG); + } + + req->headers_size = len; + + switch (req->kind) { + case EVHTTP_REQUEST: + if (evhttp_parse_request_line(req, line, len) == -1) + status = DATA_CORRUPTED; + break; + case EVHTTP_RESPONSE: + if (evhttp_parse_response_line(req, line) == -1) + status = DATA_CORRUPTED; + break; + default: + status = DATA_CORRUPTED; + } + + mm_free(line); + return (status); +} + +static int +evhttp_append_to_last_header(struct evkeyvalq *headers, char *line) +{ + struct evkeyval *header = TAILQ_LAST(headers, evkeyvalq); + char *newval; + size_t old_len, line_len; + + if (header == NULL) + return (-1); + + old_len = strlen(header->value); + + /* Strip space from start and end of line. 
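evhttp_add_header() above rejects keys or values containing stray CR/LF (see evhttp_header_is_valid_value()), so its return value is worth checking. A sketch that rewrites one output header and dumps the list; iterating the queue assumes the TAILQ macros from <sys/queue.h> are available alongside event2/keyvalq_struct.h, and the header value shown is illustrative:

#include <stdio.h>
#include <sys/queue.h>               /* TAILQ_FOREACH */
#include <event2/http.h>
#include <event2/keyvalq_struct.h>   /* struct evkeyval / struct evkeyvalq */

static void
rewrite_server_header(struct evhttp_request *req)
{
	struct evkeyvalq *out = evhttp_request_get_output_headers(req);
	struct evkeyval *kv;

	evhttp_remove_header(out, "Server");
	if (evhttp_add_header(out, "Server", "example-server") == -1)
		fprintf(stderr, "header rejected\n");

	TAILQ_FOREACH(kv, out, next)
		fprintf(stderr, "%s: %s\n", kv->key, kv->value);
}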
*/ + while (*line == ' ' || *line == '\t') + ++line; + evutil_rtrim_lws_(line); + + line_len = strlen(line); + + newval = mm_realloc(header->value, old_len + line_len + 2); + if (newval == NULL) + return (-1); + + newval[old_len] = ' '; + memcpy(newval + old_len + 1, line, line_len + 1); + header->value = newval; + + return (0); +} + +enum message_read_status +evhttp_parse_headers_(struct evhttp_request *req, struct evbuffer* buffer) +{ + enum message_read_status errcode = DATA_CORRUPTED; + char *line; + enum message_read_status status = MORE_DATA_EXPECTED; + + struct evkeyvalq* headers = req->input_headers; + size_t len; + while ((line = evbuffer_readln(buffer, &len, EVBUFFER_EOL_CRLF)) + != NULL) { + char *skey, *svalue; + + req->headers_size += len; + + if (req->evcon != NULL && + req->headers_size > req->evcon->max_headers_size) { + errcode = DATA_TOO_LONG; + goto error; + } + + if (*line == '\0') { /* Last header - Done */ + status = ALL_DATA_READ; + mm_free(line); + break; + } + + /* Check if this is a continuation line */ + if (*line == ' ' || *line == '\t') { + if (evhttp_append_to_last_header(headers, line) == -1) + goto error; + mm_free(line); + continue; + } + + /* Processing of header lines */ + svalue = line; + skey = strsep(&svalue, ":"); + if (svalue == NULL) + goto error; + + svalue += strspn(svalue, " "); + evutil_rtrim_lws_(svalue); + + if (evhttp_add_header(headers, skey, svalue) == -1) + goto error; + + mm_free(line); + } + + if (status == MORE_DATA_EXPECTED) { + if (req->evcon != NULL && + req->headers_size + evbuffer_get_length(buffer) > req->evcon->max_headers_size) + return (DATA_TOO_LONG); + } + + return (status); + + error: + mm_free(line); + return (errcode); +} + +static int +evhttp_get_body_length(struct evhttp_request *req) +{ + struct evkeyvalq *headers = req->input_headers; + const char *content_length; + const char *connection; + + content_length = evhttp_find_header(headers, "Content-Length"); + connection = evhttp_find_header(headers, "Connection"); + + if (content_length == NULL && connection == NULL) + req->ntoread = -1; + else if (content_length == NULL && + evutil_ascii_strcasecmp(connection, "Close") != 0) { + req->ntoread = 0; + } else if (content_length == NULL) { + req->ntoread = -1; + } else { + char *endp; + ev_int64_t ntoread = evutil_strtoll(content_length, &endp, 10); + if (*content_length == '\0' || *endp != '\0' || ntoread < 0) { + event_debug(("%s: illegal content length: %s", + __func__, content_length)); + return (-1); + } + req->ntoread = ntoread; + } + + event_debug(("%s: bytes to read: "EV_I64_FMT" (in buffer "EV_SIZE_FMT")\n", + __func__, EV_I64_ARG(req->ntoread), + EV_SIZE_ARG(evbuffer_get_length(bufferevent_get_input(req->evcon->bufev))))); + + return (0); +} + +static int +evhttp_method_may_have_body(enum evhttp_cmd_type type) +{ + switch (type) { + case EVHTTP_REQ_POST: + case EVHTTP_REQ_PUT: + case EVHTTP_REQ_PATCH: + + case EVHTTP_REQ_GET: + case EVHTTP_REQ_DELETE: + case EVHTTP_REQ_OPTIONS: + case EVHTTP_REQ_CONNECT: + return 1; + + case EVHTTP_REQ_TRACE: + case EVHTTP_REQ_HEAD: + default: + return 0; + } +} + +static void +evhttp_get_body(struct evhttp_connection *evcon, struct evhttp_request *req) +{ + const char *xfer_enc; + + /* If this is a request without a body, then we are done */ + if (req->kind == EVHTTP_REQUEST && + !evhttp_method_may_have_body(req->type)) { + evhttp_connection_done(evcon); + return; + } + evcon->state = EVCON_READING_BODY; + xfer_enc = evhttp_find_header(req->input_headers, "Transfer-Encoding"); + 
if (xfer_enc != NULL && evutil_ascii_strcasecmp(xfer_enc, "chunked") == 0) { + req->chunked = 1; + req->ntoread = -1; + } else { + if (evhttp_get_body_length(req) == -1) { + evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER); + return; + } + if (req->kind == EVHTTP_REQUEST && req->ntoread < 1) { + /* An incoming request with no content-length and no + * transfer-encoding has no body. */ + evhttp_connection_done(evcon); + return; + } + } + + /* Should we send a 100 Continue status line? */ + switch (evhttp_have_expect(req, 1)) { + case CONTINUE: + /* XXX It would be nice to do some sanity + checking here. Does the resource exist? + Should the resource accept post requests? If + no, we should respond with an error. For + now, just optimistically tell the client to + send their message body. */ + if (req->ntoread > 0) { + /* ntoread is ev_int64_t, max_body_size is ev_uint64_t */ + if ((req->evcon->max_body_size <= EV_INT64_MAX) && + (ev_uint64_t)req->ntoread > req->evcon->max_body_size) { + evhttp_lingering_fail(evcon, req); + return; + } + } + if (!evbuffer_get_length(bufferevent_get_input(evcon->bufev))) + evhttp_send_continue(evcon, req); + break; + case OTHER: + evhttp_send_error(req, HTTP_EXPECTATIONFAILED, NULL); + return; + case NO: break; + } + + evhttp_read_body(evcon, req); + /* note the request may have been freed in evhttp_read_body */ +} + +static void +evhttp_read_firstline(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + enum message_read_status res; + + res = evhttp_parse_firstline_(req, bufferevent_get_input(evcon->bufev)); + if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) { + /* Error while reading, terminate */ + event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n", + __func__, EV_SOCK_ARG(evcon->fd))); + evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER); + return; + } else if (res == MORE_DATA_EXPECTED) { + /* Need more header lines */ + return; + } + + evcon->state = EVCON_READING_HEADERS; + evhttp_read_header(evcon, req); +} + +static void +evhttp_read_header(struct evhttp_connection *evcon, + struct evhttp_request *req) +{ + enum message_read_status res; + evutil_socket_t fd = evcon->fd; + + res = evhttp_parse_headers_(req, bufferevent_get_input(evcon->bufev)); + if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) { + /* Error while reading, terminate */ + event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n", + __func__, EV_SOCK_ARG(fd))); + evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER); + return; + } else if (res == MORE_DATA_EXPECTED) { + /* Need more header lines */ + return; + } + + /* Callback can shut down connection with negative return value */ + if (req->header_cb != NULL) { + if ((*req->header_cb)(req, req->cb_arg) < 0) { + evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF); + return; + } + } + + /* Done reading headers, do the real work */ + switch (req->kind) { + case EVHTTP_REQUEST: + event_debug(("%s: checking for post data on "EV_SOCK_FMT"\n", + __func__, EV_SOCK_ARG(fd))); + evhttp_get_body(evcon, req); + /* note the request may have been freed in evhttp_get_body */ + break; + + case EVHTTP_RESPONSE: + /* Start over if we got a 100 Continue response. 
*/ + if (req->response_code == 100) { + struct evbuffer *output = bufferevent_get_output(evcon->bufev); + evbuffer_add_buffer(output, req->output_buffer); + evhttp_start_write_(evcon); + return; + } + if (!evhttp_response_needs_body(req)) { + event_debug(("%s: skipping body for code %d\n", + __func__, req->response_code)); + evhttp_connection_done(evcon); + } else { + event_debug(("%s: start of read body for %s on " + EV_SOCK_FMT"\n", + __func__, req->remote_host, EV_SOCK_ARG(fd))); + evhttp_get_body(evcon, req); + /* note the request may have been freed in + * evhttp_get_body */ + } + break; + + default: + event_warnx("%s: bad header on "EV_SOCK_FMT, __func__, + EV_SOCK_ARG(fd)); + evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER); + break; + } + /* request may have been freed above */ +} + +/* + * Creates a TCP connection to the specified port and executes a callback + * when finished. Failure or success is indicate by the passed connection + * object. + * + * Although this interface accepts a hostname, it is intended to take + * only numeric hostnames so that non-blocking DNS resolution can + * happen elsewhere. + */ + +struct evhttp_connection * +evhttp_connection_new(const char *address, ev_uint16_t port) +{ + return (evhttp_connection_base_new(NULL, NULL, address, port)); +} + +struct evhttp_connection * +evhttp_connection_base_bufferevent_new(struct event_base *base, struct evdns_base *dnsbase, struct bufferevent* bev, + const char *address, ev_uint16_t port) +{ + struct evhttp_connection *evcon = NULL; + + event_debug(("Attempting connection to %s:%d\n", address, port)); + + if ((evcon = mm_calloc(1, sizeof(struct evhttp_connection))) == NULL) { + event_warn("%s: calloc failed", __func__); + goto error; + } + + evcon->fd = -1; + evcon->port = port; + + evcon->max_headers_size = EV_SIZE_MAX; + evcon->max_body_size = EV_SIZE_MAX; + + evutil_timerclear(&evcon->timeout); + evcon->retry_cnt = evcon->retry_max = 0; + + if ((evcon->address = mm_strdup(address)) == NULL) { + event_warn("%s: strdup failed", __func__); + goto error; + } + + if (bev == NULL) { + if (!(bev = bufferevent_socket_new(base, -1, 0))) { + event_warn("%s: bufferevent_socket_new failed", __func__); + goto error; + } + } + + bufferevent_setcb(bev, evhttp_read_cb, evhttp_write_cb, evhttp_error_cb, evcon); + evcon->bufev = bev; + + evcon->state = EVCON_DISCONNECTED; + TAILQ_INIT(&evcon->requests); + + evcon->initial_retry_timeout.tv_sec = 2; + evcon->initial_retry_timeout.tv_usec = 0; + + if (base != NULL) { + evcon->base = base; + if (bufferevent_get_base(bev) != base) + bufferevent_base_set(base, evcon->bufev); + } + + event_deferred_cb_init_( + &evcon->read_more_deferred_cb, + bufferevent_get_priority(bev), + evhttp_deferred_read_cb, evcon); + + evcon->dns_base = dnsbase; + evcon->ai_family = AF_UNSPEC; + + return (evcon); + + error: + if (evcon != NULL) + evhttp_connection_free(evcon); + return (NULL); +} + +struct bufferevent* evhttp_connection_get_bufferevent(struct evhttp_connection *evcon) +{ + return evcon->bufev; +} + +struct evhttp * +evhttp_connection_get_server(struct evhttp_connection *evcon) +{ + return evcon->http_server; +} + +struct evhttp_connection * +evhttp_connection_base_new(struct event_base *base, struct evdns_base *dnsbase, + const char *address, ev_uint16_t port) +{ + return evhttp_connection_base_bufferevent_new(base, dnsbase, NULL, address, port); +} + +void evhttp_connection_set_family(struct evhttp_connection *evcon, + int family) +{ + evcon->ai_family = family; +} + +int 
evhttp_connection_set_flags(struct evhttp_connection *evcon, + int flags) +{ + int avail_flags = 0; + avail_flags |= EVHTTP_CON_REUSE_CONNECTED_ADDR; + avail_flags |= EVHTTP_CON_READ_ON_WRITE_ERROR; + + if (flags & ~avail_flags || flags > EVHTTP_CON_PUBLIC_FLAGS_END) + return 1; + evcon->flags &= ~avail_flags; + + evcon->flags |= flags; + + return 0; +} + +void +evhttp_connection_set_base(struct evhttp_connection *evcon, + struct event_base *base) +{ + EVUTIL_ASSERT(evcon->base == NULL); + EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED); + evcon->base = base; + bufferevent_base_set(base, evcon->bufev); +} + +void +evhttp_connection_set_timeout(struct evhttp_connection *evcon, + int timeout_in_secs) +{ + if (timeout_in_secs == -1) + evhttp_connection_set_timeout_tv(evcon, NULL); + else { + struct timeval tv; + tv.tv_sec = timeout_in_secs; + tv.tv_usec = 0; + evhttp_connection_set_timeout_tv(evcon, &tv); + } +} + +void +evhttp_connection_set_timeout_tv(struct evhttp_connection *evcon, + const struct timeval* tv) +{ + if (tv) { + evcon->timeout = *tv; + bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout); + } else { + const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 }; + const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 }; + evutil_timerclear(&evcon->timeout); + bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv); + } +} + +void +evhttp_connection_set_initial_retry_tv(struct evhttp_connection *evcon, + const struct timeval *tv) +{ + if (tv) { + evcon->initial_retry_timeout = *tv; + } else { + evutil_timerclear(&evcon->initial_retry_timeout); + evcon->initial_retry_timeout.tv_sec = 2; + } +} + +void +evhttp_connection_set_retries(struct evhttp_connection *evcon, + int retry_max) +{ + evcon->retry_max = retry_max; +} + +void +evhttp_connection_set_closecb(struct evhttp_connection *evcon, + void (*cb)(struct evhttp_connection *, void *), void *cbarg) +{ + evcon->closecb = cb; + evcon->closecb_arg = cbarg; +} + +void +evhttp_connection_get_peer(struct evhttp_connection *evcon, + char **address, ev_uint16_t *port) +{ + *address = evcon->address; + *port = evcon->port; +} + +const struct sockaddr* +evhttp_connection_get_addr(struct evhttp_connection *evcon) +{ + return bufferevent_socket_get_conn_address_(evcon->bufev); +} + +int +evhttp_connection_connect_(struct evhttp_connection *evcon) +{ + int old_state = evcon->state; + const char *address = evcon->address; + const struct sockaddr *sa = evhttp_connection_get_addr(evcon); + int ret; + + if (evcon->state == EVCON_CONNECTING) + return (0); + + evhttp_connection_reset_(evcon); + + EVUTIL_ASSERT(!(evcon->flags & EVHTTP_CON_INCOMING)); + evcon->flags |= EVHTTP_CON_OUTGOING; + + if (evcon->bind_address || evcon->bind_port) { + evcon->fd = bind_socket( + evcon->bind_address, evcon->bind_port, 0 /*reuse*/); + if (evcon->fd == -1) { + event_debug(("%s: failed to bind to \"%s\"", + __func__, evcon->bind_address)); + return (-1); + } + + if (bufferevent_setfd(evcon->bufev, evcon->fd)) + return (-1); + } else { + if (bufferevent_setfd(evcon->bufev, -1)) + return (-1); + } + + /* Set up a callback for successful connection setup */ + bufferevent_setcb(evcon->bufev, + NULL /* evhttp_read_cb */, + NULL /* evhttp_write_cb */, + evhttp_connection_cb, + evcon); + if (!evutil_timerisset(&evcon->timeout)) { + const struct timeval conn_tv = { HTTP_CONNECT_TIMEOUT, 0 }; + bufferevent_set_timeouts(evcon->bufev, &conn_tv, &conn_tv); + } else { + bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout); + } + 
/* make sure that we get a write callback */ + if (bufferevent_enable(evcon->bufev, EV_WRITE)) + return (-1); + + evcon->state = EVCON_CONNECTING; + + if (evcon->flags & EVHTTP_CON_REUSE_CONNECTED_ADDR && + sa && + (sa->sa_family == AF_INET || sa->sa_family == AF_INET6)) { + int socklen = sizeof(struct sockaddr_in); + if (sa->sa_family == AF_INET6) { + socklen = sizeof(struct sockaddr_in6); + } + ret = bufferevent_socket_connect(evcon->bufev, sa, socklen); + } else { + ret = bufferevent_socket_connect_hostname(evcon->bufev, + evcon->dns_base, evcon->ai_family, address, evcon->port); + } + + if (ret < 0) { + evcon->state = old_state; + event_sock_warn(evcon->fd, "%s: connection to \"%s\" failed", + __func__, evcon->address); + /* some operating systems return ECONNREFUSED immediately + * when connecting to a local address. the cleanup is going + * to reschedule this function call. + */ + evhttp_connection_cb_cleanup(evcon); + return (0); + } + + return (0); +} + +/* + * Starts an HTTP request on the provided evhttp_connection object. + * If the connection object is not connected to the web server already, + * this will start the connection. + */ + +int +evhttp_make_request(struct evhttp_connection *evcon, + struct evhttp_request *req, + enum evhttp_cmd_type type, const char *uri) +{ + /* We are making a request */ + req->kind = EVHTTP_REQUEST; + req->type = type; + if (req->uri != NULL) + mm_free(req->uri); + if ((req->uri = mm_strdup(uri)) == NULL) { + event_warn("%s: strdup", __func__); + evhttp_request_free_auto(req); + return (-1); + } + + /* Set the protocol version if it is not supplied */ + if (!req->major && !req->minor) { + req->major = 1; + req->minor = 1; + } + + EVUTIL_ASSERT(req->evcon == NULL); + req->evcon = evcon; + EVUTIL_ASSERT(!(req->flags & EVHTTP_REQ_OWN_CONNECTION)); + + TAILQ_INSERT_TAIL(&evcon->requests, req, next); + + /* We do not want to conflict with retry_ev */ + if (evcon->retry_cnt) + return (0); + + /* If the connection object is not connected; make it so */ + if (!evhttp_connected(evcon)) { + int res = evhttp_connection_connect_(evcon); + /* evhttp_connection_fail_(), which is called through + * evhttp_connection_connect_(), assumes that req lies in + * evcon->requests. Thus, enqueue the request in advance and + * remove it in the error case. */ + if (res != 0) + TAILQ_REMOVE(&evcon->requests, req, next); + + return (res); + } + + /* + * If it's connected already and we are the first in the queue, + * then we can dispatch this request immediately. Otherwise, it + * will be dispatched once the pending requests are completed. + */ + if (TAILQ_FIRST(&evcon->requests) == req) + evhttp_request_dispatch(evcon); + + return (0); +} + +void +evhttp_cancel_request(struct evhttp_request *req) +{ + struct evhttp_connection *evcon = req->evcon; + if (evcon != NULL) { + /* We need to remove it from the connection */ + if (TAILQ_FIRST(&evcon->requests) == req) { + /* it's currently being worked on, so reset + * the connection. + */ + evhttp_connection_fail_(evcon, + EVREQ_HTTP_REQUEST_CANCEL); + + /* connection fail freed the request */ + return; + } else { + /* otherwise, we can just remove it from the + * queue + */ + TAILQ_REMOVE(&evcon->requests, req, next); + } + } + + evhttp_request_free_auto(req); +} + +/* + * Reads data from file descriptor into request structure + * Request structure needs to be set up correctly. 
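+ * (Orientation note, not part of the upstream sources: evhttp_start_read_() below is reached on the server side from evhttp_associate_new_request_with_connection() once a connection has been accepted, and on outgoing connections after the request has been written, so that the response can be read.)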
+ */ + +void +evhttp_start_read_(struct evhttp_connection *evcon) +{ + bufferevent_disable(evcon->bufev, EV_WRITE); + bufferevent_enable(evcon->bufev, EV_READ); + + evcon->state = EVCON_READING_FIRSTLINE; + /* Reset the bufferevent callbacks */ + bufferevent_setcb(evcon->bufev, + evhttp_read_cb, + evhttp_write_cb, + evhttp_error_cb, + evcon); + + /* If there's still data pending, process it next time through the + * loop. Don't do it now; that could get recusive. */ + if (evbuffer_get_length(bufferevent_get_input(evcon->bufev))) { + event_deferred_cb_schedule_(get_deferred_queue(evcon), + &evcon->read_more_deferred_cb); + } +} + +void +evhttp_start_write_(struct evhttp_connection *evcon) +{ + bufferevent_disable(evcon->bufev, EV_WRITE); + bufferevent_enable(evcon->bufev, EV_READ); + + evcon->state = EVCON_WRITING; + evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL); +} + +static void +evhttp_send_done(struct evhttp_connection *evcon, void *arg) +{ + int need_close; + struct evhttp_request *req = TAILQ_FIRST(&evcon->requests); + TAILQ_REMOVE(&evcon->requests, req, next); + + if (req->on_complete_cb != NULL) { + req->on_complete_cb(req, req->on_complete_cb_arg); + } + + need_close = + (REQ_VERSION_BEFORE(req, 1, 1) && + !evhttp_is_connection_keepalive(req->input_headers)) || + evhttp_is_request_connection_close(req); + + EVUTIL_ASSERT(req->flags & EVHTTP_REQ_OWN_CONNECTION); + evhttp_request_free(req); + + if (need_close) { + evhttp_connection_free(evcon); + return; + } + + /* we have a persistent connection; try to accept another request. */ + if (evhttp_associate_new_request_with_connection(evcon) == -1) { + evhttp_connection_free(evcon); + } +} + +/* + * Returns an error page. + */ + +void +evhttp_send_error(struct evhttp_request *req, int error, const char *reason) +{ + +#define ERR_FORMAT "\n" \ + "%d %s\n" \ + "\n" \ + "
<H1>%s</H1>
\n" \ + "\n" + + struct evbuffer *buf = evbuffer_new(); + if (buf == NULL) { + /* if we cannot allocate memory; we just drop the connection */ + evhttp_connection_free(req->evcon); + return; + } + if (reason == NULL) { + reason = evhttp_response_phrase_internal(error); + } + + evhttp_response_code_(req, error, reason); + + evbuffer_add_printf(buf, ERR_FORMAT, error, reason, reason); + + evhttp_send_page_(req, buf); + + evbuffer_free(buf); +#undef ERR_FORMAT +} + +/* Requires that headers and response code are already set up */ + +static inline void +evhttp_send(struct evhttp_request *req, struct evbuffer *databuf) +{ + struct evhttp_connection *evcon = req->evcon; + + if (evcon == NULL) { + evhttp_request_free(req); + return; + } + + EVUTIL_ASSERT(TAILQ_FIRST(&evcon->requests) == req); + + /* we expect no more calls form the user on this request */ + req->userdone = 1; + + /* xxx: not sure if we really should expose the data buffer this way */ + if (databuf != NULL) + evbuffer_add_buffer(req->output_buffer, databuf); + + /* Adds headers to the response */ + evhttp_make_header(evcon, req); + + evhttp_write_buffer(evcon, evhttp_send_done, NULL); +} + +void +evhttp_send_reply(struct evhttp_request *req, int code, const char *reason, + struct evbuffer *databuf) +{ + evhttp_response_code_(req, code, reason); + + evhttp_send(req, databuf); +} + +void +evhttp_send_reply_start(struct evhttp_request *req, int code, + const char *reason) +{ + evhttp_response_code_(req, code, reason); + + if (req->evcon == NULL) + return; + + if (evhttp_find_header(req->output_headers, "Content-Length") == NULL && + REQ_VERSION_ATLEAST(req, 1, 1) && + evhttp_response_needs_body(req)) { + /* + * prefer HTTP/1.1 chunked encoding to closing the connection; + * note RFC 2616 section 4.4 forbids it with Content-Length: + * and it's not necessary then anyway. 
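+ * (Illustrative note, not in the upstream comment: with chunking enabled, each evhttp_send_reply_chunk() call below frames its payload as a hexadecimal length, CRLF, the data, CRLF, and evhttp_send_reply_end() terminates the body with "0\r\n\r\n".)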
+ */ + evhttp_add_header(req->output_headers, "Transfer-Encoding", + "chunked"); + req->chunked = 1; + } else { + req->chunked = 0; + } + evhttp_make_header(req->evcon, req); + evhttp_write_buffer(req->evcon, NULL, NULL); +} + +void +evhttp_send_reply_chunk_with_cb(struct evhttp_request *req, struct evbuffer *databuf, + void (*cb)(struct evhttp_connection *, void *), void *arg) +{ + struct evhttp_connection *evcon = req->evcon; + struct evbuffer *output; + + if (evcon == NULL) + return; + + output = bufferevent_get_output(evcon->bufev); + + if (evbuffer_get_length(databuf) == 0) + return; + if (!evhttp_response_needs_body(req)) + return; + if (req->chunked) { + evbuffer_add_printf(output, "%x\r\n", + (unsigned)evbuffer_get_length(databuf)); + } + evbuffer_add_buffer(output, databuf); + if (req->chunked) { + evbuffer_add(output, "\r\n", 2); + } + evhttp_write_buffer(evcon, cb, arg); +} + +void +evhttp_send_reply_chunk(struct evhttp_request *req, struct evbuffer *databuf) +{ + evhttp_send_reply_chunk_with_cb(req, databuf, NULL, NULL); +} +void +evhttp_send_reply_end(struct evhttp_request *req) +{ + struct evhttp_connection *evcon = req->evcon; + struct evbuffer *output; + + if (evcon == NULL) { + evhttp_request_free(req); + return; + } + + output = bufferevent_get_output(evcon->bufev); + + /* we expect no more calls form the user on this request */ + req->userdone = 1; + + if (req->chunked) { + evbuffer_add(output, "0\r\n\r\n", 5); + evhttp_write_buffer(req->evcon, evhttp_send_done, NULL); + req->chunked = 0; + } else if (evbuffer_get_length(output) == 0) { + /* let the connection know that we are done with the request */ + evhttp_send_done(evcon, NULL); + } else { + /* make the callback execute after all data has been written */ + evcon->cb = evhttp_send_done; + evcon->cb_arg = NULL; + } +} + +static const char *informational_phrases[] = { + /* 100 */ "Continue", + /* 101 */ "Switching Protocols" +}; + +static const char *success_phrases[] = { + /* 200 */ "OK", + /* 201 */ "Created", + /* 202 */ "Accepted", + /* 203 */ "Non-Authoritative Information", + /* 204 */ "No Content", + /* 205 */ "Reset Content", + /* 206 */ "Partial Content" +}; + +static const char *redirection_phrases[] = { + /* 300 */ "Multiple Choices", + /* 301 */ "Moved Permanently", + /* 302 */ "Found", + /* 303 */ "See Other", + /* 304 */ "Not Modified", + /* 305 */ "Use Proxy", + /* 307 */ "Temporary Redirect" +}; + +static const char *client_error_phrases[] = { + /* 400 */ "Bad Request", + /* 401 */ "Unauthorized", + /* 402 */ "Payment Required", + /* 403 */ "Forbidden", + /* 404 */ "Not Found", + /* 405 */ "Method Not Allowed", + /* 406 */ "Not Acceptable", + /* 407 */ "Proxy Authentication Required", + /* 408 */ "Request Time-out", + /* 409 */ "Conflict", + /* 410 */ "Gone", + /* 411 */ "Length Required", + /* 412 */ "Precondition Failed", + /* 413 */ "Request Entity Too Large", + /* 414 */ "Request-URI Too Large", + /* 415 */ "Unsupported Media Type", + /* 416 */ "Requested range not satisfiable", + /* 417 */ "Expectation Failed" +}; + +static const char *server_error_phrases[] = { + /* 500 */ "Internal Server Error", + /* 501 */ "Not Implemented", + /* 502 */ "Bad Gateway", + /* 503 */ "Service Unavailable", + /* 504 */ "Gateway Time-out", + /* 505 */ "HTTP Version not supported" +}; + +struct response_class { + const char *name; + size_t num_responses; + const char **responses; +}; + +#ifndef MEMBERSOF +#define MEMBERSOF(x) (sizeof(x)/sizeof(x[0])) +#endif + +static const struct response_class response_classes[] = { 
+ /* 1xx */ { "Informational", MEMBERSOF(informational_phrases), informational_phrases }, + /* 2xx */ { "Success", MEMBERSOF(success_phrases), success_phrases }, + /* 3xx */ { "Redirection", MEMBERSOF(redirection_phrases), redirection_phrases }, + /* 4xx */ { "Client Error", MEMBERSOF(client_error_phrases), client_error_phrases }, + /* 5xx */ { "Server Error", MEMBERSOF(server_error_phrases), server_error_phrases } +}; + +static const char * +evhttp_response_phrase_internal(int code) +{ + int klass = code / 100 - 1; + int subcode = code % 100; + + /* Unknown class - can't do any better here */ + if (klass < 0 || klass >= (int) MEMBERSOF(response_classes)) + return "Unknown Status Class"; + + /* Unknown sub-code, return class name at least */ + if (subcode >= (int) response_classes[klass].num_responses) + return response_classes[klass].name; + + return response_classes[klass].responses[subcode]; +} + +void +evhttp_response_code_(struct evhttp_request *req, int code, const char *reason) +{ + req->kind = EVHTTP_RESPONSE; + req->response_code = code; + if (req->response_code_line != NULL) + mm_free(req->response_code_line); + if (reason == NULL) + reason = evhttp_response_phrase_internal(code); + req->response_code_line = mm_strdup(reason); + if (req->response_code_line == NULL) { + event_warn("%s: strdup", __func__); + /* XXX what else can we do? */ + } +} + +void +evhttp_send_page_(struct evhttp_request *req, struct evbuffer *databuf) +{ + if (!req->major || !req->minor) { + req->major = 1; + req->minor = 1; + } + + if (req->kind != EVHTTP_RESPONSE) + evhttp_response_code_(req, 200, "OK"); + + evhttp_clear_headers(req->output_headers); + evhttp_add_header(req->output_headers, "Content-Type", "text/html"); + evhttp_add_header(req->output_headers, "Connection", "close"); + + evhttp_send(req, databuf); +} + +static const char uri_chars[256] = { + /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + /* 64 */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, + /* 128 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 192 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +#define CHAR_IS_UNRESERVED(c) \ + (uri_chars[(unsigned char)(c)]) + +/* + * Helper functions to encode/decode a string for inclusion in a URI. + * The returned string must be freed by the caller. 
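+ * (Usage sketch, not part of the library sources: evhttp_uriencode("a b&c", -1, 1) is expected to return a freshly allocated "a+b%26c"; with space_as_plus set to 0 the space would be emitted as "%20" instead.)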
+ */ +char * +evhttp_uriencode(const char *uri, ev_ssize_t len, int space_as_plus) +{ + struct evbuffer *buf = evbuffer_new(); + const char *p, *end; + char *result = NULL; + + if (!buf) { + goto out; + } + + if (len >= 0) { + if (uri + len < uri) { + goto out; + } + + end = uri + len; + } else { + size_t slen = strlen(uri); + + if (slen >= EV_SSIZE_MAX) { + /* we don't want to mix signed and unsigned */ + goto out; + } + + if (uri + slen < uri) { + goto out; + } + + end = uri + slen; + } + + for (p = uri; p < end; p++) { + if (CHAR_IS_UNRESERVED(*p)) { + evbuffer_add(buf, p, 1); + } else if (*p == ' ' && space_as_plus) { + evbuffer_add(buf, "+", 1); + } else { + evbuffer_add_printf(buf, "%%%02X", (unsigned char)(*p)); + } + } + + evbuffer_add(buf, "", 1); /* NUL-terminator. */ + result = mm_malloc(evbuffer_get_length(buf)); + + if (result) + evbuffer_remove(buf, result, evbuffer_get_length(buf)); + +out: + if (buf) + evbuffer_free(buf); + return result; +} + +char * +evhttp_encode_uri(const char *str) +{ + return evhttp_uriencode(str, -1, 0); +} + +/* + * @param decode_plus_ctl: if 1, we decode plus into space. If 0, we don't. + * If -1, when true we transform plus to space only after we've seen + * a ?. -1 is deprecated. + * @return the number of bytes written to 'ret'. + */ +int +evhttp_decode_uri_internal( + const char *uri, size_t length, char *ret, int decode_plus_ctl) +{ + char c; + int j; + int decode_plus = (decode_plus_ctl == 1) ? 1: 0; + unsigned i; + + for (i = j = 0; i < length; i++) { + c = uri[i]; + if (c == '?') { + if (decode_plus_ctl < 0) + decode_plus = 1; + } else if (c == '+' && decode_plus) { + c = ' '; + } else if ((i + 2) < length && c == '%' && + EVUTIL_ISXDIGIT_(uri[i+1]) && EVUTIL_ISXDIGIT_(uri[i+2])) { + char tmp[3]; + tmp[0] = uri[i+1]; + tmp[1] = uri[i+2]; + tmp[2] = '\0'; + c = (char)strtol(tmp, NULL, 16); + i += 2; + } + ret[j++] = c; + } + ret[j] = '\0'; + + return (j); +} + +/* deprecated */ +char * +evhttp_decode_uri(const char *uri) +{ + char *ret; + + if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) { + event_warn("%s: malloc(%lu)", __func__, + (unsigned long)(strlen(uri) + 1)); + return (NULL); + } + + evhttp_decode_uri_internal(uri, strlen(uri), + ret, -1 /*always_decode_plus*/); + + return (ret); +} + +char * +evhttp_uridecode(const char *uri, int decode_plus, size_t *size_out) +{ + char *ret; + int n; + + if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) { + event_warn("%s: malloc(%lu)", __func__, + (unsigned long)(strlen(uri) + 1)); + return (NULL); + } + + n = evhttp_decode_uri_internal(uri, strlen(uri), + ret, !!decode_plus/*always_decode_plus*/); + + if (size_out) { + EVUTIL_ASSERT(n >= 0); + *size_out = (size_t)n; + } + + return (ret); +} + +/* + * Helper function to parse out arguments in a query. + * The arguments are separated by key and value. 
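+ * (Example, assuming the documented behaviour: parsing "q=test&s=some+thing" yields the pairs q -> "test" and s -> "some thing", since values are decoded with always_decode_plus set.)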
+ */ + +static int +evhttp_parse_query_impl(const char *str, struct evkeyvalq *headers, + int is_whole_uri) +{ + char *line=NULL; + char *argument; + char *p; + const char *query_part; + int result = -1; + struct evhttp_uri *uri=NULL; + + TAILQ_INIT(headers); + + if (is_whole_uri) { + uri = evhttp_uri_parse(str); + if (!uri) + goto error; + query_part = evhttp_uri_get_query(uri); + } else { + query_part = str; + } + + /* No arguments - we are done */ + if (!query_part || !strlen(query_part)) { + result = 0; + goto done; + } + + if ((line = mm_strdup(query_part)) == NULL) { + event_warn("%s: strdup", __func__); + goto error; + } + + p = argument = line; + while (p != NULL && *p != '\0') { + char *key, *value, *decoded_value; + argument = strsep(&p, "&"); + + value = argument; + key = strsep(&value, "="); + if (value == NULL || *key == '\0') { + goto error; + } + + if ((decoded_value = mm_malloc(strlen(value) + 1)) == NULL) { + event_warn("%s: mm_malloc", __func__); + goto error; + } + evhttp_decode_uri_internal(value, strlen(value), + decoded_value, 1 /*always_decode_plus*/); + event_debug(("Query Param: %s -> %s\n", key, decoded_value)); + evhttp_add_header_internal(headers, key, decoded_value); + mm_free(decoded_value); + } + + result = 0; + goto done; +error: + evhttp_clear_headers(headers); +done: + if (line) + mm_free(line); + if (uri) + evhttp_uri_free(uri); + return result; +} + +int +evhttp_parse_query(const char *uri, struct evkeyvalq *headers) +{ + return evhttp_parse_query_impl(uri, headers, 1); +} +int +evhttp_parse_query_str(const char *uri, struct evkeyvalq *headers) +{ + return evhttp_parse_query_impl(uri, headers, 0); +} + +static struct evhttp_cb * +evhttp_dispatch_callback(struct httpcbq *callbacks, struct evhttp_request *req) +{ + struct evhttp_cb *cb; + size_t offset = 0; + char *translated; + const char *path; + + /* Test for different URLs */ + path = evhttp_uri_get_path(req->uri_elems); + offset = strlen(path); + if ((translated = mm_malloc(offset + 1)) == NULL) + return (NULL); + evhttp_decode_uri_internal(path, offset, translated, + 0 /* decode_plus */); + + TAILQ_FOREACH(cb, callbacks, next) { + if (!strcmp(cb->what, translated)) { + mm_free(translated); + return (cb); + } + } + + mm_free(translated); + return (NULL); +} + + +static int +prefix_suffix_match(const char *pattern, const char *name, int ignorecase) +{ + char c; + + while (1) { + switch (c = *pattern++) { + case '\0': + return *name == '\0'; + + case '*': + while (*name != '\0') { + if (prefix_suffix_match(pattern, name, + ignorecase)) + return (1); + ++name; + } + return (0); + default: + if (c != *name) { + if (!ignorecase || + EVUTIL_TOLOWER_(c) != EVUTIL_TOLOWER_(*name)) + return (0); + } + ++name; + } + } + /* NOTREACHED */ +} + +/* + Search the vhost hierarchy beginning with http for a server alias + matching hostname. If a match is found, and outhttp is non-null, + outhttp is set to the matching http object and 1 is returned. +*/ + +static int +evhttp_find_alias(struct evhttp *http, struct evhttp **outhttp, + const char *hostname) +{ + struct evhttp_server_alias *alias; + struct evhttp *vhost; + + TAILQ_FOREACH(alias, &http->aliases, next) { + /* XXX Do we need to handle IP addresses? */ + if (!evutil_ascii_strcasecmp(alias->alias, hostname)) { + if (outhttp) + *outhttp = http; + return 1; + } + } + + /* XXX It might be good to avoid recursion here, but I don't + see a way to do that w/o a list. 
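+ (Clarifying note, not from upstream: the recursion simply walks every virtual host registered with evhttp_add_virtual_host(), so an alias added to a vhost with evhttp_add_server_alias() is found even when the lookup starts at the root server.)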
*/ + TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) { + if (evhttp_find_alias(vhost, outhttp, hostname)) + return 1; + } + + return 0; +} + +/* + Attempts to find the best http object to handle a request for a hostname. + All aliases for the root http object and vhosts are searched for an exact + match. Then, the vhost hierarchy is traversed again for a matching + pattern. + + If an alias or vhost is matched, 1 is returned, and outhttp, if non-null, + is set with the best matching http object. If there are no matches, the + root http object is stored in outhttp and 0 is returned. +*/ + +static int +evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp, + const char *hostname) +{ + struct evhttp *vhost; + struct evhttp *oldhttp; + int match_found = 0; + + if (evhttp_find_alias(http, outhttp, hostname)) + return 1; + + do { + oldhttp = http; + TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) { + if (prefix_suffix_match(vhost->vhost_pattern, + hostname, 1 /* ignorecase */)) { + http = vhost; + match_found = 1; + break; + } + } + } while (oldhttp != http); + + if (outhttp) + *outhttp = http; + + return match_found; +} + +static void +evhttp_handle_request(struct evhttp_request *req, void *arg) +{ + struct evhttp *http = arg; + struct evhttp_cb *cb = NULL; + const char *hostname; + + /* we have a new request on which the user needs to take action */ + req->userdone = 0; + + bufferevent_disable(req->evcon->bufev, EV_READ); + + if (req->type == 0 || req->uri == NULL) { + evhttp_send_error(req, req->response_code, NULL); + return; + } + + if ((http->allowed_methods & req->type) == 0) { + event_debug(("Rejecting disallowed method %x (allowed: %x)\n", + (unsigned)req->type, (unsigned)http->allowed_methods)); + evhttp_send_error(req, HTTP_NOTIMPLEMENTED, NULL); + return; + } + + /* handle potential virtual hosts */ + hostname = evhttp_request_get_host(req); + if (hostname != NULL) { + evhttp_find_vhost(http, &http, hostname); + } + + if ((cb = evhttp_dispatch_callback(&http->callbacks, req)) != NULL) { + (*cb->cb)(req, cb->cbarg); + return; + } + + /* Generic call back */ + if (http->gencb) { + (*http->gencb)(req, http->gencbarg); + return; + } else { + /* We need to send a 404 here */ +#define ERR_FORMAT "" \ + "404 Not Found" \ + "" \ + "
<H1>Not Found</H1>" \ + "<P>The requested URL %s was not found on this server.</P>
"\ + "\n" + + char *escaped_html; + struct evbuffer *buf; + + if ((escaped_html = evhttp_htmlescape(req->uri)) == NULL) { + evhttp_connection_free(req->evcon); + return; + } + + if ((buf = evbuffer_new()) == NULL) { + mm_free(escaped_html); + evhttp_connection_free(req->evcon); + return; + } + + evhttp_response_code_(req, HTTP_NOTFOUND, "Not Found"); + + evbuffer_add_printf(buf, ERR_FORMAT, escaped_html); + + mm_free(escaped_html); + + evhttp_send_page_(req, buf); + + evbuffer_free(buf); +#undef ERR_FORMAT + } +} + +/* Listener callback when a connection arrives at a server. */ +static void +accept_socket_cb(struct evconnlistener *listener, evutil_socket_t nfd, struct sockaddr *peer_sa, int peer_socklen, void *arg) +{ + struct evhttp *http = arg; + + evhttp_get_request(http, nfd, peer_sa, peer_socklen); +} + +int +evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port) +{ + struct evhttp_bound_socket *bound = + evhttp_bind_socket_with_handle(http, address, port); + if (bound == NULL) + return (-1); + return (0); +} + +struct evhttp_bound_socket * +evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port) +{ + evutil_socket_t fd; + struct evhttp_bound_socket *bound; + int serrno; + + if ((fd = bind_socket(address, port, 1 /*reuse*/)) == -1) + return (NULL); + + if (listen(fd, 128) == -1) { + serrno = EVUTIL_SOCKET_ERROR(); + event_sock_warn(fd, "%s: listen", __func__); + evutil_closesocket(fd); + EVUTIL_SET_SOCKET_ERROR(serrno); + return (NULL); + } + + bound = evhttp_accept_socket_with_handle(http, fd); + + if (bound != NULL) { + event_debug(("Bound to port %d - Awaiting connections ... ", + port)); + return (bound); + } + + return (NULL); +} + +int +evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd) +{ + struct evhttp_bound_socket *bound = + evhttp_accept_socket_with_handle(http, fd); + if (bound == NULL) + return (-1); + return (0); +} + +void +evhttp_foreach_bound_socket(struct evhttp *http, + evhttp_bound_socket_foreach_fn *function, + void *argument) +{ + struct evhttp_bound_socket *bound; + + TAILQ_FOREACH(bound, &http->sockets, next) + function(bound, argument); +} + +struct evhttp_bound_socket * +evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd) +{ + struct evhttp_bound_socket *bound; + struct evconnlistener *listener; + const int flags = + LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC|LEV_OPT_CLOSE_ON_FREE; + + listener = evconnlistener_new(http->base, NULL, NULL, + flags, + 0, /* Backlog is '0' because we already said 'listen' */ + fd); + if (!listener) + return (NULL); + + bound = evhttp_bind_listener(http, listener); + if (!bound) { + evconnlistener_free(listener); + return (NULL); + } + return (bound); +} + +struct evhttp_bound_socket * +evhttp_bind_listener(struct evhttp *http, struct evconnlistener *listener) +{ + struct evhttp_bound_socket *bound; + + bound = mm_malloc(sizeof(struct evhttp_bound_socket)); + if (bound == NULL) + return (NULL); + + bound->listener = listener; + TAILQ_INSERT_TAIL(&http->sockets, bound, next); + + evconnlistener_set_cb(listener, accept_socket_cb, http); + return bound; +} + +evutil_socket_t +evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound) +{ + return evconnlistener_get_fd(bound->listener); +} + +struct evconnlistener * +evhttp_bound_socket_get_listener(struct evhttp_bound_socket *bound) +{ + return bound->listener; +} + +void +evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound) +{ + TAILQ_REMOVE(&http->sockets, bound, 
next); + evconnlistener_free(bound->listener); + mm_free(bound); +} + +static struct evhttp* +evhttp_new_object(void) +{ + struct evhttp *http = NULL; + + if ((http = mm_calloc(1, sizeof(struct evhttp))) == NULL) { + event_warn("%s: calloc", __func__); + return (NULL); + } + + evutil_timerclear(&http->timeout); + evhttp_set_max_headers_size(http, EV_SIZE_MAX); + evhttp_set_max_body_size(http, EV_SIZE_MAX); + evhttp_set_default_content_type(http, "text/html; charset=ISO-8859-1"); + evhttp_set_allowed_methods(http, + EVHTTP_REQ_GET | + EVHTTP_REQ_POST | + EVHTTP_REQ_HEAD | + EVHTTP_REQ_PUT | + EVHTTP_REQ_DELETE); + + TAILQ_INIT(&http->sockets); + TAILQ_INIT(&http->callbacks); + TAILQ_INIT(&http->connections); + TAILQ_INIT(&http->virtualhosts); + TAILQ_INIT(&http->aliases); + + return (http); +} + +struct evhttp * +evhttp_new(struct event_base *base) +{ + struct evhttp *http = NULL; + + http = evhttp_new_object(); + if (http == NULL) + return (NULL); + http->base = base; + + return (http); +} + +/* + * Start a web server on the specified address and port. + */ + +struct evhttp * +evhttp_start(const char *address, ev_uint16_t port) +{ + struct evhttp *http = NULL; + + http = evhttp_new_object(); + if (http == NULL) + return (NULL); + if (evhttp_bind_socket(http, address, port) == -1) { + mm_free(http); + return (NULL); + } + + return (http); +} + +void +evhttp_free(struct evhttp* http) +{ + struct evhttp_cb *http_cb; + struct evhttp_connection *evcon; + struct evhttp_bound_socket *bound; + struct evhttp* vhost; + struct evhttp_server_alias *alias; + + /* Remove the accepting part */ + while ((bound = TAILQ_FIRST(&http->sockets)) != NULL) { + TAILQ_REMOVE(&http->sockets, bound, next); + + evconnlistener_free(bound->listener); + + mm_free(bound); + } + + while ((evcon = TAILQ_FIRST(&http->connections)) != NULL) { + /* evhttp_connection_free removes the connection */ + evhttp_connection_free(evcon); + } + + while ((http_cb = TAILQ_FIRST(&http->callbacks)) != NULL) { + TAILQ_REMOVE(&http->callbacks, http_cb, next); + mm_free(http_cb->what); + mm_free(http_cb); + } + + while ((vhost = TAILQ_FIRST(&http->virtualhosts)) != NULL) { + TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost); + + evhttp_free(vhost); + } + + if (http->vhost_pattern != NULL) + mm_free(http->vhost_pattern); + + while ((alias = TAILQ_FIRST(&http->aliases)) != NULL) { + TAILQ_REMOVE(&http->aliases, alias, next); + mm_free(alias->alias); + mm_free(alias); + } + + mm_free(http); +} + +int +evhttp_add_virtual_host(struct evhttp* http, const char *pattern, + struct evhttp* vhost) +{ + /* a vhost can only be a vhost once and should not have bound sockets */ + if (vhost->vhost_pattern != NULL || + TAILQ_FIRST(&vhost->sockets) != NULL) + return (-1); + + vhost->vhost_pattern = mm_strdup(pattern); + if (vhost->vhost_pattern == NULL) + return (-1); + + TAILQ_INSERT_TAIL(&http->virtualhosts, vhost, next_vhost); + + return (0); +} + +int +evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost) +{ + if (vhost->vhost_pattern == NULL) + return (-1); + + TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost); + + mm_free(vhost->vhost_pattern); + vhost->vhost_pattern = NULL; + + return (0); +} + +int +evhttp_add_server_alias(struct evhttp *http, const char *alias) +{ + struct evhttp_server_alias *evalias; + + evalias = mm_calloc(1, sizeof(*evalias)); + if (!evalias) + return -1; + + evalias->alias = mm_strdup(alias); + if (!evalias->alias) { + mm_free(evalias); + return -1; + } + + TAILQ_INSERT_TAIL(&http->aliases, evalias, next); + 
+ return 0; +} + +int +evhttp_remove_server_alias(struct evhttp *http, const char *alias) +{ + struct evhttp_server_alias *evalias; + + TAILQ_FOREACH(evalias, &http->aliases, next) { + if (evutil_ascii_strcasecmp(evalias->alias, alias) == 0) { + TAILQ_REMOVE(&http->aliases, evalias, next); + mm_free(evalias->alias); + mm_free(evalias); + return 0; + } + } + + return -1; +} + +void +evhttp_set_timeout(struct evhttp* http, int timeout_in_secs) +{ + if (timeout_in_secs == -1) { + evhttp_set_timeout_tv(http, NULL); + } else { + struct timeval tv; + tv.tv_sec = timeout_in_secs; + tv.tv_usec = 0; + evhttp_set_timeout_tv(http, &tv); + } +} + +void +evhttp_set_timeout_tv(struct evhttp* http, const struct timeval* tv) +{ + if (tv) { + http->timeout = *tv; + } else { + evutil_timerclear(&http->timeout); + } +} + +int evhttp_set_flags(struct evhttp *http, int flags) +{ + int avail_flags = 0; + avail_flags |= EVHTTP_SERVER_LINGERING_CLOSE; + + if (flags & ~avail_flags) + return 1; + http->flags &= ~avail_flags; + + http->flags |= flags; + + return 0; +} + +void +evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size) +{ + if (max_headers_size < 0) + http->default_max_headers_size = EV_SIZE_MAX; + else + http->default_max_headers_size = max_headers_size; +} + +void +evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size) +{ + if (max_body_size < 0) + http->default_max_body_size = EV_UINT64_MAX; + else + http->default_max_body_size = max_body_size; +} + +void +evhttp_set_default_content_type(struct evhttp *http, + const char *content_type) { + http->default_content_type = content_type; +} + +void +evhttp_set_allowed_methods(struct evhttp* http, ev_uint16_t methods) +{ + http->allowed_methods = methods; +} + +int +evhttp_set_cb(struct evhttp *http, const char *uri, + void (*cb)(struct evhttp_request *, void *), void *cbarg) +{ + struct evhttp_cb *http_cb; + + TAILQ_FOREACH(http_cb, &http->callbacks, next) { + if (strcmp(http_cb->what, uri) == 0) + return (-1); + } + + if ((http_cb = mm_calloc(1, sizeof(struct evhttp_cb))) == NULL) { + event_warn("%s: calloc", __func__); + return (-2); + } + + http_cb->what = mm_strdup(uri); + if (http_cb->what == NULL) { + event_warn("%s: strdup", __func__); + mm_free(http_cb); + return (-3); + } + http_cb->cb = cb; + http_cb->cbarg = cbarg; + + TAILQ_INSERT_TAIL(&http->callbacks, http_cb, next); + + return (0); +} + +int +evhttp_del_cb(struct evhttp *http, const char *uri) +{ + struct evhttp_cb *http_cb; + + TAILQ_FOREACH(http_cb, &http->callbacks, next) { + if (strcmp(http_cb->what, uri) == 0) + break; + } + if (http_cb == NULL) + return (-1); + + TAILQ_REMOVE(&http->callbacks, http_cb, next); + mm_free(http_cb->what); + mm_free(http_cb); + + return (0); +} + +void +evhttp_set_gencb(struct evhttp *http, + void (*cb)(struct evhttp_request *, void *), void *cbarg) +{ + http->gencb = cb; + http->gencbarg = cbarg; +} + +void +evhttp_set_bevcb(struct evhttp *http, + struct bufferevent* (*cb)(struct event_base *, void *), void *cbarg) +{ + http->bevcb = cb; + http->bevcbarg = cbarg; +} + +/* + * Request related functions + */ + +struct evhttp_request * +evhttp_request_new(void (*cb)(struct evhttp_request *, void *), void *arg) +{ + struct evhttp_request *req = NULL; + + /* Allocate request structure */ + if ((req = mm_calloc(1, sizeof(struct evhttp_request))) == NULL) { + event_warn("%s: calloc", __func__); + goto error; + } + + req->headers_size = 0; + req->body_size = 0; + + req->kind = EVHTTP_RESPONSE; + req->input_headers = 
mm_calloc(1, sizeof(struct evkeyvalq)); + if (req->input_headers == NULL) { + event_warn("%s: calloc", __func__); + goto error; + } + TAILQ_INIT(req->input_headers); + + req->output_headers = mm_calloc(1, sizeof(struct evkeyvalq)); + if (req->output_headers == NULL) { + event_warn("%s: calloc", __func__); + goto error; + } + TAILQ_INIT(req->output_headers); + + if ((req->input_buffer = evbuffer_new()) == NULL) { + event_warn("%s: evbuffer_new", __func__); + goto error; + } + + if ((req->output_buffer = evbuffer_new()) == NULL) { + event_warn("%s: evbuffer_new", __func__); + goto error; + } + + req->cb = cb; + req->cb_arg = arg; + + return (req); + + error: + if (req != NULL) + evhttp_request_free(req); + return (NULL); +} + +void +evhttp_request_free(struct evhttp_request *req) +{ + if ((req->flags & EVHTTP_REQ_DEFER_FREE) != 0) { + req->flags |= EVHTTP_REQ_NEEDS_FREE; + return; + } + + if (req->remote_host != NULL) + mm_free(req->remote_host); + if (req->uri != NULL) + mm_free(req->uri); + if (req->uri_elems != NULL) + evhttp_uri_free(req->uri_elems); + if (req->response_code_line != NULL) + mm_free(req->response_code_line); + if (req->host_cache != NULL) + mm_free(req->host_cache); + + evhttp_clear_headers(req->input_headers); + mm_free(req->input_headers); + + evhttp_clear_headers(req->output_headers); + mm_free(req->output_headers); + + if (req->input_buffer != NULL) + evbuffer_free(req->input_buffer); + + if (req->output_buffer != NULL) + evbuffer_free(req->output_buffer); + + mm_free(req); +} + +void +evhttp_request_own(struct evhttp_request *req) +{ + req->flags |= EVHTTP_USER_OWNED; +} + +int +evhttp_request_is_owned(struct evhttp_request *req) +{ + return (req->flags & EVHTTP_USER_OWNED) != 0; +} + +struct evhttp_connection * +evhttp_request_get_connection(struct evhttp_request *req) +{ + return req->evcon; +} + +struct event_base * +evhttp_connection_get_base(struct evhttp_connection *conn) +{ + return conn->base; +} + +void +evhttp_request_set_chunked_cb(struct evhttp_request *req, + void (*cb)(struct evhttp_request *, void *)) +{ + req->chunk_cb = cb; +} + +void +evhttp_request_set_header_cb(struct evhttp_request *req, + int (*cb)(struct evhttp_request *, void *)) +{ + req->header_cb = cb; +} + +void +evhttp_request_set_error_cb(struct evhttp_request *req, + void (*cb)(enum evhttp_request_error, void *)) +{ + req->error_cb = cb; +} + +void +evhttp_request_set_on_complete_cb(struct evhttp_request *req, + void (*cb)(struct evhttp_request *, void *), void *cb_arg) +{ + req->on_complete_cb = cb; + req->on_complete_cb_arg = cb_arg; +} + +/* + * Allows for inspection of the request URI + */ + +const char * +evhttp_request_get_uri(const struct evhttp_request *req) { + if (req->uri == NULL) + event_debug(("%s: request %p has no uri\n", __func__, req)); + return (req->uri); +} + +const struct evhttp_uri * +evhttp_request_get_evhttp_uri(const struct evhttp_request *req) { + if (req->uri_elems == NULL) + event_debug(("%s: request %p has no uri elems\n", + __func__, req)); + return (req->uri_elems); +} + +const char * +evhttp_request_get_host(struct evhttp_request *req) +{ + const char *host = NULL; + + if (req->host_cache) + return req->host_cache; + + if (req->uri_elems) + host = evhttp_uri_get_host(req->uri_elems); + if (!host && req->input_headers) { + const char *p; + size_t len; + + host = evhttp_find_header(req->input_headers, "Host"); + /* The Host: header may include a port. Remove it here + to be consistent with uri_elems case above. 
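+ (Example, not from upstream: a header of "Host: www.example.com:8080" is cached and returned as "www.example.com", and an IPv6 literal such as "[::1]:8080" becomes "[::1]".)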
*/ + if (host) { + p = host + strlen(host) - 1; + while (p > host && EVUTIL_ISDIGIT_(*p)) + --p; + if (p > host && *p == ':') { + len = p - host; + req->host_cache = mm_malloc(len + 1); + if (!req->host_cache) { + event_warn("%s: malloc", __func__); + return NULL; + } + memcpy(req->host_cache, host, len); + req->host_cache[len] = '\0'; + host = req->host_cache; + } + } + } + + return host; +} + +enum evhttp_cmd_type +evhttp_request_get_command(const struct evhttp_request *req) { + return (req->type); +} + +int +evhttp_request_get_response_code(const struct evhttp_request *req) +{ + return req->response_code; +} + +const char * +evhttp_request_get_response_code_line(const struct evhttp_request *req) +{ + return req->response_code_line; +} + +/** Returns the input headers */ +struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req) +{ + return (req->input_headers); +} + +/** Returns the output headers */ +struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req) +{ + return (req->output_headers); +} + +/** Returns the input buffer */ +struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req) +{ + return (req->input_buffer); +} + +/** Returns the output buffer */ +struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req) +{ + return (req->output_buffer); +} + + +/* + * Takes a file descriptor to read a request from. + * The callback is executed once the whole request has been read. + */ + +static struct evhttp_connection* +evhttp_get_request_connection( + struct evhttp* http, + evutil_socket_t fd, struct sockaddr *sa, ev_socklen_t salen) +{ + struct evhttp_connection *evcon; + char *hostname = NULL, *portname = NULL; + struct bufferevent* bev = NULL; + +#ifdef EVENT__HAVE_STRUCT_SOCKADDR_UN + if (sa->sa_family == AF_UNIX) { + struct sockaddr_un *sa_un = (struct sockaddr_un *)sa; + sa_un->sun_path[0] = '\0'; + } +#endif + + name_from_addr(sa, salen, &hostname, &portname); + if (hostname == NULL || portname == NULL) { + if (hostname) mm_free(hostname); + if (portname) mm_free(portname); + return (NULL); + } + + event_debug(("%s: new request from %s:%s on "EV_SOCK_FMT"\n", + __func__, hostname, portname, EV_SOCK_ARG(fd))); + + /* we need a connection object to put the http request on */ + if (http->bevcb != NULL) { + bev = (*http->bevcb)(http->base, http->bevcbarg); + } + evcon = evhttp_connection_base_bufferevent_new( + http->base, NULL, bev, hostname, atoi(portname)); + mm_free(hostname); + mm_free(portname); + if (evcon == NULL) + return (NULL); + + evcon->max_headers_size = http->default_max_headers_size; + evcon->max_body_size = http->default_max_body_size; + if (http->flags & EVHTTP_SERVER_LINGERING_CLOSE) + evcon->flags |= EVHTTP_CON_LINGERING_CLOSE; + + evcon->flags |= EVHTTP_CON_INCOMING; + evcon->state = EVCON_READING_FIRSTLINE; + + evcon->fd = fd; + + if (bufferevent_setfd(evcon->bufev, fd)) + goto err; + if (bufferevent_enable(evcon->bufev, EV_READ)) + goto err; + if (bufferevent_disable(evcon->bufev, EV_WRITE)) + goto err; + bufferevent_socket_set_conn_address_(evcon->bufev, sa, salen); + + return (evcon); + +err: + evhttp_connection_free(evcon); + return (NULL); +} + +static int +evhttp_associate_new_request_with_connection(struct evhttp_connection *evcon) +{ + struct evhttp *http = evcon->http_server; + struct evhttp_request *req; + if ((req = evhttp_request_new(evhttp_handle_request, http)) == NULL) + return (-1); + + if ((req->remote_host = mm_strdup(evcon->address)) == NULL) { + event_warn("%s: 
strdup", __func__); + evhttp_request_free(req); + return (-1); + } + req->remote_port = evcon->port; + + req->evcon = evcon; /* the request ends up owning the connection */ + req->flags |= EVHTTP_REQ_OWN_CONNECTION; + + /* We did not present the request to the user user yet, so treat it as + * if the user was done with the request. This allows us to free the + * request on a persistent connection if the client drops it without + * sending a request. + */ + req->userdone = 1; + + TAILQ_INSERT_TAIL(&evcon->requests, req, next); + + req->kind = EVHTTP_REQUEST; + + + evhttp_start_read_(evcon); + + return (0); +} + +static void +evhttp_get_request(struct evhttp *http, evutil_socket_t fd, + struct sockaddr *sa, ev_socklen_t salen) +{ + struct evhttp_connection *evcon; + + evcon = evhttp_get_request_connection(http, fd, sa, salen); + if (evcon == NULL) { + event_sock_warn(fd, "%s: cannot get connection on "EV_SOCK_FMT, + __func__, EV_SOCK_ARG(fd)); + evutil_closesocket(fd); + return; + } + + /* the timeout can be used by the server to close idle connections */ + if (evutil_timerisset(&http->timeout)) + evhttp_connection_set_timeout_tv(evcon, &http->timeout); + + /* + * if we want to accept more than one request on a connection, + * we need to know which http server it belongs to. + */ + evcon->http_server = http; + TAILQ_INSERT_TAIL(&http->connections, evcon, next); + + if (evhttp_associate_new_request_with_connection(evcon) == -1) + evhttp_connection_free(evcon); +} + + +/* + * Network helper functions that we do not want to export to the rest of + * the world. + */ + +static void +name_from_addr(struct sockaddr *sa, ev_socklen_t salen, + char **phost, char **pport) +{ + char ntop[NI_MAXHOST]; + char strport[NI_MAXSERV]; + int ni_result; + +#ifdef EVENT__HAVE_GETNAMEINFO + ni_result = getnameinfo(sa, salen, + ntop, sizeof(ntop), strport, sizeof(strport), + NI_NUMERICHOST|NI_NUMERICSERV); + + if (ni_result != 0) { +#ifdef EAI_SYSTEM + /* Windows doesn't have an EAI_SYSTEM. */ + if (ni_result == EAI_SYSTEM) + event_err(1, "getnameinfo failed"); + else +#endif + event_errx(1, "getnameinfo failed: %s", gai_strerror(ni_result)); + return; + } +#else + ni_result = fake_getnameinfo(sa, salen, + ntop, sizeof(ntop), strport, sizeof(strport), + NI_NUMERICHOST|NI_NUMERICSERV); + if (ni_result != 0) + return; +#endif + + *phost = mm_strdup(ntop); + *pport = mm_strdup(strport); +} + +/* Create a non-blocking socket and bind it */ +/* todo: rename this function */ +static evutil_socket_t +bind_socket_ai(struct evutil_addrinfo *ai, int reuse) +{ + evutil_socket_t fd; + + int on = 1, r; + int serrno; + + /* Create listen socket */ + fd = evutil_socket_(ai ? 
ai->ai_family : AF_INET, + SOCK_STREAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0); + if (fd == -1) { + event_sock_warn(-1, "socket"); + return (-1); + } + + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on))<0) + goto out; + if (reuse) { + if (evutil_make_listen_socket_reuseable(fd) < 0) + goto out; + } + + if (ai != NULL) { + r = bind(fd, ai->ai_addr, (ev_socklen_t)ai->ai_addrlen); + if (r == -1) + goto out; + } + + return (fd); + + out: + serrno = EVUTIL_SOCKET_ERROR(); + evutil_closesocket(fd); + EVUTIL_SET_SOCKET_ERROR(serrno); + return (-1); +} + +static struct evutil_addrinfo * +make_addrinfo(const char *address, ev_uint16_t port) +{ + struct evutil_addrinfo *ai = NULL; + + struct evutil_addrinfo hints; + char strport[NI_MAXSERV]; + int ai_result; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + /* turn NULL hostname into INADDR_ANY, and skip looking up any address + * types we don't have an interface to connect to. */ + hints.ai_flags = EVUTIL_AI_PASSIVE|EVUTIL_AI_ADDRCONFIG; + evutil_snprintf(strport, sizeof(strport), "%d", port); + if ((ai_result = evutil_getaddrinfo(address, strport, &hints, &ai)) + != 0) { + if (ai_result == EVUTIL_EAI_SYSTEM) + event_warn("getaddrinfo"); + else + event_warnx("getaddrinfo: %s", + evutil_gai_strerror(ai_result)); + return (NULL); + } + + return (ai); +} + +static evutil_socket_t +bind_socket(const char *address, ev_uint16_t port, int reuse) +{ + evutil_socket_t fd; + struct evutil_addrinfo *aitop = NULL; + + /* just create an unbound socket */ + if (address == NULL && port == 0) + return bind_socket_ai(NULL, 0); + + aitop = make_addrinfo(address, port); + + if (aitop == NULL) + return (-1); + + fd = bind_socket_ai(aitop, reuse); + + evutil_freeaddrinfo(aitop); + + return (fd); +} + +struct evhttp_uri { + unsigned flags; + char *scheme; /* scheme; e.g http, ftp etc */ + char *userinfo; /* userinfo (typically username:pass), or NULL */ + char *host; /* hostname, IP address, or NULL */ + int port; /* port, or zero */ + char *path; /* path, or "". */ + char *query; /* query, or NULL */ + char *fragment; /* fragment or NULL */ +}; + +struct evhttp_uri * +evhttp_uri_new(void) +{ + struct evhttp_uri *uri = mm_calloc(sizeof(struct evhttp_uri), 1); + if (uri) + uri->port = -1; + return uri; +} + +void +evhttp_uri_set_flags(struct evhttp_uri *uri, unsigned flags) +{ + uri->flags = flags; +} + +/* Return true if the string starting at s and ending immediately before eos + * is a valid URI scheme according to RFC3986 + */ +static int +scheme_ok(const char *s, const char *eos) +{ + /* scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) */ + EVUTIL_ASSERT(eos >= s); + if (s == eos) + return 0; + if (!EVUTIL_ISALPHA_(*s)) + return 0; + while (++s < eos) { + if (! 
EVUTIL_ISALNUM_(*s) && + *s != '+' && *s != '-' && *s != '.') + return 0; + } + return 1; +} + +#define SUBDELIMS "!$&'()*+,;=" + +/* Return true iff [s..eos) is a valid userinfo */ +static int +userinfo_ok(const char *s, const char *eos) +{ + while (s < eos) { + if (CHAR_IS_UNRESERVED(*s) || + strchr(SUBDELIMS, *s) || + *s == ':') + ++s; + else if (*s == '%' && s+2 < eos && + EVUTIL_ISXDIGIT_(s[1]) && + EVUTIL_ISXDIGIT_(s[2])) + s += 3; + else + return 0; + } + return 1; +} + +static int +regname_ok(const char *s, const char *eos) +{ + while (s && s < eos) { + if (CHAR_IS_UNRESERVED(*s) || + strchr(SUBDELIMS, *s)) + ++s; + else if (*s == '%' && + EVUTIL_ISXDIGIT_(s[1]) && + EVUTIL_ISXDIGIT_(s[2])) + s += 3; + else + return 0; + } + return 1; +} + +static int +parse_port(const char *s, const char *eos) +{ + int portnum = 0; + while (s < eos) { + if (! EVUTIL_ISDIGIT_(*s)) + return -1; + portnum = (portnum * 10) + (*s - '0'); + if (portnum < 0) + return -1; + if (portnum > 65535) + return -1; + ++s; + } + return portnum; +} + +/* returns 0 for bad, 1 for ipv6, 2 for IPvFuture */ +static int +bracket_addr_ok(const char *s, const char *eos) +{ + if (s + 3 > eos || *s != '[' || *(eos-1) != ']') + return 0; + if (s[1] == 'v') { + /* IPvFuture, or junk. + "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" ) + */ + s += 2; /* skip [v */ + --eos; + if (!EVUTIL_ISXDIGIT_(*s)) /*require at least one*/ + return 0; + while (s < eos && *s != '.') { + if (EVUTIL_ISXDIGIT_(*s)) + ++s; + else + return 0; + } + if (*s != '.') + return 0; + ++s; + while (s < eos) { + if (CHAR_IS_UNRESERVED(*s) || + strchr(SUBDELIMS, *s) || + *s == ':') + ++s; + else + return 0; + } + return 2; + } else { + /* IPv6, or junk */ + char buf[64]; + ev_ssize_t n_chars = eos-s-2; + struct in6_addr in6; + if (n_chars >= 64) /* way too long */ + return 0; + memcpy(buf, s+1, n_chars); + buf[n_chars]='\0'; + return (evutil_inet_pton(AF_INET6,buf,&in6)==1) ? 1 : 0; + } +} + +static int +parse_authority(struct evhttp_uri *uri, char *s, char *eos) +{ + char *cp, *port; + EVUTIL_ASSERT(eos); + if (eos == s) { + uri->host = mm_strdup(""); + if (uri->host == NULL) { + event_warn("%s: strdup", __func__); + return -1; + } + return 0; + } + + /* Optionally, we start with "userinfo@" */ + + cp = strchr(s, '@'); + if (cp && cp < eos) { + if (! userinfo_ok(s,cp)) + return -1; + *cp++ = '\0'; + uri->userinfo = mm_strdup(s); + if (uri->userinfo == NULL) { + event_warn("%s: strdup", __func__); + return -1; + } + } else { + cp = s; + } + /* Optionally, we end with ":port" */ + for (port=eos-1; port >= cp && EVUTIL_ISDIGIT_(*port); --port) + ; + if (port >= cp && *port == ':') { + if (port+1 == eos) /* Leave port unspecified; the RFC allows a + * nil port */ + uri->port = -1; + else if ((uri->port = parse_port(port+1, eos))<0) + return -1; + eos = port; + } + /* Now, cp..eos holds the "host" port, which can be an IPv4Address, + * an IP-Literal, or a reg-name */ + EVUTIL_ASSERT(eos >= cp); + if (*cp == '[' && eos >= cp+2 && *(eos-1) == ']') { + /* IPv6address, IP-Literal, or junk. */ + if (! bracket_addr_ok(cp, eos)) + return -1; + } else { + /* Make sure the host part is ok. */ + if (! regname_ok(cp,eos)) /* Match IPv4Address or reg-name */ + return -1; + } + uri->host = mm_malloc(eos-cp+1); + if (uri->host == NULL) { + event_warn("%s: malloc", __func__); + return -1; + } + memcpy(uri->host, cp, eos-cp); + uri->host[eos-cp] = '\0'; + return 0; + +} + +static char * +end_of_authority(char *cp) +{ + while (*cp) { + if (*cp == '?' || *cp == '#' || *cp == '/') + return cp; + ++cp; + } + return cp; +} + +enum uri_part { + PART_PATH, + PART_QUERY, + PART_FRAGMENT +}; + +/* Return the character after the longest prefix of 'cp' that matches... + * *pchar / "/" if allow_qchars is false, or + * *(pchar / "/" / "?") if allow_qchars is true.
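+ * (Clarifying note, not from upstream: the current signature has no allow_qchars flag; the 'part' argument below plays that role, and '?' is accepted once the part being scanned is the query or the fragment rather than the path.)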
+ */ +static char * +end_of_path(char *cp, enum uri_part part, unsigned flags) +{ + if (flags & EVHTTP_URI_NONCONFORMANT) { + /* If NONCONFORMANT: + * Path is everything up to a # or ? or nul. + * Query is everything up a # or nul + * Fragment is everything up to a nul. + */ + switch (part) { + case PART_PATH: + while (*cp && *cp != '#' && *cp != '?') + ++cp; + break; + case PART_QUERY: + while (*cp && *cp != '#') + ++cp; + break; + case PART_FRAGMENT: + cp += strlen(cp); + break; + }; + return cp; + } + + while (*cp) { + if (CHAR_IS_UNRESERVED(*cp) || + strchr(SUBDELIMS, *cp) || + *cp == ':' || *cp == '@' || *cp == '/') + ++cp; + else if (*cp == '%' && EVUTIL_ISXDIGIT_(cp[1]) && + EVUTIL_ISXDIGIT_(cp[2])) + cp += 3; + else if (*cp == '?' && part != PART_PATH) + ++cp; + else + return cp; + } + return cp; +} + +static int +path_matches_noscheme(const char *cp) +{ + while (*cp) { + if (*cp == ':') + return 0; + else if (*cp == '/') + return 1; + ++cp; + } + return 1; +} + +struct evhttp_uri * +evhttp_uri_parse(const char *source_uri) +{ + return evhttp_uri_parse_with_flags(source_uri, 0); +} + +struct evhttp_uri * +evhttp_uri_parse_with_flags(const char *source_uri, unsigned flags) +{ + char *readbuf = NULL, *readp = NULL, *token = NULL, *query = NULL; + char *path = NULL, *fragment = NULL; + int got_authority = 0; + + struct evhttp_uri *uri = mm_calloc(1, sizeof(struct evhttp_uri)); + if (uri == NULL) { + event_warn("%s: calloc", __func__); + goto err; + } + uri->port = -1; + uri->flags = flags; + + readbuf = mm_strdup(source_uri); + if (readbuf == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + + readp = readbuf; + token = NULL; + + /* We try to follow RFC3986 here as much as we can, and match + the productions + + URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] + + relative-ref = relative-part [ "?" query ] [ "#" fragment ] + */ + + /* 1. scheme: */ + token = strchr(readp, ':'); + if (token && scheme_ok(readp,token)) { + *token = '\0'; + uri->scheme = mm_strdup(readp); + if (uri->scheme == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + readp = token+1; /* eat : */ + } + + /* 2. Optionally, "//" then an 'authority' part. */ + if (readp[0]=='/' && readp[1] == '/') { + char *authority; + readp += 2; + authority = readp; + path = end_of_authority(readp); + if (parse_authority(uri, authority, path) < 0) + goto err; + readp = path; + got_authority = 1; + } + + /* 3. Query: path-abempty, path-absolute, path-rootless, or path-empty + */ + path = readp; + readp = end_of_path(path, PART_PATH, flags); + + /* Query */ + if (*readp == '?') { + *readp = '\0'; + ++readp; + query = readp; + readp = end_of_path(readp, PART_QUERY, flags); + } + /* fragment */ + if (*readp == '#') { + *readp = '\0'; + ++readp; + fragment = readp; + readp = end_of_path(readp, PART_FRAGMENT, flags); + } + if (*readp != '\0') { + goto err; + } + + /* These next two cases may be unreachable; I'm leaving them + * in to be defensive. */ + /* If you didn't get an authority, the path can't begin with "//" */ + if (!got_authority && path[0]=='/' && path[1]=='/') + goto err; + /* If you did get an authority, the path must begin with "/" or be + * empty. */ + if (got_authority && path[0] != '/' && path[0] != '\0') + goto err; + /* (End of maybe-unreachable cases) */ + + /* If there was no scheme, the first part of the path (if any) must + * have no colon in it. */ + if (! 
uri->scheme && !path_matches_noscheme(path)) + goto err; + + EVUTIL_ASSERT(path); + uri->path = mm_strdup(path); + if (uri->path == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + + if (query) { + uri->query = mm_strdup(query); + if (uri->query == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + } + if (fragment) { + uri->fragment = mm_strdup(fragment); + if (uri->fragment == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + } + + mm_free(readbuf); + + return uri; +err: + if (uri) + evhttp_uri_free(uri); + if (readbuf) + mm_free(readbuf); + return NULL; +} + +static struct evhttp_uri * +evhttp_uri_parse_authority(char *source_uri) +{ + struct evhttp_uri *uri = mm_calloc(1, sizeof(struct evhttp_uri)); + char *end; + + if (uri == NULL) { + event_warn("%s: calloc", __func__); + goto err; + } + uri->port = -1; + uri->flags = 0; + + end = end_of_authority(source_uri); + if (parse_authority(uri, source_uri, end) < 0) + goto err; + + uri->path = mm_strdup(""); + if (uri->path == NULL) { + event_warn("%s: strdup", __func__); + goto err; + } + + return uri; +err: + if (uri) + evhttp_uri_free(uri); + return NULL; +} + +void +evhttp_uri_free(struct evhttp_uri *uri) +{ +#define URI_FREE_STR_(f) \ + if (uri->f) { \ + mm_free(uri->f); \ + } + + URI_FREE_STR_(scheme); + URI_FREE_STR_(userinfo); + URI_FREE_STR_(host); + URI_FREE_STR_(path); + URI_FREE_STR_(query); + URI_FREE_STR_(fragment); + + mm_free(uri); +#undef URI_FREE_STR_ +} + +char * +evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit) +{ + struct evbuffer *tmp = 0; + size_t joined_size = 0; + char *output = NULL; + +#define URI_ADD_(f) evbuffer_add(tmp, uri->f, strlen(uri->f)) + + if (!uri || !buf || !limit) + return NULL; + + tmp = evbuffer_new(); + if (!tmp) + return NULL; + + if (uri->scheme) { + URI_ADD_(scheme); + evbuffer_add(tmp, ":", 1); + } + if (uri->host) { + evbuffer_add(tmp, "//", 2); + if (uri->userinfo) + evbuffer_add_printf(tmp,"%s@", uri->userinfo); + URI_ADD_(host); + if (uri->port >= 0) + evbuffer_add_printf(tmp,":%d", uri->port); + + if (uri->path && uri->path[0] != '/' && uri->path[0] != '\0') + goto err; + } + + if (uri->path) + URI_ADD_(path); + + if (uri->query) { + evbuffer_add(tmp, "?", 1); + URI_ADD_(query); + } + + if (uri->fragment) { + evbuffer_add(tmp, "#", 1); + URI_ADD_(fragment); + } + + evbuffer_add(tmp, "\0", 1); /* NUL */ + + joined_size = evbuffer_get_length(tmp); + + if (joined_size > limit) { + /* It doesn't fit. 
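+ (That is, the joined URI including its terminating NUL must fit within 'limit' bytes of 'buf'; when it does not, NULL is returned and 'buf' is left untouched.)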
*/ + evbuffer_free(tmp); + return NULL; + } + evbuffer_remove(tmp, buf, joined_size); + + output = buf; +err: + evbuffer_free(tmp); + + return output; +#undef URI_ADD_ +} + +const char * +evhttp_uri_get_scheme(const struct evhttp_uri *uri) +{ + return uri->scheme; +} +const char * +evhttp_uri_get_userinfo(const struct evhttp_uri *uri) +{ + return uri->userinfo; +} +const char * +evhttp_uri_get_host(const struct evhttp_uri *uri) +{ + return uri->host; +} +int +evhttp_uri_get_port(const struct evhttp_uri *uri) +{ + return uri->port; +} +const char * +evhttp_uri_get_path(const struct evhttp_uri *uri) +{ + return uri->path; +} +const char * +evhttp_uri_get_query(const struct evhttp_uri *uri) +{ + return uri->query; +} +const char * +evhttp_uri_get_fragment(const struct evhttp_uri *uri) +{ + return uri->fragment; +} + +#define URI_SET_STR_(f) do { \ + if (uri->f) \ + mm_free(uri->f); \ + if (f) { \ + if ((uri->f = mm_strdup(f)) == NULL) { \ + event_warn("%s: strdup()", __func__); \ + return -1; \ + } \ + } else { \ + uri->f = NULL; \ + } \ + } while(0) + +int +evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme) +{ + if (scheme && !scheme_ok(scheme, scheme+strlen(scheme))) + return -1; + + URI_SET_STR_(scheme); + return 0; +} +int +evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo) +{ + if (userinfo && !userinfo_ok(userinfo, userinfo+strlen(userinfo))) + return -1; + URI_SET_STR_(userinfo); + return 0; +} +int +evhttp_uri_set_host(struct evhttp_uri *uri, const char *host) +{ + if (host) { + if (host[0] == '[') { + if (! bracket_addr_ok(host, host+strlen(host))) + return -1; + } else { + if (! regname_ok(host, host+strlen(host))) + return -1; + } + } + + URI_SET_STR_(host); + return 0; +} +int +evhttp_uri_set_port(struct evhttp_uri *uri, int port) +{ + if (port < -1) + return -1; + uri->port = port; + return 0; +} +#define end_of_cpath(cp,p,f) \ + ((const char*)(end_of_path(((char*)(cp)), (p), (f)))) + +int +evhttp_uri_set_path(struct evhttp_uri *uri, const char *path) +{ + if (path && end_of_cpath(path, PART_PATH, uri->flags) != path+strlen(path)) + return -1; + + URI_SET_STR_(path); + return 0; +} +int +evhttp_uri_set_query(struct evhttp_uri *uri, const char *query) +{ + if (query && end_of_cpath(query, PART_QUERY, uri->flags) != query+strlen(query)) + return -1; + URI_SET_STR_(query); + return 0; +} +int +evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment) +{ + if (fragment && end_of_cpath(fragment, PART_FRAGMENT, uri->flags) != fragment+strlen(fragment)) + return -1; + URI_SET_STR_(fragment); + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/include/evdns.h b/probe-busybox/libevent-2.1.11-stable/include/evdns.h new file mode 100644 index 00000000..8672db03 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/evdns.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT1_EVDNS_H_INCLUDED_ +#define EVENT1_EVDNS_H_INCLUDED_ + +/** @file evdns.h + + A dns subsystem for Libevent. + + The header is deprecated in Libevent 2.0 and later; please + use instead. Depending on what functionality you + need, you may also want to include more of the other + headers. + */ + +#include +#include +#include +#include + +#endif /* EVENT1_EVDNS_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event.h b/probe-busybox/libevent-2.1.11-stable/include/event.h new file mode 100644 index 00000000..ba518671 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT1_EVENT_H_INCLUDED_ +#define EVENT1_EVENT_H_INCLUDED_ + +/** @file event.h + + A library for writing event-driven network servers. + + The header is deprecated in Libevent 2.0 and later; please + use instead. Depending on what functionality you + need, you may also want to include more of the other event2/ + headers. 
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef EVENT__HAVE_STDINT_H +#include +#endif +#include + +/* For int types. */ +#include + +#ifdef _WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#undef WIN32_LEAN_AND_MEAN +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT1_EVENT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/buffer.h b/probe-busybox/libevent-2.1.11-stable/include/event2/buffer.h new file mode 100644 index 00000000..468588b9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/buffer.h @@ -0,0 +1,1076 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_BUFFER_H_INCLUDED_ +#define EVENT2_BUFFER_H_INCLUDED_ + +/** @file event2/buffer.h + + Functions for buffering data for network sending or receiving. + + An evbuffer can be used for preparing data before sending it to + the network or conversely for reading data from the network. + Evbuffers try to avoid memory copies as much as possible. As a + result, evbuffers can be used to pass data around without actually + incurring the overhead of copying the data. + + A new evbuffer can be allocated with evbuffer_new(), and can be + freed with evbuffer_free(). Most users will be using evbuffers via + the bufferevent interface. To access a bufferevent's evbuffers, use + bufferevent_get_input() and bufferevent_get_output(). + + There are several guidelines for using evbuffers. + + - if you already know how much data you are going to add as a result + of calling evbuffer_add() multiple times, it makes sense to use + evbuffer_expand() first to make sure that enough memory is allocated + before hand. + + - evbuffer_add_buffer() adds the contents of one buffer to the other + without incurring any unnecessary memory copies. 
+ + - evbuffer_add() and evbuffer_add_buffer() do not mix very well: + if you use them, you will wind up with fragmented memory in your + buffer. + + - For high-performance code, you may want to avoid copying data into and out + of buffers. You can skip the copy step by using + evbuffer_reserve_space()/evbuffer_commit_space() when writing into a + buffer, and evbuffer_peek() when reading. + + In Libevent 2.0 and later, evbuffers are represented using a linked + list of memory chunks, with pointers to the first and last chunk in + the chain. + + As the contents of an evbuffer can be stored in multiple different + memory blocks, it cannot be accessed directly. Instead, evbuffer_pullup() + can be used to force a specified number of bytes to be contiguous. This + will cause memory reallocation and memory copies if the data is split + across multiple blocks. It is more efficient, however, to use + evbuffer_peek() if you don't require that the memory to be contiguous. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_UIO_H +#include +#endif +#include + +/** + An evbuffer is an opaque data type for efficiently buffering data to be + sent or received on the network. + + @see event2/event.h for more information +*/ +struct evbuffer +#ifdef EVENT_IN_DOXYGEN_ +{} +#endif +; + +/** + Pointer to a position within an evbuffer. + + Used when repeatedly searching through a buffer. Calling any function + that modifies or re-packs the buffer contents may invalidate all + evbuffer_ptrs for that buffer. Do not modify or contruct these values + except with evbuffer_ptr_set. + + An evbuffer_ptr can represent any position from the start of a buffer up + to a position immediately after the end of a buffer. + + @see evbuffer_ptr_set() + */ +struct evbuffer_ptr { + ev_ssize_t pos; + + /* Do not alter or rely on the values of fields: they are for internal + * use */ + struct { + void *chain; + size_t pos_in_chain; + } internal_; +}; + +/** Describes a single extent of memory inside an evbuffer. Used for + direct-access functions. + + @see evbuffer_reserve_space, evbuffer_commit_space, evbuffer_peek + */ +#ifdef EVENT__HAVE_SYS_UIO_H +#define evbuffer_iovec iovec +/* Internal use -- defined only if we are using the native struct iovec */ +#define EVBUFFER_IOVEC_IS_NATIVE_ +#else +struct evbuffer_iovec { + /** The start of the extent of memory. */ + void *iov_base; + /** The length of the extent of memory. */ + size_t iov_len; +}; +#endif + +/** + Allocate storage for a new evbuffer. + + @return a pointer to a newly allocated evbuffer struct, or NULL if an error + occurred + */ +EVENT2_EXPORT_SYMBOL +struct evbuffer *evbuffer_new(void); +/** + Deallocate storage for an evbuffer. + + @param buf pointer to the evbuffer to be freed + */ +EVENT2_EXPORT_SYMBOL +void evbuffer_free(struct evbuffer *buf); + +/** + Enable locking on an evbuffer so that it can safely be used by multiple + threads at the same time. + + NOTE: when locking is enabled, the lock will be held when callbacks are + invoked. This could result in deadlock if you aren't careful. Plan + accordingly! + + @param buf An evbuffer to make lockable. + @param lock A lock object, or NULL if we should allocate our own. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_enable_locking(struct evbuffer *buf, void *lock); + +/** + Acquire the lock on an evbuffer. Has no effect if locking was not enabled + with evbuffer_enable_locking. 
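
   Illustrative sketch (editorial addition, not from upstream libevent):
   share one evbuffer between threads by enabling locking once, then
   bracketing every access with evbuffer_lock()/evbuffer_unlock().

       struct evbuffer *shared = evbuffer_new();
       if (shared != NULL) {
           evbuffer_enable_locking(shared, NULL);  // NULL: let libevent allocate the lock
           evbuffer_lock(shared);
           evbuffer_add(shared, "ping", 4);        // all access happens under the lock
           evbuffer_unlock(shared);
           evbuffer_free(shared);
       }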
+*/ +EVENT2_EXPORT_SYMBOL +void evbuffer_lock(struct evbuffer *buf); + +/** + Release the lock on an evbuffer. Has no effect if locking was not enabled + with evbuffer_enable_locking. +*/ +EVENT2_EXPORT_SYMBOL +void evbuffer_unlock(struct evbuffer *buf); + + +/** If this flag is set, then we will not use evbuffer_peek(), + * evbuffer_remove(), evbuffer_remove_buffer(), and so on to read bytes + * from this buffer: we'll only take bytes out of this buffer by + * writing them to the network (as with evbuffer_write_atmost), by + * removing them without observing them (as with evbuffer_drain), + * or by copying them all out at once (as with evbuffer_add_buffer). + * + * Using this option allows the implementation to use sendfile-based + * operations for evbuffer_add_file(); see that function for more + * information. + * + * This flag is on by default for bufferevents that can take advantage + * of it; you should never actually need to set it on a bufferevent's + * output buffer. + */ +#define EVBUFFER_FLAG_DRAINS_TO_FD 1 + +/** Change the flags that are set for an evbuffer by adding more. + * + * @param buffer the evbuffer that the callback is watching. + * @param cb the callback whose status we want to change. + * @param flags One or more EVBUFFER_FLAG_* options + * @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags); +/** Change the flags that are set for an evbuffer by removing some. + * + * @param buffer the evbuffer that the callback is watching. + * @param cb the callback whose status we want to change. + * @param flags One or more EVBUFFER_FLAG_* options + * @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags); + +/** + Returns the total number of bytes stored in the evbuffer + + @param buf pointer to the evbuffer + @return the number of bytes stored in the evbuffer +*/ +EVENT2_EXPORT_SYMBOL +size_t evbuffer_get_length(const struct evbuffer *buf); + +/** + Returns the number of contiguous available bytes in the first buffer chain. + + This is useful when processing data that might be split into multiple + chains, or that might all be in the first chain. Calls to + evbuffer_pullup() that cause reallocation and copying of data can thus be + avoided. + + @param buf pointer to the evbuffer + @return 0 if no data is available, otherwise the number of available bytes + in the first buffer chain. +*/ +EVENT2_EXPORT_SYMBOL +size_t evbuffer_get_contiguous_space(const struct evbuffer *buf); + +/** + Expands the available space in an evbuffer. + + Expands the available space in the evbuffer to at least datlen, so that + appending datlen additional bytes will not require any new allocations. + + @param buf the evbuffer to be expanded + @param datlen the new minimum length requirement + @return 0 if successful, or -1 if an error occurred +*/ +EVENT2_EXPORT_SYMBOL +int evbuffer_expand(struct evbuffer *buf, size_t datlen); + +/** + Reserves space in the last chain or chains of an evbuffer. + + Makes space available in the last chain or chains of an evbuffer that can + be arbitrarily written to by a user. The space does not become + available for reading until it has been committed with + evbuffer_commit_space(). + + The space is made available as one or more extents, represented by + an initial pointer and a length. You can force the memory to be + available as only one extent. Allowing more extents, however, makes the + function more efficient. 
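
   Illustrative sketch of the reserve/commit pattern (editorial addition,
   not from upstream libevent); produce_data() is a hypothetical producer
   that fills a memory region and returns the number of bytes it wrote.

       struct evbuffer_iovec vec[2];
       int n = evbuffer_reserve_space(buf, 4096, vec, 2);
       if (n > 0) {
           vec[0].iov_len = produce_data(vec[0].iov_base, vec[0].iov_len);
           evbuffer_commit_space(buf, vec, 1);   // commit only what was filled
       }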
+ + Multiple subsequent calls to this function will make the same space + available until evbuffer_commit_space() has been called. + + It is an error to do anything that moves around the buffer's internal + memory structures before committing the space. + + NOTE: The code currently does not ever use more than two extents. + This may change in future versions. + + @param buf the evbuffer in which to reserve space. + @param size how much space to make available, at minimum. The + total length of the extents may be greater than the requested + length. + @param vec an array of one or more evbuffer_iovec structures to + hold pointers to the reserved extents of memory. + @param n_vec The length of the vec array. Must be at least 1; + 2 is more efficient. + @return the number of provided extents, or -1 on error. + @see evbuffer_commit_space() +*/ +EVENT2_EXPORT_SYMBOL +int +evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, + struct evbuffer_iovec *vec, int n_vec); + +/** + Commits previously reserved space. + + Commits some of the space previously reserved with + evbuffer_reserve_space(). It then becomes available for reading. + + This function may return an error if the pointer in the extents do + not match those returned from evbuffer_reserve_space, or if data + has been added to the buffer since the space was reserved. + + If you want to commit less data than you got reserved space for, + modify the iov_len pointer of the appropriate extent to a smaller + value. Note that you may have received more space than you + requested if it was available! + + @param buf the evbuffer in which to reserve space. + @param vec one or two extents returned by evbuffer_reserve_space. + @param n_vecs the number of extents. + @return 0 on success, -1 on error + @see evbuffer_reserve_space() +*/ +EVENT2_EXPORT_SYMBOL +int evbuffer_commit_space(struct evbuffer *buf, + struct evbuffer_iovec *vec, int n_vecs); + +/** + Append data to the end of an evbuffer. + + @param buf the evbuffer to be appended to + @param data pointer to the beginning of the data buffer + @param datlen the number of bytes to be copied from the data buffer + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen); + + +/** + Read data from an evbuffer and drain the bytes read. + + If more bytes are requested than are available in the evbuffer, we + only extract as many bytes as were available. + + @param buf the evbuffer to be read from + @param data the destination buffer to store the result + @param datlen the maximum size of the destination buffer + @return the number of bytes read, or -1 if we can't drain the buffer. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen); + +/** + Read data from an evbuffer, and leave the buffer unchanged. + + If more bytes are requested than are available in the evbuffer, we + only extract as many bytes as were available. + + @param buf the evbuffer to be read from + @param data_out the destination buffer to store the result + @param datlen the maximum size of the destination buffer + @return the number of bytes read, or -1 if we can't drain the buffer. + */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen); + +/** + Read data from the middle of an evbuffer, and leave the buffer unchanged. + + If more bytes are requested than are available in the evbuffer, we + only extract as many bytes as were available. 
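
   Illustrative sketch (editorial addition, not from upstream libevent):
   copy 16 bytes starting at offset 100 without draining the buffer.

       struct evbuffer_ptr pos;
       char out[16];
       if (evbuffer_ptr_set(buf, &pos, 100, EVBUFFER_PTR_SET) == 0) {
           ev_ssize_t n = evbuffer_copyout_from(buf, &pos, out, sizeof(out));
           // n is the number of bytes actually copied, possibly fewer than 16
       }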
+ + @param buf the evbuffer to be read from + @param pos the position to start reading from + @param data_out the destination buffer to store the result + @param datlen the maximum size of the destination buffer + @return the number of bytes read, or -1 if we can't drain the buffer. + */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos, void *data_out, size_t datlen); + +/** + Read data from an evbuffer into another evbuffer, draining + the bytes from the source buffer. This function avoids copy + operations to the extent possible. + + If more bytes are requested than are available in src, the src + buffer is drained completely. + + @param src the evbuffer to be read from + @param dst the destination evbuffer to store the result into + @param datlen the maximum numbers of bytes to transfer + @return the number of bytes read + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst, + size_t datlen); + +/** Used to tell evbuffer_readln what kind of line-ending to look for. + */ +enum evbuffer_eol_style { + /** Any sequence of CR and LF characters is acceptable as an + * EOL. + * + * Note that this style can produce ambiguous results: the + * sequence "CRLF" will be treated as a single EOL if it is + * all in the buffer at once, but if you first read a CR from + * the network and later read an LF from the network, it will + * be treated as two EOLs. + */ + EVBUFFER_EOL_ANY, + /** An EOL is an LF, optionally preceded by a CR. This style is + * most useful for implementing text-based internet protocols. */ + EVBUFFER_EOL_CRLF, + /** An EOL is a CR followed by an LF. */ + EVBUFFER_EOL_CRLF_STRICT, + /** An EOL is a LF. */ + EVBUFFER_EOL_LF, + /** An EOL is a NUL character (that is, a single byte with value 0) */ + EVBUFFER_EOL_NUL +}; + +/** + * Read a single line from an evbuffer. + * + * Reads a line terminated by an EOL as determined by the evbuffer_eol_style + * argument. Returns a newly allocated nul-terminated string; the caller must + * free the returned value. The EOL is not included in the returned string. + * + * @param buffer the evbuffer to read from + * @param n_read_out if non-NULL, points to a size_t that is set to the + * number of characters in the returned string. This is useful for + * strings that can contain NUL characters. + * @param eol_style the style of line-ending to use. + * @return pointer to a single line, or NULL if an error occurred + */ +EVENT2_EXPORT_SYMBOL +char *evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out, + enum evbuffer_eol_style eol_style); + +/** + Move all data from one evbuffer into another evbuffer. + + This is a destructive add. The data from one buffer moves into + the other buffer. However, no unnecessary memory copies occur. + + @param outbuf the output buffer + @param inbuf the input buffer + @return 0 if successful, or -1 if an error occurred + + @see evbuffer_remove_buffer() + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf); + +/** + Copy data from one evbuffer into another evbuffer. + + This is a non-destructive add. The data from one buffer is copied + into the other buffer. However, no unnecessary memory copies occur. + + Note that buffers already containing buffer references can't be added + to other buffers. 
+ + @param outbuf the output buffer + @param inbuf the input buffer + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_buffer_reference(struct evbuffer *outbuf, + struct evbuffer *inbuf); + +/** + A cleanup function for a piece of memory added to an evbuffer by + reference. + + @see evbuffer_add_reference() + */ +typedef void (*evbuffer_ref_cleanup_cb)(const void *data, + size_t datalen, void *extra); + +/** + Reference memory into an evbuffer without copying. + + The memory needs to remain valid until all the added data has been + read. This function keeps just a reference to the memory without + actually incurring the overhead of a copy. + + @param outbuf the output buffer + @param data the memory to reference + @param datlen how memory to reference + @param cleanupfn callback to be invoked when the memory is no longer + referenced by this evbuffer. + @param cleanupfn_arg optional argument to the cleanup callback + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_reference(struct evbuffer *outbuf, + const void *data, size_t datlen, + evbuffer_ref_cleanup_cb cleanupfn, void *cleanupfn_arg); + +/** + Copy data from a file into the evbuffer for writing to a socket. + + This function avoids unnecessary data copies between userland and + kernel. If sendfile is available and the EVBUFFER_FLAG_DRAINS_TO_FD + flag is set, it uses those functions. Otherwise, it tries to use + mmap (or CreateFileMapping on Windows). + + The function owns the resulting file descriptor and will close it + when finished transferring data. + + The results of using evbuffer_remove() or evbuffer_pullup() on + evbuffers whose data was added using this function are undefined. + + For more fine-grained control, use evbuffer_add_file_segment. + + @param outbuf the output buffer + @param fd the file descriptor + @param offset the offset from which to read data + @param length how much data to read, or -1 to read as much as possible. + (-1 requires that 'fd' support fstat.) + @return 0 if successful, or -1 if an error occurred +*/ + +EVENT2_EXPORT_SYMBOL +int evbuffer_add_file(struct evbuffer *outbuf, int fd, ev_off_t offset, + ev_off_t length); + +/** + An evbuffer_file_segment holds a reference to a range of a file -- + possibly the whole file! -- for use in writing from an evbuffer to a + socket. It could be implemented with mmap, sendfile, splice, or (if all + else fails) by just pulling all the data into RAM. A single + evbuffer_file_segment can be added more than once, and to more than one + evbuffer. + */ +struct evbuffer_file_segment; + +/** + Flag for creating evbuffer_file_segment: If this flag is set, then when + the evbuffer_file_segment is freed and no longer in use by any + evbuffer, the underlying fd is closed. + */ +#define EVBUF_FS_CLOSE_ON_FREE 0x01 +/** + Flag for creating evbuffer_file_segment: Disable memory-map based + implementations. + */ +#define EVBUF_FS_DISABLE_MMAP 0x02 +/** + Flag for creating evbuffer_file_segment: Disable direct fd-to-fd + implementations (including sendfile and splice). + + You might want to use this option if data needs to be taken from the + evbuffer by any means other than writing it to the network: the sendfile + backend is fast, but it only works for sending files directly to the + network. + */ +#define EVBUF_FS_DISABLE_SENDFILE 0x04 +/** + Flag for creating evbuffer_file_segment: Do not allocate a lock for this + segment. 
If this option is set, then neither the segment nor any + evbuffer it is added to may ever be accessed from more than one thread + at a time. + */ +#define EVBUF_FS_DISABLE_LOCKING 0x08 + +/** + A cleanup function for a evbuffer_file_segment added to an evbuffer + for reference. + */ +typedef void (*evbuffer_file_segment_cleanup_cb)( + struct evbuffer_file_segment const* seg, int flags, void* arg); + +/** + Create and return a new evbuffer_file_segment for reading data from a + file and sending it out via an evbuffer. + + This function avoids unnecessary data copies between userland and + kernel. Where available, it uses sendfile or splice. + + The file descriptor must not be closed so long as any evbuffer is using + this segment. + + The results of using evbuffer_remove() or evbuffer_pullup() or any other + function that reads bytes from an evbuffer on any evbuffer containing + the newly returned segment are undefined, unless you pass the + EVBUF_FS_DISABLE_SENDFILE flag to this function. + + @param fd an open file to read from. + @param offset an index within the file at which to start reading + @param length how much data to read, or -1 to read as much as possible. + (-1 requires that 'fd' support fstat.) + @param flags any number of the EVBUF_FS_* flags + @return a new evbuffer_file_segment, or NULL on failure. + **/ +EVENT2_EXPORT_SYMBOL +struct evbuffer_file_segment *evbuffer_file_segment_new( + int fd, ev_off_t offset, ev_off_t length, unsigned flags); + +/** + Free an evbuffer_file_segment + + It is safe to call this function even if the segment has been added to + one or more evbuffers. The evbuffer_file_segment will not be freed + until no more references to it exist. + */ +EVENT2_EXPORT_SYMBOL +void evbuffer_file_segment_free(struct evbuffer_file_segment *seg); + +/** + Add cleanup callback and argument for the callback to an + evbuffer_file_segment. + + The cleanup callback will be invoked when no more references to the + evbuffer_file_segment exist. + **/ +EVENT2_EXPORT_SYMBOL +void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, + evbuffer_file_segment_cleanup_cb cb, void* arg); + +/** + Insert some or all of an evbuffer_file_segment at the end of an evbuffer + + Note that the offset and length parameters of this function have a + different meaning from those provided to evbuffer_file_segment_new: When + you create the segment, the offset is the offset _within the file_, and + the length is the length _of the segment_, whereas when you add a + segment to an evbuffer, the offset is _within the segment_ and the + length is the length of the _part of the segment you want to use. + + In other words, if you have a 10 KiB file, and you create an + evbuffer_file_segment for it with offset 20 and length 1000, it will + refer to bytes 20..1019 inclusive. If you then pass this segment to + evbuffer_add_file_segment and specify an offset of 20 and a length of + 50, you will be adding bytes 40..99 inclusive. + + @param buf the evbuffer to append to + @param seg the segment to add + @param offset the offset within the segment to start from + @param length the amount of data to add, or -1 to add it all. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_file_segment(struct evbuffer *buf, + struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length); + +/** + Append a formatted string to the end of an evbuffer. + + The string is formated as printf. 
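
   Illustrative sketch (editorial addition, not from upstream libevent):

       struct evbuffer *out = evbuffer_new();
       if (out != NULL) {
           int added = evbuffer_add_printf(out,
               "GET %s HTTP/1.0\r\n\r\n", "/index.html");
           // added is the number of bytes appended, or -1 on error
           evbuffer_free(out);
       }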
+ + @param buf the evbuffer that will be appended to + @param fmt a format string + @param ... arguments that will be passed to printf(3) + @return The number of bytes added if successful, or -1 if an error occurred. + + @see evutil_printf(), evbuffer_add_vprintf() + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) +#ifdef __GNUC__ + __attribute__((format(printf, 2, 3))) +#endif +; + +/** + Append a va_list formatted string to the end of an evbuffer. + + @param buf the evbuffer that will be appended to + @param fmt a format string + @param ap a varargs va_list argument array that will be passed to vprintf(3) + @return The number of bytes added if successful, or -1 if an error occurred. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) +#ifdef __GNUC__ + __attribute__((format(printf, 2, 0))) +#endif +; + + +/** + Remove a specified number of bytes data from the beginning of an evbuffer. + + @param buf the evbuffer to be drained + @param len the number of bytes to drain from the beginning of the buffer + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_drain(struct evbuffer *buf, size_t len); + + +/** + Write the contents of an evbuffer to a file descriptor. + + The evbuffer will be drained after the bytes have been successfully written. + + @param buffer the evbuffer to be written and drained + @param fd the file descriptor to be written to + @return the number of bytes written, or -1 if an error occurred + @see evbuffer_read() + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd); + +/** + Write some of the contents of an evbuffer to a file descriptor. + + The evbuffer will be drained after the bytes have been successfully written. + + @param buffer the evbuffer to be written and drained + @param fd the file descriptor to be written to + @param howmuch the largest allowable number of bytes to write, or -1 + to write as many bytes as we can. + @return the number of bytes written, or -1 if an error occurred + @see evbuffer_read() + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, + ev_ssize_t howmuch); + +/** + Read from a file descriptor and store the result in an evbuffer. + + @param buffer the evbuffer to store the result + @param fd the file descriptor to read from + @param howmuch the number of bytes to be read + @return the number of bytes read, or -1 if an error occurred + @see evbuffer_write() + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_read(struct evbuffer *buffer, evutil_socket_t fd, int howmuch); + +/** + Search for a string within an evbuffer. + + @param buffer the evbuffer to be searched + @param what the string to be searched for + @param len the length of the search string + @param start NULL or a pointer to a valid struct evbuffer_ptr. + @return a struct evbuffer_ptr whose 'pos' field has the offset of the + first occurrence of the string in the buffer after 'start'. The 'pos' + field of the result is -1 if the string was not found. + */ +EVENT2_EXPORT_SYMBOL +struct evbuffer_ptr evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start); + +/** + Search for a string within part of an evbuffer. 
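
   Illustrative sketch (editorial addition, not from upstream libevent),
   using the plain evbuffer_search() declared above: locate the HTTP
   header/body separator in a buffer named buf.

       struct evbuffer_ptr hit = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
       if (hit.pos != -1) {
           // the separator starts hit.pos bytes into buf
       }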
+ + @param buffer the evbuffer to be searched + @param what the string to be searched for + @param len the length of the search string + @param start NULL or a pointer to a valid struct evbuffer_ptr that + indicates where we should start searching. + @param end NULL or a pointer to a valid struct evbuffer_ptr that + indicates where we should stop searching. + @return a struct evbuffer_ptr whose 'pos' field has the offset of the + first occurrence of the string in the buffer after 'start'. The 'pos' + field of the result is -1 if the string was not found. + */ +EVENT2_EXPORT_SYMBOL +struct evbuffer_ptr evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end); + +/** + Defines how to adjust an evbuffer_ptr by evbuffer_ptr_set() + + @see evbuffer_ptr_set() */ +enum evbuffer_ptr_how { + /** Sets the pointer to the position; can be called on with an + uninitialized evbuffer_ptr. */ + EVBUFFER_PTR_SET, + /** Advances the pointer by adding to the current position. */ + EVBUFFER_PTR_ADD +}; + +/** + Sets the search pointer in the buffer to position. + + There are two ways to use this function: you can call + evbuffer_ptr_set(buf, &pos, N, EVBUFFER_PTR_SET) + to move 'pos' to a position 'N' bytes after the start of the buffer, or + evbuffer_ptr_set(buf, &pos, N, EVBUFFER_PTR_ADD) + to move 'pos' forward by 'N' bytes. + + If evbuffer_ptr is not initialized, this function can only be called + with EVBUFFER_PTR_SET. + + An evbuffer_ptr can represent any position from the start of the buffer to + a position immediately after the end of the buffer. + + @param buffer the evbuffer to be search + @param ptr a pointer to a struct evbuffer_ptr + @param position the position at which to start the next search + @param how determines how the pointer should be manipulated. + @returns 0 on success or -1 otherwise +*/ +EVENT2_EXPORT_SYMBOL +int +evbuffer_ptr_set(struct evbuffer *buffer, struct evbuffer_ptr *ptr, + size_t position, enum evbuffer_ptr_how how); + +/** + Search for an end-of-line string within an evbuffer. + + @param buffer the evbuffer to be searched + @param start NULL or a pointer to a valid struct evbuffer_ptr to start + searching at. + @param eol_len_out If non-NULL, the pointed-to value will be set to + the length of the end-of-line string. + @param eol_style The kind of EOL to look for; see evbuffer_readln() for + more information + @return a struct evbuffer_ptr whose 'pos' field has the offset of the + first occurrence EOL in the buffer after 'start'. The 'pos' + field of the result is -1 if the string was not found. + */ +EVENT2_EXPORT_SYMBOL +struct evbuffer_ptr evbuffer_search_eol(struct evbuffer *buffer, + struct evbuffer_ptr *start, size_t *eol_len_out, + enum evbuffer_eol_style eol_style); + +/** Function to peek at data inside an evbuffer without removing it or + copying it out. + + Pointers to the data are returned by filling the 'vec_out' array + with pointers to one or more extents of data inside the buffer. + + The total data in the extents that you get back may be more than + you requested (if there is more data last extent than you asked + for), or less (if you do not provide enough evbuffer_iovecs, or if + the buffer does not have as much data as you asked to see). + + @param buffer the evbuffer to peek into, + @param len the number of bytes to try to peek. If len is negative, we + will try to fill as much of vec_out as we can. 
If len is negative + and vec_out is not provided, we return the number of evbuffer_iovecs + that would be needed to get all the data in the buffer. + @param start_at an evbuffer_ptr indicating the point at which we + should start looking for data. NULL means, "At the start of the + buffer." + @param vec_out an array of evbuffer_iovec + @param n_vec the length of vec_out. If 0, we only count how many + extents would be necessary to point to the requested amount of + data. + @return The number of extents needed. This may be less than n_vec + if we didn't need all the evbuffer_iovecs we were given, or more + than n_vec if we would need more to return all the data that was + requested. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, + struct evbuffer_ptr *start_at, + struct evbuffer_iovec *vec_out, int n_vec); + + +/** Structure passed to an evbuffer_cb_func evbuffer callback + + @see evbuffer_cb_func, evbuffer_add_cb() + */ +struct evbuffer_cb_info { + /** The number of bytes in this evbuffer when callbacks were last + * invoked. */ + size_t orig_size; + /** The number of bytes added since callbacks were last invoked. */ + size_t n_added; + /** The number of bytes removed since callbacks were last invoked. */ + size_t n_deleted; +}; + +/** Type definition for a callback that is invoked whenever data is added or + removed from an evbuffer. + + An evbuffer may have one or more callbacks set at a time. The order + in which they are executed is undefined. + + A callback function may add more callbacks, or remove itself from the + list of callbacks, or add or remove data from the buffer. It may not + remove another callback from the list. + + If a callback adds or removes data from the buffer or from another + buffer, this can cause a recursive invocation of your callback or + other callbacks. If you ask for an infinite loop, you might just get + one: watch out! + + @param buffer the buffer whose size has changed + @param info a structure describing how the buffer changed. + @param arg a pointer to user data +*/ +typedef void (*evbuffer_cb_func)(struct evbuffer *buffer, const struct evbuffer_cb_info *info, void *arg); + +struct evbuffer_cb_entry; +/** Add a new callback to an evbuffer. + + Subsequent calls to evbuffer_add_cb() add new callbacks. To remove this + callback, call evbuffer_remove_cb or evbuffer_remove_cb_entry. + + @param buffer the evbuffer to be monitored + @param cb the callback function to invoke when the evbuffer is modified, + or NULL to remove all callbacks. + @param cbarg an argument to be provided to the callback function + @return a handle to the callback on success, or NULL on failure. + */ +EVENT2_EXPORT_SYMBOL +struct evbuffer_cb_entry *evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg); + +/** Remove a callback from an evbuffer, given a handle returned from + evbuffer_add_cb. + + Calling this function invalidates the handle. + + @return 0 if a callback was removed, or -1 if no matching callback was + found. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_remove_cb_entry(struct evbuffer *buffer, + struct evbuffer_cb_entry *ent); + +/** Remove a callback from an evbuffer, given the function and argument + used to add it. + + @return 0 if a callback was removed, or -1 if no matching callback was + found. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg); + +/** If this flag is not set, then a callback is temporarily disabled, and + * should not be invoked. 
+ * + * @see evbuffer_cb_set_flags(), evbuffer_cb_clear_flags() + */ +#define EVBUFFER_CB_ENABLED 1 + +/** Change the flags that are set for a callback on a buffer by adding more. + + @param buffer the evbuffer that the callback is watching. + @param cb the callback whose status we want to change. + @param flags EVBUFFER_CB_ENABLED to re-enable the callback. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_cb_set_flags(struct evbuffer *buffer, + struct evbuffer_cb_entry *cb, ev_uint32_t flags); + +/** Change the flags that are set for a callback on a buffer by removing some + + @param buffer the evbuffer that the callback is watching. + @param cb the callback whose status we want to change. + @param flags EVBUFFER_CB_ENABLED to disable the callback. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_cb_clear_flags(struct evbuffer *buffer, + struct evbuffer_cb_entry *cb, ev_uint32_t flags); + +#if 0 +/** Postpone calling a given callback until unsuspend is called later. + + This is different from disabling the callback, since the callback will get + invoked later if the buffer size changes between now and when we unsuspend + it. + + @param the buffer that the callback is watching. + @param cb the callback we want to suspend. + */ +EVENT2_EXPORT_SYMBOL +void evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb); +/** Stop postponing a callback that we postponed with evbuffer_cb_suspend. + + If data was added to or removed from the buffer while the callback was + suspended, the callback will get called once now. + + @param the buffer that the callback is watching. + @param cb the callback we want to stop suspending. + */ +EVENT2_EXPORT_SYMBOL +void evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb); +#endif + +/** + Makes the data at the beginning of an evbuffer contiguous. + + @param buf the evbuffer to make contiguous + @param size the number of bytes to make contiguous, or -1 to make the + entire buffer contiguous. + @return a pointer to the contiguous memory array, or NULL if param size + requested more data than is present in the buffer. +*/ + +EVENT2_EXPORT_SYMBOL +unsigned char *evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size); + +/** + Prepends data to the beginning of the evbuffer + + @param buf the evbuffer to which to prepend data + @param data a pointer to the memory to prepend + @param size the number of bytes to prepend + @return 0 if successful, or -1 otherwise +*/ + +EVENT2_EXPORT_SYMBOL +int evbuffer_prepend(struct evbuffer *buf, const void *data, size_t size); + +/** + Prepends all data from the src evbuffer to the beginning of the dst + evbuffer. + + @param dst the evbuffer to which to prepend data + @param src the evbuffer to prepend; it will be emptied as a result + @return 0 if successful, or -1 otherwise +*/ +EVENT2_EXPORT_SYMBOL +int evbuffer_prepend_buffer(struct evbuffer *dst, struct evbuffer* src); + +/** + Prevent calls that modify an evbuffer from succeeding. A buffer may + frozen at the front, at the back, or at both the front and the back. + + If the front of a buffer is frozen, operations that drain data from + the front of the buffer, or that prepend data to the buffer, will + fail until it is unfrozen. If the back a buffer is frozen, operations + that append data from the buffer will fail until it is unfrozen. + + @param buf The buffer to freeze + @param at_front If true, we freeze the front of the buffer. If false, + we freeze the back. 
+ @return 0 on success, -1 on failure. +*/ +EVENT2_EXPORT_SYMBOL +int evbuffer_freeze(struct evbuffer *buf, int at_front); +/** + Re-enable calls that modify an evbuffer. + + @param buf The buffer to un-freeze + @param at_front If true, we unfreeze the front of the buffer. If false, + we unfreeze the back. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_unfreeze(struct evbuffer *buf, int at_front); + +struct event_base; +/** + Force all the callbacks on an evbuffer to be run, not immediately after + the evbuffer is altered, but instead from inside the event loop. + + This can be used to serialize all the callbacks to a single thread + of execution. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base); + +/** + Append data from 1 or more iovec's to an evbuffer + + Calculates the number of bytes needed for an iovec structure and guarantees + all data will fit into a single chain. Can be used in lieu of functionality + which calls evbuffer_add() constantly before being used to increase + performance. + + @param buffer the destination buffer + @param vec the source iovec + @param n_vec the number of iovec structures. + @return the number of bytes successfully written to the output buffer. +*/ +EVENT2_EXPORT_SYMBOL +size_t evbuffer_add_iovec(struct evbuffer * buffer, struct evbuffer_iovec * vec, int n_vec); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_BUFFER_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/buffer_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/buffer_compat.h new file mode 100644 index 00000000..0ce10254 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/buffer_compat.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT2_BUFFER_COMPAT_H_INCLUDED_ +#define EVENT2_BUFFER_COMPAT_H_INCLUDED_ + +#include + +/** @file event2/buffer_compat.h + + Obsolete and deprecated versions of the functions in buffer.h: provided + only for backward compatibility. 
+ */ + + +/** + Obsolete alias for evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY). + + @deprecated This function is deprecated because its behavior is not correct + for almost any protocol, and also because it's wholly subsumed by + evbuffer_readln(). + + @param buffer the evbuffer to read from + @return pointer to a single line, or NULL if an error occurred + +*/ +EVENT2_EXPORT_SYMBOL +char *evbuffer_readline(struct evbuffer *buffer); + +/** Type definition for a callback that is invoked whenever data is added or + removed from an evbuffer. + + An evbuffer may have one or more callbacks set at a time. The order + in which they are executed is undefined. + + A callback function may add more callbacks, or remove itself from the + list of callbacks, or add or remove data from the buffer. It may not + remove another callback from the list. + + If a callback adds or removes data from the buffer or from another + buffer, this can cause a recursive invocation of your callback or + other callbacks. If you ask for an infinite loop, you might just get + one: watch out! + + @param buffer the buffer whose size has changed + @param old_len the previous length of the buffer + @param new_len the current length of the buffer + @param arg a pointer to user data +*/ +typedef void (*evbuffer_cb)(struct evbuffer *buffer, size_t old_len, size_t new_len, void *arg); + +/** + Replace all callbacks on an evbuffer with a single new callback, or + remove them. + + Subsequent calls to evbuffer_setcb() replace callbacks set by previous + calls. Setting the callback to NULL removes any previously set callback. + + @deprecated This function is deprecated because it clears all previous + callbacks set on the evbuffer, which can cause confusing behavior if + multiple parts of the code all want to add their own callbacks on a + buffer. Instead, use evbuffer_add(), evbuffer_del(), and + evbuffer_setflags() to manage your own evbuffer callbacks without + interfering with callbacks set by others. + + @param buffer the evbuffer to be monitored + @param cb the callback function to invoke when the evbuffer is modified, + or NULL to remove all callbacks. + @param cbarg an argument to be provided to the callback function + @return 0 if successful, or -1 on error + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg); + + +/** + Find a string within an evbuffer. + + @param buffer the evbuffer to be searched + @param what the string to be searched for + @param len the length of the search string + @return a pointer to the beginning of the search string, or NULL if the search failed. 
+ */ +EVENT2_EXPORT_SYMBOL +unsigned char *evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len); + +/** deprecated in favor of calling the functions directly */ +#define EVBUFFER_LENGTH(x) evbuffer_get_length(x) +/** deprecated in favor of calling the functions directly */ +#define EVBUFFER_DATA(x) evbuffer_pullup((x), -1) + +#endif + diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent.h b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent.h new file mode 100644 index 00000000..48cd1535 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent.h @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_BUFFEREVENT_H_INCLUDED_ +#define EVENT2_BUFFEREVENT_H_INCLUDED_ + +/** + @file event2/bufferevent.h + + Functions for buffering data for network sending or receiving. Bufferevents + are higher level than evbuffers: each has an underlying evbuffer for reading + and one for writing, and callbacks that are invoked under certain + circumstances. + + A bufferevent provides input and output buffers that get filled and + drained automatically. The user of a bufferevent no longer deals + directly with the I/O, but instead is reading from input and writing + to output buffers. + + Once initialized, the bufferevent structure can be used repeatedly + with bufferevent_enable() and bufferevent_disable(). + + When reading is enabled, the bufferevent will try to read from the + file descriptor onto its input buffer, and call the read callback. + When writing is enabled, the bufferevent will try to write data onto its + file descriptor when the output buffer has enough data, and call the write + callback when the output buffer is sufficiently drained. + + Bufferevents come in several flavors, including: + +
+  <dl>
+    <dt>Socket-based bufferevents</dt>
+      <dd>A bufferevent that reads and writes data onto a network
+          socket. Created with bufferevent_socket_new().</dd>
+
+    <dt>Paired bufferevents</dt>
+      <dd>A pair of bufferevents that send and receive data to one
+          another without touching the network. Created with
+          bufferevent_pair_new().</dd>
+
+    <dt>Filtering bufferevents</dt>
+      <dd>A bufferevent that transforms data, and sends or receives it
+          over another underlying bufferevent. Created with
+          bufferevent_filter_new().</dd>
+
+    <dt>SSL-backed bufferevents</dt>
+      <dd>A bufferevent that uses the openssl library to send and
+          receive data over an encrypted connection. Created with
+          bufferevent_openssl_socket_new() or
+          bufferevent_openssl_filter_new().</dd>
+  </dl>
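
   Illustrative sketch (editorial addition, not from upstream libevent):
   a socket-based bufferevent that connects to a host and runs the event
   loop. readcb and eventcb stand for user-supplied callbacks matching
   bufferevent_data_cb and bufferevent_event_cb; EV_READ/EV_WRITE come
   from event2/event.h, and passing a NULL evdns_base makes the hostname
   lookup blocking.

       struct event_base *base = event_base_new();
       struct bufferevent *bev =
           bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
       bufferevent_setcb(bev, readcb, NULL, eventcb, NULL);
       bufferevent_enable(bev, EV_READ | EV_WRITE);
       bufferevent_socket_connect_hostname(bev, NULL, AF_UNSPEC,
           "www.example.com", 80);
       event_base_dispatch(base);
       bufferevent_free(bev);
       event_base_free(base);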
+ */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include + +/** @name Bufferevent event codes + + These flags are passed as arguments to a bufferevent's event callback. + + @{ +*/ +#define BEV_EVENT_READING 0x01 /**< error encountered while reading */ +#define BEV_EVENT_WRITING 0x02 /**< error encountered while writing */ +#define BEV_EVENT_EOF 0x10 /**< eof file reached */ +#define BEV_EVENT_ERROR 0x20 /**< unrecoverable error encountered */ +#define BEV_EVENT_TIMEOUT 0x40 /**< user-specified timeout reached */ +#define BEV_EVENT_CONNECTED 0x80 /**< connect operation finished. */ +/**@}*/ + +/** + An opaque type for handling buffered IO + + @see event2/bufferevent.h + */ +struct bufferevent +#ifdef EVENT_IN_DOXYGEN_ +{} +#endif +; +struct event_base; +struct evbuffer; +struct sockaddr; + +/** + A read or write callback for a bufferevent. + + The read callback is triggered when new data arrives in the input + buffer and the amount of readable data exceed the low watermark + which is 0 by default. + + The write callback is triggered if the write buffer has been + exhausted or fell below its low watermark. + + @param bev the bufferevent that triggered the callback + @param ctx the user-specified context for this bufferevent + */ +typedef void (*bufferevent_data_cb)(struct bufferevent *bev, void *ctx); + +/** + An event/error callback for a bufferevent. + + The event callback is triggered if either an EOF condition or another + unrecoverable error was encountered. + + For bufferevents with deferred callbacks, this is a bitwise OR of all errors + that have happened on the bufferevent since the last callback invocation. + + @param bev the bufferevent for which the error condition was reached + @param what a conjunction of flags: BEV_EVENT_READING or BEV_EVENT_WRITING + to indicate if the error was encountered on the read or write path, + and one of the following flags: BEV_EVENT_EOF, BEV_EVENT_ERROR, + BEV_EVENT_TIMEOUT, BEV_EVENT_CONNECTED. + + @param ctx the user-specified context for this bufferevent +*/ +typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx); + +/** Options that can be specified when creating a bufferevent */ +enum bufferevent_options { + /** If set, we close the underlying file + * descriptor/bufferevent/whatever when this bufferevent is freed. */ + BEV_OPT_CLOSE_ON_FREE = (1<<0), + + /** If set, and threading is enabled, operations on this bufferevent + * are protected by a lock */ + BEV_OPT_THREADSAFE = (1<<1), + + /** If set, callbacks are run deferred in the event loop. */ + BEV_OPT_DEFER_CALLBACKS = (1<<2), + + /** If set, callbacks are executed without locks being held on the + * bufferevent. This option currently requires that + * BEV_OPT_DEFER_CALLBACKS also be set; a future version of Libevent + * might remove the requirement.*/ + BEV_OPT_UNLOCK_CALLBACKS = (1<<3) +}; + +/** + Create a new socket bufferevent over an existing socket. + + @param base the event base to associate with the new bufferevent. + @param fd the file descriptor from which data is read and written to. + This file descriptor is not allowed to be a pipe(2). + It is safe to set the fd to -1, so long as you later + set it with bufferevent_setfd or bufferevent_socket_connect(). 
+ @param options Zero or more BEV_OPT_* flags + @return a pointer to a newly allocated bufferevent struct, or NULL if an + error occurred + @see bufferevent_free() + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent *bufferevent_socket_new(struct event_base *base, evutil_socket_t fd, int options); + +/** + Launch a connect() attempt with a socket-based bufferevent. + + When the connect succeeds, the eventcb will be invoked with + BEV_EVENT_CONNECTED set. + + If the bufferevent does not already have a socket set, we allocate a new + socket here and make it nonblocking before we begin. + + If no address is provided, we assume that the socket is already connecting, + and configure the bufferevent so that a BEV_EVENT_CONNECTED event will be + yielded when it is done connecting. + + @param bufev an existing bufferevent allocated with + bufferevent_socket_new(). + @param addr the address we should connect to + @param socklen The length of the address + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_socket_connect(struct bufferevent *, const struct sockaddr *, int); + +struct evdns_base; +/** + Resolve the hostname 'hostname' and connect to it as with + bufferevent_socket_connect(). + + @param bufev An existing bufferevent allocated with bufferevent_socket_new() + @param evdns_base Optionally, an evdns_base to use for resolving hostnames + asynchronously. May be set to NULL for a blocking resolve. + @param family A preferred address family to resolve addresses to, or + AF_UNSPEC for no preference. Only AF_INET, AF_INET6, and AF_UNSPEC are + supported. + @param hostname The hostname to resolve; see below for notes on recognized + formats + @param port The port to connect to on the resolved address. + @return 0 if successful, -1 on failure. + + Recognized hostname formats are: + + www.example.com (hostname) + 1.2.3.4 (ipv4address) + ::1 (ipv6address) + [::1] ([ipv6address]) + + Performance note: If you do not provide an evdns_base, this function + may block while it waits for a DNS response. This is probably not + what you want. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_socket_connect_hostname(struct bufferevent *, + struct evdns_base *, int, const char *, int); + +/** + Return the error code for the last failed DNS lookup attempt made by + bufferevent_socket_connect_hostname(). + + @param bev The bufferevent object. + @return DNS error code. + @see evutil_gai_strerror() +*/ +EVENT2_EXPORT_SYMBOL +int bufferevent_socket_get_dns_error(struct bufferevent *bev); + +/** + Assign a bufferevent to a specific event_base. + + NOTE that only socket bufferevents support this function. + + @param base an event_base returned by event_init() + @param bufev a bufferevent struct returned by bufferevent_new() + or bufferevent_socket_new() + @return 0 if successful, or -1 if an error occurred + @see bufferevent_new() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev); + +/** + Return the event_base used by a bufferevent +*/ +EVENT2_EXPORT_SYMBOL +struct event_base *bufferevent_get_base(struct bufferevent *bev); + +/** + Assign a priority to a bufferevent. + + Only supported for socket bufferevents. + + @param bufev a bufferevent struct + @param pri the priority to be assigned + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_priority_set(struct bufferevent *bufev, int pri); + +/** + Return the priority of a bufferevent. 
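Taken together, bufferevent_socket_new() with fd set to -1 and bufferevent_socket_connect_hostname() give a non-blocking client connect. A minimal sketch, assuming an evdns_base has already been created; the helper name open_connection() is illustrative:

#include <sys/socket.h>
#include <event2/bufferevent.h>
#include <event2/dns.h>

/* Sketch: allocate a socket bufferevent without an fd, then connect by name. */
static struct bufferevent *open_connection(struct event_base *base,
    struct evdns_base *dns, const char *host, int port)
{
    struct bufferevent *bev =
        bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
    if (!bev)
        return NULL;
    /* AF_UNSPEC lets the resolver return either IPv4 or IPv6 addresses. */
    if (bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC, host, port) < 0) {
        bufferevent_free(bev);
        return NULL;
    }
    return bev;
}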
+ + Only supported for socket bufferevents + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_get_priority(const struct bufferevent *bufev); + +/** + Deallocate the storage associated with a bufferevent structure. + + If there is pending data to write on the bufferevent, it probably won't be + flushed before the bufferevent is freed. + + @param bufev the bufferevent structure to be freed. + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_free(struct bufferevent *bufev); + + +/** + Changes the callbacks for a bufferevent. + + @param bufev the bufferevent object for which to change callbacks + @param readcb callback to invoke when there is data to be read, or NULL if + no callback is desired + @param writecb callback to invoke when the file descriptor is ready for + writing, or NULL if no callback is desired + @param eventcb callback to invoke when there is an event on the file + descriptor + @param cbarg an argument that will be supplied to each of the callbacks + (readcb, writecb, and errorcb) + @see bufferevent_new() + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_setcb(struct bufferevent *bufev, + bufferevent_data_cb readcb, bufferevent_data_cb writecb, + bufferevent_event_cb eventcb, void *cbarg); + +/** + Retrieves the callbacks for a bufferevent. + + @param bufev the bufferevent to examine. + @param readcb_ptr if readcb_ptr is nonnull, *readcb_ptr is set to the current + read callback for the bufferevent. + @param writecb_ptr if writecb_ptr is nonnull, *writecb_ptr is set to the + current write callback for the bufferevent. + @param eventcb_ptr if eventcb_ptr is nonnull, *eventcb_ptr is set to the + current event callback for the bufferevent. + @param cbarg_ptr if cbarg_ptr is nonnull, *cbarg_ptr is set to the current + callback argument for the bufferevent. + @see buffervent_setcb() +*/ +EVENT2_EXPORT_SYMBOL +void bufferevent_getcb(struct bufferevent *bufev, + bufferevent_data_cb *readcb_ptr, + bufferevent_data_cb *writecb_ptr, + bufferevent_event_cb *eventcb_ptr, + void **cbarg_ptr); + +/** + Changes the file descriptor on which the bufferevent operates. + Not supported for all bufferevent types. + + @param bufev the bufferevent object for which to change the file descriptor + @param fd the file descriptor to operate on +*/ +EVENT2_EXPORT_SYMBOL +int bufferevent_setfd(struct bufferevent *bufev, evutil_socket_t fd); + +/** + Returns the file descriptor associated with a bufferevent, or -1 if + no file descriptor is associated with the bufferevent. + */ +EVENT2_EXPORT_SYMBOL +evutil_socket_t bufferevent_getfd(struct bufferevent *bufev); + +/** + Returns the underlying bufferevent associated with a bufferevent (if + the bufferevent is a wrapper), or NULL if there is no underlying bufferevent. + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent *bufferevent_get_underlying(struct bufferevent *bufev); + +/** + Write data to a bufferevent buffer. + + The bufferevent_write() function can be used to write data to the file + descriptor. The data is appended to the output buffer and written to the + descriptor automatically as it becomes available for writing. + + @param bufev the bufferevent to be written to + @param data a pointer to the data to be written + @param size the length of the data, in bytes + @return 0 if successful, or -1 if an error occurred + @see bufferevent_write_buffer() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_write(struct bufferevent *bufev, + const void *data, size_t size); + + +/** + Write data from an evbuffer to a bufferevent buffer. The evbuffer is + being drained as a result. 
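bufferevent_setcb(), bufferevent_enable() and bufferevent_write() are typically used together when driving a client connection. A small sketch; send_request(), read_cb() and event_cb() are placeholder names:

#include <string.h>
#include <event2/event.h>
#include <event2/bufferevent.h>

static void read_cb(struct bufferevent *bev, void *ctx);
static void event_cb(struct bufferevent *bev, short what, void *ctx);

/* Sketch: install callbacks, enable reading, and queue a request for writing. */
static int send_request(struct bufferevent *bev, const char *req)
{
    bufferevent_setcb(bev, read_cb, NULL, event_cb, NULL);
    if (bufferevent_enable(bev, EV_READ) < 0)
        return -1;
    return bufferevent_write(bev, req, strlen(req));
}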
+ + @param bufev the bufferevent to be written to + @param buf the evbuffer to be written + @return 0 if successful, or -1 if an error occurred + @see bufferevent_write() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf); + + +/** + Read data from a bufferevent buffer. + + The bufferevent_read() function is used to read data from the input buffer. + + @param bufev the bufferevent to be read from + @param data pointer to a buffer that will store the data + @param size the size of the data buffer, in bytes + @return the amount of data read, in bytes. + */ +EVENT2_EXPORT_SYMBOL +size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size); + +/** + Read data from a bufferevent buffer into an evbuffer. This avoids + memory copies. + + @param bufev the bufferevent to be read from + @param buf the evbuffer to which to add data + @return 0 if successful, or -1 if an error occurred. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf); + +/** + Returns the input buffer. + + The user MUST NOT set the callback on this buffer. + + @param bufev the bufferevent from which to get the evbuffer + @return the evbuffer object for the input buffer + */ + +EVENT2_EXPORT_SYMBOL +struct evbuffer *bufferevent_get_input(struct bufferevent *bufev); + +/** + Returns the output buffer. + + The user MUST NOT set the callback on this buffer. + + When filters are being used, the filters need to be manually + triggered if the output buffer was manipulated. + + @param bufev the bufferevent from which to get the evbuffer + @return the evbuffer object for the output buffer + */ + +EVENT2_EXPORT_SYMBOL +struct evbuffer *bufferevent_get_output(struct bufferevent *bufev); + +/** + Enable a bufferevent. + + @param bufev the bufferevent to be enabled + @param event any combination of EV_READ | EV_WRITE. + @return 0 if successful, or -1 if an error occurred + @see bufferevent_disable() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_enable(struct bufferevent *bufev, short event); + +/** + Disable a bufferevent. + + @param bufev the bufferevent to be disabled + @param event any combination of EV_READ | EV_WRITE. + @return 0 if successful, or -1 if an error occurred + @see bufferevent_enable() + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_disable(struct bufferevent *bufev, short event); + +/** + Return the events that are enabled on a given bufferevent. + + @param bufev the bufferevent to inspect + @return A combination of EV_READ | EV_WRITE + */ +EVENT2_EXPORT_SYMBOL +short bufferevent_get_enabled(struct bufferevent *bufev); + +/** + Set the read and write timeout for a bufferevent. + + A bufferevent's timeout will fire the first time that the indicated + amount of time has elapsed since a successful read or write operation, + during which the bufferevent was trying to read or write. + + (In other words, if reading or writing is disabled, or if the + bufferevent's read or write operation has been suspended because + there's no data to write, or not enough bandwidth, or so on, the + timeout isn't active. The timeout only becomes active when we we're + willing to actually read or write.) + + Calling bufferevent_enable or setting a timeout for a bufferevent + whose timeout is already pending resets its timeout. + + If the timeout elapses, the corresponding operation (EV_READ or + EV_WRITE) becomes disabled until you re-enable it again. 
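A common pattern in a read callback is to drain the input evbuffer obtained with bufferevent_get_input() instead of copying through bufferevent_read(). A sketch using evbuffer_readln() from event2/buffer.h; line-based framing is an assumption made for the example:

#include <stdio.h>
#include <stdlib.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>

/* Sketch: pull complete CRLF-terminated lines out of the input buffer. */
static void read_cb(struct bufferevent *bev, void *ctx)
{
    struct evbuffer *in = bufferevent_get_input(bev);
    char *line;
    size_t n;
    while ((line = evbuffer_readln(in, &n, EVBUFFER_EOL_CRLF)) != NULL) {
        printf("got %zu bytes: %s\n", n, line);
        free(line); /* evbuffer_readln() returns a heap-allocated copy */
    }
}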
The + bufferevent's event callback is called with the + BEV_EVENT_TIMEOUT|BEV_EVENT_READING or + BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING. + + @param bufev the bufferevent to be modified + @param timeout_read the read timeout, or NULL + @param timeout_write the write timeout, or NULL + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_set_timeouts(struct bufferevent *bufev, + const struct timeval *timeout_read, const struct timeval *timeout_write); + +/** + Sets the watermarks for read and write events. + + On input, a bufferevent does not invoke the user read callback unless + there is at least low watermark data in the buffer. If the read buffer + is beyond the high watermark, the bufferevent stops reading from the network. + But be aware that bufferevent input/read buffer can overrun high watermark + limit (typical example is openssl bufferevent), so you should not relay in + this. + + On output, the user write callback is invoked whenever the buffered data + falls below the low watermark. Filters that write to this bufev will try + not to write more bytes to this buffer than the high watermark would allow, + except when flushing. + + @param bufev the bufferevent to be modified + @param events EV_READ, EV_WRITE or both + @param lowmark the lower watermark to set + @param highmark the high watermark to set +*/ + +EVENT2_EXPORT_SYMBOL +void bufferevent_setwatermark(struct bufferevent *bufev, short events, + size_t lowmark, size_t highmark); + +/** + Retrieves the watermarks for read or write events. + Returns non-zero if events contains not only EV_READ or EV_WRITE. + Returns zero if events equal EV_READ or EV_WRITE + + @param bufev the bufferevent to be examined + @param events EV_READ or EV_WRITE + @param lowmark receives the lower watermark if not NULL + @param highmark receives the high watermark if not NULL +*/ +EVENT2_EXPORT_SYMBOL +int bufferevent_getwatermark(struct bufferevent *bufev, short events, + size_t *lowmark, size_t *highmark); + +/** + Acquire the lock on a bufferevent. Has no effect if locking was not + enabled with BEV_OPT_THREADSAFE. + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_lock(struct bufferevent *bufev); + +/** + Release the lock on a bufferevent. Has no effect if locking was not + enabled with BEV_OPT_THREADSAFE. + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_unlock(struct bufferevent *bufev); + + +/** + * Public interface to manually increase the reference count of a bufferevent + * this is useful in situations where a user may reference the bufferevent + * somewhere else (unknown to libevent) + * + * @param bufev the bufferevent to increase the refcount on + * + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_incref(struct bufferevent *bufev); + +/** + * Public interface to manually decrement the reference count of a bufferevent + * + * Warning: make sure you know what you're doing. This is mainly used in + * conjunction with bufferevent_incref(). This will free up all data associated + * with a bufferevent if the reference count hits 0. + * + * @param bufev the bufferevent to decrement the refcount on + * + * @return 1 if the bufferevent was freed, otherwise 0 (still referenced) + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_decref(struct bufferevent *bufev); + +/** + Flags that can be passed into filters to let them know how to + deal with the incoming data. +*/ +enum bufferevent_flush_mode { + /** usually set when processing data */ + BEV_NORMAL = 0, + + /** want to checkpoint all data sent. 
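A short sketch of applying a read timeout together with a read low watermark; the 30-second and 16-byte values are arbitrary examples, not defaults:

#include <sys/time.h>
#include <event2/event.h>
#include <event2/bufferevent.h>

/* Sketch: 30s read timeout; read callback only fires once >= 16 bytes are buffered. */
static void tune_bev(struct bufferevent *bev)
{
    struct timeval rto = { 30, 0 };
    bufferevent_set_timeouts(bev, &rto, NULL);   /* no write timeout */
    bufferevent_setwatermark(bev, EV_READ, 16, 0); /* high watermark 0 = unlimited */
}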
*/ + BEV_FLUSH = 1, + + /** encountered EOF on read or done sending data */ + BEV_FINISHED = 2 +}; + +/** + Triggers the bufferevent to produce more data if possible. + + @param bufev the bufferevent object + @param iotype either EV_READ or EV_WRITE or both. + @param mode either BEV_NORMAL or BEV_FLUSH or BEV_FINISHED + @return -1 on failure, 0 if no data was produces, 1 if data was produced + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_flush(struct bufferevent *bufev, + short iotype, + enum bufferevent_flush_mode mode); + +/** + Flags for bufferevent_trigger(_event) that modify when and how to trigger + the callback. +*/ +enum bufferevent_trigger_options { + /** trigger the callback regardless of the watermarks */ + BEV_TRIG_IGNORE_WATERMARKS = (1<<16), + + /** defer even if the callbacks are not */ + BEV_TRIG_DEFER_CALLBACKS = BEV_OPT_DEFER_CALLBACKS + + /* (Note: for internal reasons, these need to be disjoint from + * bufferevent_options, except when they mean the same thing. */ +}; + +/** + Triggers bufferevent data callbacks. + + The function will honor watermarks unless options contain + BEV_TRIG_IGNORE_WATERMARKS. If the options contain BEV_OPT_DEFER_CALLBACKS, + the callbacks are deferred. + + @param bufev the bufferevent object + @param iotype either EV_READ or EV_WRITE or both. + @param options + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_trigger(struct bufferevent *bufev, short iotype, + int options); + +/** + Triggers the bufferevent event callback. + + If the options contain BEV_OPT_DEFER_CALLBACKS, the callbacks are deferred. + + @param bufev the bufferevent object + @param what the flags to pass onto the event callback + @param options + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_trigger_event(struct bufferevent *bufev, short what, + int options); + +/** + @name Filtering support + + @{ +*/ +/** + Values that filters can return. + */ +enum bufferevent_filter_result { + /** everything is okay */ + BEV_OK = 0, + + /** the filter needs to read more data before output */ + BEV_NEED_MORE = 1, + + /** the filter encountered a critical error, no further data + can be processed. */ + BEV_ERROR = 2 +}; + +/** A callback function to implement a filter for a bufferevent. + + @param src An evbuffer to drain data from. + @param dst An evbuffer to add data to. + @param limit A suggested upper bound of bytes to write to dst. + The filter may ignore this value, but doing so means that + it will overflow the high-water mark associated with dst. + -1 means "no limit". + @param mode Whether we should write data as may be convenient + (BEV_NORMAL), or flush as much data as we can (BEV_FLUSH), + or flush as much as we can, possibly including an end-of-stream + marker (BEV_FINISH). + @param ctx A user-supplied pointer. + + @return BEV_OK if we wrote some data; BEV_NEED_MORE if we can't + produce any more output until we get some input; and BEV_ERROR + on an error. + */ +typedef enum bufferevent_filter_result (*bufferevent_filter_cb)( + struct evbuffer *src, struct evbuffer *dst, ev_ssize_t dst_limit, + enum bufferevent_flush_mode mode, void *ctx); + +/** + Allocate a new filtering bufferevent on top of an existing bufferevent. + + @param underlying the underlying bufferevent. + @param input_filter The filter to apply to data we read from the underlying + bufferevent + @param output_filter The filer to apply to data we write to the underlying + bufferevent + @param options A bitfield of bufferevent options. 
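A filter callback of the type described above moves bytes from src to dst, possibly transforming them. A deliberately simplified sketch that upper-cases data and ignores dst_limit and mode; a production filter should honor both:

#include <ctype.h>
#include <event2/util.h>
#include <event2/buffer.h>
#include <event2/bufferevent.h>

/* Sketch: copy bytes from src to dst, upper-casing them along the way. */
static enum bufferevent_filter_result
upcase_filter(struct evbuffer *src, struct evbuffer *dst,
    ev_ssize_t dst_limit, enum bufferevent_flush_mode mode, void *ctx)
{
    char chunk[256];
    int n;
    while ((n = evbuffer_remove(src, chunk, sizeof(chunk))) > 0) {
        for (int i = 0; i < n; i++)
            chunk[i] = (char)toupper((unsigned char)chunk[i]);
        evbuffer_add(dst, chunk, n);
    }
    return BEV_OK;
}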
+ @param free_context A function to use to free the filter context when + this bufferevent is freed. + @param ctx A context pointer to pass to the filter functions. + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent * +bufferevent_filter_new(struct bufferevent *underlying, + bufferevent_filter_cb input_filter, + bufferevent_filter_cb output_filter, + int options, + void (*free_context)(void *), + void *ctx); +/**@}*/ + +/** + Allocate a pair of linked bufferevents. The bufferevents behave as would + two bufferevent_sock instances connected to opposite ends of a + socketpair(), except that no internal socketpair is allocated. + + @param base The event base to associate with the socketpair. + @param options A set of options for this bufferevent + @param pair A pointer to an array to hold the two new bufferevent objects. + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_pair_new(struct event_base *base, int options, + struct bufferevent *pair[2]); + +/** + Given one bufferevent returned by bufferevent_pair_new(), returns the + other one if it still exists. Otherwise returns NULL. + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent *bufferevent_pair_get_partner(struct bufferevent *bev); + +/** + Abstract type used to configure rate-limiting on a bufferevent or a group + of bufferevents. + */ +struct ev_token_bucket_cfg; + +/** + A group of bufferevents which are configured to respect the same rate + limit. +*/ +struct bufferevent_rate_limit_group; + +/** Maximum configurable rate- or burst-limit. */ +#define EV_RATE_LIMIT_MAX EV_SSIZE_MAX + +/** + Initialize and return a new object to configure the rate-limiting behavior + of bufferevents. + + @param read_rate The maximum number of bytes to read per tick on + average. + @param read_burst The maximum number of bytes to read in any single tick. + @param write_rate The maximum number of bytes to write per tick on + average. + @param write_burst The maximum number of bytes to write in any single tick. + @param tick_len The length of a single tick. Defaults to one second. + Any fractions of a millisecond are ignored. + + Note that all rate-limits hare are currently best-effort: future versions + of Libevent may implement them more tightly. + */ +EVENT2_EXPORT_SYMBOL +struct ev_token_bucket_cfg *ev_token_bucket_cfg_new( + size_t read_rate, size_t read_burst, + size_t write_rate, size_t write_burst, + const struct timeval *tick_len); + +/** Free all storage held in 'cfg'. + + Note: 'cfg' is not currently reference-counted; it is not safe to free it + until no bufferevent is using it. + */ +EVENT2_EXPORT_SYMBOL +void ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg); + +/** + Set the rate-limit of a the bufferevent 'bev' to the one specified in + 'cfg'. If 'cfg' is NULL, disable any per-bufferevent rate-limiting on + 'bev'. + + Note that only some bufferevent types currently respect rate-limiting. + They are: socket-based bufferevents (normal and IOCP-based), and SSL-based + bufferevents. + + Return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_set_rate_limit(struct bufferevent *bev, + struct ev_token_bucket_cfg *cfg); + +/** + Create a new rate-limit group for bufferevents. A rate-limit group + constrains the maximum number of bytes sent and received, in toto, + by all of its bufferevents. + + @param base An event_base to run any necessary timeouts for the group. + Note that all bufferevents in the group do not necessarily need to share + this event_base. 
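ev_token_bucket_cfg_new() and bufferevent_set_rate_limit() combine as in the sketch below; the 64 KiB/s figure is an arbitrary example, and the cfg object must stay alive while any bufferevent uses it, since it is not reference-counted:

#include <stddef.h>
#include <event2/bufferevent.h>

/* Sketch: cap a single bufferevent at roughly 64 KiB per second each way. */
static struct ev_token_bucket_cfg *limit_bev(struct bufferevent *bev)
{
    struct ev_token_bucket_cfg *cfg =
        ev_token_bucket_cfg_new(64 * 1024, 64 * 1024,  /* read rate, read burst */
                                64 * 1024, 64 * 1024,  /* write rate, write burst */
                                NULL);                 /* NULL tick = one second */
    if (cfg && bufferevent_set_rate_limit(bev, cfg) < 0) {
        ev_token_bucket_cfg_free(cfg);
        return NULL;
    }
    return cfg; /* caller keeps this until the bufferevent no longer needs it */
}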
+ @param cfg The rate-limit for this group. + + Note that all rate-limits hare are currently best-effort: future versions + of Libevent may implement them more tightly. + + Note also that only some bufferevent types currently respect rate-limiting. + They are: socket-based bufferevents (normal and IOCP-based), and SSL-based + bufferevents. + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent_rate_limit_group *bufferevent_rate_limit_group_new( + struct event_base *base, + const struct ev_token_bucket_cfg *cfg); +/** + Change the rate-limiting settings for a given rate-limiting group. + + Return 0 on success, -1 on failure. +*/ +EVENT2_EXPORT_SYMBOL +int bufferevent_rate_limit_group_set_cfg( + struct bufferevent_rate_limit_group *, + const struct ev_token_bucket_cfg *); + +/** + Change the smallest quantum we're willing to allocate to any single + bufferevent in a group for reading or writing at a time. + + The rationale is that, because of TCP/IP protocol overheads and kernel + behavior, if a rate-limiting group is so tight on bandwidth that you're + only willing to send 1 byte per tick per bufferevent, you might instead + want to batch up the reads and writes so that you send N bytes per + 1/N of the bufferevents (chosen at random) each tick, so you still wind + up send 1 byte per tick per bufferevent on average, but you don't send + so many tiny packets. + + The default min-share is currently 64 bytes. + + Returns 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_rate_limit_group_set_min_share( + struct bufferevent_rate_limit_group *, size_t); + +/** + Free a rate-limiting group. The group must have no members when + this function is called. +*/ +EVENT2_EXPORT_SYMBOL +void bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *); + +/** + Add 'bev' to the list of bufferevents whose aggregate reading and writing + is restricted by 'g'. If 'g' is NULL, remove 'bev' from its current group. + + A bufferevent may belong to no more than one rate-limit group at a time. + If 'bev' is already a member of a group, it will be removed from its old + group before being added to 'g'. + + Return 0 on success and -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_add_to_rate_limit_group(struct bufferevent *bev, + struct bufferevent_rate_limit_group *g); + +/** Remove 'bev' from its current rate-limit group (if any). */ +EVENT2_EXPORT_SYMBOL +int bufferevent_remove_from_rate_limit_group(struct bufferevent *bev); + +/** + Set the size limit for single read operation. + + Set to 0 for a reasonable default. + + Return 0 on success and -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_set_max_single_read(struct bufferevent *bev, size_t size); + +/** + Set the size limit for single write operation. + + Set to 0 for a reasonable default. + + Return 0 on success and -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_set_max_single_write(struct bufferevent *bev, size_t size); + +/** Get the current size limit for single read operation. */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_max_single_read(struct bufferevent *bev); + +/** Get the current size limit for single write operation. */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_max_single_write(struct bufferevent *bev); + +/** + @name Rate limit inspection + + Return the current read or write bucket size for a bufferevent. + If it is not configured with a per-bufferevent ratelimit, return + EV_SSIZE_MAX. This function does not inspect the group limit, if any. 
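A group limit is applied by creating the group once and then adding each bufferevent to it, as sketched below; the 4 KiB min-share value and helper names are arbitrary examples:

#include <event2/bufferevent.h>

/* Sketch: share one token-bucket budget across several bufferevents. */
static struct bufferevent_rate_limit_group *
make_shared_limit(struct event_base *base,
    const struct ev_token_bucket_cfg *cfg)
{
    struct bufferevent_rate_limit_group *grp =
        bufferevent_rate_limit_group_new(base, cfg);
    if (grp)
        bufferevent_rate_limit_group_set_min_share(grp, 4096);
    return grp;
}

/* Each accepted connection joins the shared group. */
static int join_group(struct bufferevent *bev,
    struct bufferevent_rate_limit_group *grp)
{
    return bufferevent_add_to_rate_limit_group(bev, grp);
}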
+ Note that it can return a negative value if the bufferevent has been + made to read or write more than its limit. + + @{ + */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_read_limit(struct bufferevent *bev); +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_write_limit(struct bufferevent *bev); +/*@}*/ + +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_max_to_read(struct bufferevent *bev); +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_get_max_to_write(struct bufferevent *bev); + +EVENT2_EXPORT_SYMBOL +const struct ev_token_bucket_cfg *bufferevent_get_token_bucket_cfg(const struct bufferevent * bev); + +/** + @name Group Rate limit inspection + + Return the read or write bucket size for a bufferevent rate limit + group. Note that it can return a negative value if bufferevents in + the group have been made to read or write more than their limits. + + @{ + */ +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_rate_limit_group_get_read_limit( + struct bufferevent_rate_limit_group *); +EVENT2_EXPORT_SYMBOL +ev_ssize_t bufferevent_rate_limit_group_get_write_limit( + struct bufferevent_rate_limit_group *); +/*@}*/ + +/** + @name Rate limit manipulation + + Subtract a number of bytes from a bufferevent's read or write bucket. + The decrement value can be negative, if you want to manually refill + the bucket. If the change puts the bucket above or below zero, the + bufferevent will resume or suspend reading writing as appropriate. + These functions make no change in the buckets for the bufferevent's + group, if any. + + Returns 0 on success, -1 on internal error. + + @{ + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr); +EVENT2_EXPORT_SYMBOL +int bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr); +/*@}*/ + +/** + @name Group rate limit manipulation + + Subtract a number of bytes from a bufferevent rate-limiting group's + read or write bucket. The decrement value can be negative, if you + want to manually refill the bucket. If the change puts the bucket + above or below zero, the bufferevents in the group will resume or + suspend reading writing as appropriate. + + Returns 0 on success, -1 on internal error. + + @{ + */ +EVENT2_EXPORT_SYMBOL +int bufferevent_rate_limit_group_decrement_read( + struct bufferevent_rate_limit_group *, ev_ssize_t); +EVENT2_EXPORT_SYMBOL +int bufferevent_rate_limit_group_decrement_write( + struct bufferevent_rate_limit_group *, ev_ssize_t); +/*@}*/ + + +/** + * Inspect the total bytes read/written on a group. + * + * Set the variable pointed to by total_read_out to the total number of bytes + * ever read on grp, and the variable pointed to by total_written_out to the + * total number of bytes ever written on grp. */ +EVENT2_EXPORT_SYMBOL +void bufferevent_rate_limit_group_get_totals( + struct bufferevent_rate_limit_group *grp, + ev_uint64_t *total_read_out, ev_uint64_t *total_written_out); + +/** + * Reset the total bytes read/written on a group. + * + * Reset the number of bytes read or written on grp as given by + * bufferevent_rate_limit_group_reset_totals(). 
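The inspection calls can be combined to report both group totals and a single member's remaining per-tick budget, as in this sketch; the report() name and printf formatting are illustrative:

#include <stdio.h>
#include <event2/util.h>
#include <event2/bufferevent.h>

/* Sketch: print group byte totals and one bufferevent's remaining read budget. */
static void report(struct bufferevent_rate_limit_group *grp,
    struct bufferevent *bev)
{
    ev_uint64_t rd, wr;
    bufferevent_rate_limit_group_get_totals(grp, &rd, &wr);
    printf("group totals: %llu read, %llu written\n",
        (unsigned long long)rd, (unsigned long long)wr);
    printf("bev may still read %ld bytes this tick\n",
        (long)bufferevent_get_read_limit(bev));
}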
*/ +EVENT2_EXPORT_SYMBOL +void +bufferevent_rate_limit_group_reset_totals( + struct bufferevent_rate_limit_group *grp); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_BUFFEREVENT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_compat.h new file mode 100644 index 00000000..a5a3c720 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_compat.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson + * Copyright (c) 2000-2007 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_BUFFEREVENT_COMPAT_H_INCLUDED_ +#define EVENT2_BUFFEREVENT_COMPAT_H_INCLUDED_ + +#include + +#define evbuffercb bufferevent_data_cb +#define everrorcb bufferevent_event_cb + +/** + Create a new bufferevent for an fd. + + This function is deprecated. Use bufferevent_socket_new and + bufferevent_set_callbacks instead. + + Libevent provides an abstraction on top of the regular event callbacks. + This abstraction is called a buffered event. A buffered event provides + input and output buffers that get filled and drained automatically. The + user of a buffered event no longer deals directly with the I/O, but + instead is reading from input and writing to output buffers. + + Once initialized, the bufferevent structure can be used repeatedly with + bufferevent_enable() and bufferevent_disable(). + + When read enabled the bufferevent will try to read from the file descriptor + and call the read callback. The write callback is executed whenever the + output buffer is drained below the write low watermark, which is 0 by + default. + + If multiple bases are in use, bufferevent_base_set() must be called before + enabling the bufferevent for the first time. + + @deprecated This function is deprecated because it uses the current + event base, and as such can be error prone for multithreaded programs. + Use bufferevent_socket_new() instead. + + @param fd the file descriptor from which data is read and written to. + This file descriptor is not allowed to be a pipe(2). 
+ @param readcb callback to invoke when there is data to be read, or NULL if + no callback is desired + @param writecb callback to invoke when the file descriptor is ready for + writing, or NULL if no callback is desired + @param errorcb callback to invoke when there is an error on the file + descriptor + @param cbarg an argument that will be supplied to each of the callbacks + (readcb, writecb, and errorcb) + @return a pointer to a newly allocated bufferevent struct, or NULL if an + error occurred + @see bufferevent_base_set(), bufferevent_free() + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent *bufferevent_new(evutil_socket_t fd, + evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg); + + +/** + Set the read and write timeout for a buffered event. + + @param bufev the bufferevent to be modified + @param timeout_read the read timeout + @param timeout_write the write timeout + */ +EVENT2_EXPORT_SYMBOL +void bufferevent_settimeout(struct bufferevent *bufev, + int timeout_read, int timeout_write); + +#define EVBUFFER_READ BEV_EVENT_READING +#define EVBUFFER_WRITE BEV_EVENT_WRITING +#define EVBUFFER_EOF BEV_EVENT_EOF +#define EVBUFFER_ERROR BEV_EVENT_ERROR +#define EVBUFFER_TIMEOUT BEV_EVENT_TIMEOUT + +/** macro for getting access to the input buffer of a bufferevent */ +#define EVBUFFER_INPUT(x) bufferevent_get_input(x) +/** macro for getting access to the output buffer of a bufferevent */ +#define EVBUFFER_OUTPUT(x) bufferevent_get_output(x) + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_ssl.h b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_ssl.h new file mode 100644 index 00000000..bf39b844 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_ssl.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_BUFFEREVENT_SSL_H_INCLUDED_ +#define EVENT2_BUFFEREVENT_SSL_H_INCLUDED_ + +/** @file event2/bufferevent_ssl.h + + OpenSSL support for bufferevents. 
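Since bufferevent_new() is deprecated, the equivalent construction with the current API is bufferevent_socket_new() on an explicit event_base followed by bufferevent_setcb(). A sketch of that mapping; compat_replacement() is a placeholder name:

#include <event2/util.h>
#include <event2/bufferevent.h>

/* Sketch: modern replacement for bufferevent_new(fd, readcb, writecb, errorcb, arg). */
static struct bufferevent *compat_replacement(struct event_base *base,
    evutil_socket_t fd,
    bufferevent_data_cb readcb, bufferevent_data_cb writecb,
    bufferevent_event_cb eventcb, void *arg)
{
    struct bufferevent *bev = bufferevent_socket_new(base, fd, 0);
    if (bev)
        bufferevent_setcb(bev, readcb, writecb, eventcb, arg);
    return bev;
}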
+ */ +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is what openssl's SSL objects are underneath. */ +struct ssl_st; + +/** + The state of an SSL object to be used when creating a new + SSL bufferevent. + */ +enum bufferevent_ssl_state { + BUFFEREVENT_SSL_OPEN = 0, + BUFFEREVENT_SSL_CONNECTING = 1, + BUFFEREVENT_SSL_ACCEPTING = 2 +}; + +#if defined(EVENT__HAVE_OPENSSL) || defined(EVENT_IN_DOXYGEN_) +/** + Create a new SSL bufferevent to send its data over another bufferevent. + + @param base An event_base to use to detect reading and writing. It + must also be the base for the underlying bufferevent. + @param underlying A socket to use for this SSL + @param ssl A SSL* object from openssl. + @param state The current state of the SSL connection + @param options One or more bufferevent_options + @return A new bufferevent on success, or NULL on failure +*/ +EVENT2_EXPORT_SYMBOL +struct bufferevent * +bufferevent_openssl_filter_new(struct event_base *base, + struct bufferevent *underlying, + struct ssl_st *ssl, + enum bufferevent_ssl_state state, + int options); + +/** + Create a new SSL bufferevent to send its data over an SSL * on a socket. + + @param base An event_base to use to detect reading and writing + @param fd A socket to use for this SSL + @param ssl A SSL* object from openssl. + @param state The current state of the SSL connection + @param options One or more bufferevent_options + @return A new bufferevent on success, or NULL on failure. +*/ +EVENT2_EXPORT_SYMBOL +struct bufferevent * +bufferevent_openssl_socket_new(struct event_base *base, + evutil_socket_t fd, + struct ssl_st *ssl, + enum bufferevent_ssl_state state, + int options); + +/** Control how to report dirty SSL shutdowns. + + If the peer (or the network, or an attacker) closes the TCP + connection before closing the SSL channel, and the protocol is SSL >= v3, + this is a "dirty" shutdown. If allow_dirty_shutdown is 0 (default), + this is reported as BEV_EVENT_ERROR. + + If instead allow_dirty_shutdown=1, a dirty shutdown is reported as + BEV_EVENT_EOF. + + (Note that if the protocol is < SSLv3, you will always receive + BEV_EVENT_EOF, since SSL 2 and earlier cannot distinguish a secure + connection close from a dirty one. This is one reason (among many) + not to use SSL 2.) +*/ + +EVENT2_EXPORT_SYMBOL +int bufferevent_openssl_get_allow_dirty_shutdown(struct bufferevent *bev); +EVENT2_EXPORT_SYMBOL +void bufferevent_openssl_set_allow_dirty_shutdown(struct bufferevent *bev, + int allow_dirty_shutdown); + +/** Return the underlying openssl SSL * object for an SSL bufferevent. */ +EVENT2_EXPORT_SYMBOL +struct ssl_st * +bufferevent_openssl_get_ssl(struct bufferevent *bufev); + +/** Tells a bufferevent to begin SSL renegotiation. */ +EVENT2_EXPORT_SYMBOL +int bufferevent_ssl_renegotiate(struct bufferevent *bev); + +/** Return the most recent OpenSSL error reported on an SSL bufferevent. 
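A sketch of the client-side case for bufferevent_openssl_socket_new(): the SSL object comes from an already-configured SSL_CTX (setting up the context and certificate verification is outside this sketch), and fd is -1 so the connect can be issued later, e.g. with bufferevent_socket_connect_hostname():

#include <openssl/ssl.h>
#include <event2/bufferevent.h>
#include <event2/bufferevent_ssl.h>

/* Sketch: wrap a fresh client-side SSL object in a socket-based SSL bufferevent. */
static struct bufferevent *ssl_connect_bev(struct event_base *base, SSL_CTX *ctx)
{
    SSL *ssl = SSL_new(ctx);
    if (!ssl)
        return NULL;
    return bufferevent_openssl_socket_new(base, -1, ssl,
        BUFFEREVENT_SSL_CONNECTING,
        BEV_OPT_CLOSE_ON_FREE);
}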
*/ +EVENT2_EXPORT_SYMBOL +unsigned long bufferevent_get_openssl_error(struct bufferevent *bev); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_BUFFEREVENT_SSL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_struct.h new file mode 100644 index 00000000..e84c082c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/bufferevent_struct.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_ +#define EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_ + +/** @file event2/bufferevent_struct.h + + Data structures for bufferevents. Using these structures may hurt forward + compatibility with later versions of Libevent: be careful! + + @deprecated Use of bufferevent_struct.h is completely deprecated; these + structures are only exposed for backward compatibility with programs + written before Libevent 2.0 that used them. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include +/* For struct event */ +#include + +struct event_watermark { + size_t low; + size_t high; +}; + +/** + Shared implementation of a bufferevent. + + This type is exposed only because it was exposed in previous versions, + and some people's code may rely on manipulating it. Otherwise, you + should really not rely on the layout, size, or contents of this structure: + it is fairly volatile, and WILL change in future versions of the code. +**/ +struct bufferevent { + /** Event base for which this bufferevent was created. */ + struct event_base *ev_base; + /** Pointer to a table of function pointers to set up how this + bufferevent behaves. */ + const struct bufferevent_ops *be_ops; + + /** A read event that triggers when a timeout has happened or a socket + is ready to read data. Only used by some subtypes of + bufferevent. 
*/ + struct event ev_read; + /** A write event that triggers when a timeout has happened or a socket + is ready to write data. Only used by some subtypes of + bufferevent. */ + struct event ev_write; + + /** An input buffer. Only the bufferevent is allowed to add data to + this buffer, though the user is allowed to drain it. */ + struct evbuffer *input; + + /** An input buffer. Only the bufferevent is allowed to drain data + from this buffer, though the user is allowed to add it. */ + struct evbuffer *output; + + struct event_watermark wm_read; + struct event_watermark wm_write; + + bufferevent_data_cb readcb; + bufferevent_data_cb writecb; + /* This should be called 'eventcb', but renaming it would break + * backward compatibility */ + bufferevent_event_cb errorcb; + void *cbarg; + + struct timeval timeout_read; + struct timeval timeout_write; + + /** Events that are currently enabled: currently EV_READ and EV_WRITE + are supported. */ + short enabled; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/dns.h b/probe-busybox/libevent-2.1.11-stable/include/event2/dns.h new file mode 100644 index 00000000..1895a7d0 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/dns.h @@ -0,0 +1,759 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * The original DNS code is due to Adam Langley with heavy + * modifications by Nick Mathewson. Adam put his DNS software in the + * public domain. You can find his original copyright below. Please, + * aware that the code as part of Libevent is governed by the 3-clause + * BSD license above. + * + * This software is Public Domain. To view a copy of the public domain dedication, + * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to + * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA. 
+ * + * I ask and expect, but do not require, that all derivative works contain an + * attribution similar to: + * Parts developed by Adam Langley + * + * You may wish to replace the word "Parts" with something else depending on + * the amount of original code. + * + * (Derivative works does not include programs which link against, run or include + * the source verbatim in their source distributions) + */ + +/** @file event2/dns.h + * + * Welcome, gentle reader + * + * Async DNS lookups are really a whole lot harder than they should be, + * mostly stemming from the fact that the libc resolver has never been + * very good at them. Before you use this library you should see if libc + * can do the job for you with the modern async call getaddrinfo_a + * (see http://www.imperialviolet.org/page25.html#e498). Otherwise, + * please continue. + * + * The library keeps track of the state of nameservers and will avoid + * them when they go down. Otherwise it will round robin between them. + * + * Quick start guide: + * #include "evdns.h" + * void callback(int result, char type, int count, int ttl, + * void *addresses, void *arg); + * evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf"); + * evdns_resolve("www.hostname.com", 0, callback, NULL); + * + * When the lookup is complete the callback function is called. The + * first argument will be one of the DNS_ERR_* defines in evdns.h. + * Hopefully it will be DNS_ERR_NONE, in which case type will be + * DNS_IPv4_A, count will be the number of IP addresses, ttl is the time + * which the data can be cached for (in seconds), addresses will point + * to an array of uint32_t's and arg will be whatever you passed to + * evdns_resolve. + * + * Searching: + * + * In order for this library to be a good replacement for glibc's resolver it + * supports searching. This involves setting a list of default domains, in + * which names will be queried for. The number of dots in the query name + * determines the order in which this list is used. + * + * Searching appears to be a single lookup from the point of view of the API, + * although many DNS queries may be generated from a single call to + * evdns_resolve. Searching can also drastically slow down the resolution + * of names. + * + * To disable searching: + * 1. Never set it up. If you never call evdns_resolv_conf_parse or + * evdns_search_add then no searching will occur. + * + * 2. If you do call evdns_resolv_conf_parse then don't pass + * DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it). + * + * 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag. + * + * The order of searches depends on the number of dots in the name. If the + * number is greater than the ndots setting then the names is first tried + * globally. Otherwise each search domain is appended in turn. + * + * The ndots setting can either be set from a resolv.conf, or by calling + * evdns_search_ndots_set. + * + * For example, with ndots set to 1 (the default) and a search domain list of + * ["myhome.net"]: + * Query: www + * Order: www.myhome.net, www. + * + * Query: www.abc + * Order: www.abc., www.abc.myhome.net + * + * Internals: + * + * Requests are kept in two queues. The first is the inflight queue. In + * this queue requests have an allocated transaction id and nameserver. + * They will soon be transmitted if they haven't already been. + * + * The second is the waiting queue. The size of the inflight ring is + * limited and all other requests wait in waiting queue for space. 
This + * bounds the number of concurrent requests so that we don't flood the + * nameserver. Several algorithms require a full walk of the inflight + * queue and so bounding its size keeps thing going nicely under huge + * (many thousands of requests) loads. + * + * If a nameserver loses too many requests it is considered down and we + * try not to use it. After a while we send a probe to that nameserver + * (a lookup for google.com) and, if it replies, we consider it working + * again. If the nameserver fails a probe we wait longer to try again + * with the next probe. + */ + +#ifndef EVENT2_DNS_H_INCLUDED_ +#define EVENT2_DNS_H_INCLUDED_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* For integer types. */ +#include + +/** Error codes 0-5 are as described in RFC 1035. */ +#define DNS_ERR_NONE 0 +/** The name server was unable to interpret the query */ +#define DNS_ERR_FORMAT 1 +/** The name server was unable to process this query due to a problem with the + * name server */ +#define DNS_ERR_SERVERFAILED 2 +/** The domain name does not exist */ +#define DNS_ERR_NOTEXIST 3 +/** The name server does not support the requested kind of query */ +#define DNS_ERR_NOTIMPL 4 +/** The name server refuses to reform the specified operation for policy + * reasons */ +#define DNS_ERR_REFUSED 5 +/** The reply was truncated or ill-formatted */ +#define DNS_ERR_TRUNCATED 65 +/** An unknown error occurred */ +#define DNS_ERR_UNKNOWN 66 +/** Communication with the server timed out */ +#define DNS_ERR_TIMEOUT 67 +/** The request was canceled because the DNS subsystem was shut down. */ +#define DNS_ERR_SHUTDOWN 68 +/** The request was canceled via a call to evdns_cancel_request */ +#define DNS_ERR_CANCEL 69 +/** There were no answers and no error condition in the DNS packet. + * This can happen when you ask for an address that exists, but a record + * type that doesn't. */ +#define DNS_ERR_NODATA 70 + +#define DNS_IPv4_A 1 +#define DNS_PTR 2 +#define DNS_IPv6_AAAA 3 + +#define DNS_QUERY_NO_SEARCH 1 + +/* Allow searching */ +#define DNS_OPTION_SEARCH 1 +/* Parse "nameserver" and add default if no such section */ +#define DNS_OPTION_NAMESERVERS 2 +/* Parse additional options like: + * - timeout: + * - getaddrinfo-allow-skew: + * - max-timeouts: + * - max-inflight: + * - attempts: + * - randomize-case: + * - initial-probe-timeout: + */ +#define DNS_OPTION_MISC 4 +/* Load hosts file (i.e. "/etc/hosts") */ +#define DNS_OPTION_HOSTSFILE 8 +/** + * All above: + * - DNS_OPTION_SEARCH + * - DNS_OPTION_NAMESERVERS + * - DNS_OPTION_MISC + * - DNS_OPTION_HOSTSFILE + */ +#define DNS_OPTIONS_ALL ( \ + DNS_OPTION_SEARCH | \ + DNS_OPTION_NAMESERVERS | \ + DNS_OPTION_MISC | \ + DNS_OPTION_HOSTSFILE | \ + 0 \ +) +/* Do not "default" nameserver (i.e. "127.0.0.1:53") if there is no nameservers + * in resolv.conf, (iff DNS_OPTION_NAMESERVERS is set) */ +#define DNS_OPTION_NAMESERVERS_NO_DEFAULT 16 + +/* Obsolete name for DNS_QUERY_NO_SEARCH */ +#define DNS_NO_SEARCH DNS_QUERY_NO_SEARCH + +/** + * The callback that contains the results from a lookup. + * - result is one of the DNS_ERR_* values (DNS_ERR_NONE for success) + * - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA + * - count contains the number of addresses of form type + * - ttl is the number of seconds the resolution may be cached for. + * - addresses needs to be cast according to type. It will be an array of + * 4-byte sequences for ipv4, or an array of 16-byte sequences for ipv6, + * or a nul-terminated string for PTR. 
+ */ +typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg); + +struct evdns_base; +struct event_base; + +/** Flag for evdns_base_new: process resolv.conf. */ +#define EVDNS_BASE_INITIALIZE_NAMESERVERS 1 +/** Flag for evdns_base_new: Do not prevent the libevent event loop from + * exiting when we have no active dns requests. */ +#define EVDNS_BASE_DISABLE_WHEN_INACTIVE 0x8000 +/** Flag for evdns_base_new: If EVDNS_BASE_INITIALIZE_NAMESERVERS isset, do not + * add default nameserver if there are no nameservers in resolv.conf + * @see DNS_OPTION_NAMESERVERS_NO_DEFAULT */ +#define EVDNS_BASE_NAMESERVERS_NO_DEFAULT 0x10000 + +/** + Initialize the asynchronous DNS library. + + This function initializes support for non-blocking name resolution by + calling evdns_resolv_conf_parse() on UNIX and + evdns_config_windows_nameservers() on Windows. + + @param event_base the event base to associate the dns client with + @param flags any of EVDNS_BASE_INITIALIZE_NAMESERVERS| + EVDNS_BASE_DISABLE_WHEN_INACTIVE|EVDNS_BASE_NAMESERVERS_NO_DEFAULT + @return evdns_base object if successful, or NULL if an error occurred. + @see evdns_base_free() + */ +EVENT2_EXPORT_SYMBOL +struct evdns_base * evdns_base_new(struct event_base *event_base, int initialize_nameservers); + + +/** + Shut down the asynchronous DNS resolver and terminate all active requests. + + If the 'fail_requests' option is enabled, all active requests will return + an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise, + the requests will be silently discarded. + + @param evdns_base the evdns base to free + @param fail_requests if zero, active requests will be aborted; if non-zero, + active requests will return DNS_ERR_SHUTDOWN. + @see evdns_base_new() + */ +EVENT2_EXPORT_SYMBOL +void evdns_base_free(struct evdns_base *base, int fail_requests); + +/** + Remove all hosts entries that have been loaded into the event_base via + evdns_base_load_hosts or via event_base_resolv_conf_parse. + + @param evdns_base the evdns base to remove outdated host addresses from + */ +EVENT2_EXPORT_SYMBOL +void evdns_base_clear_host_addresses(struct evdns_base *base); + +/** + Set the outging interface to be used for DNS queries + + @param base the evdns base to modify + @param interface_name the name of the interface to be used + */ +int evdns_base_set_interface(struct evdns_base *base, char *interface_name); + +/** + Convert a DNS error code to a string. + + @param err the DNS error code + @return a string containing an explanation of the error code +*/ +EVENT2_EXPORT_SYMBOL +const char *evdns_err_to_string(int err); + + +/** + Add a nameserver. + + The address should be an IPv4 address in network byte order. + The type of address is chosen so that it matches in_addr.s_addr. + + @param base the evdns_base to which to add the name server + @param address an IP address in network byte order + @return 0 if successful, or -1 if an error occurred + @see evdns_base_nameserver_ip_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_nameserver_add(struct evdns_base *base, + unsigned long int address); + +/** + Get the number of configured nameservers. + + This returns the number of configured nameservers (not necessarily the + number of running nameservers). This is useful for double-checking + whether our calls to the various nameserver configuration functions + have been successful. 
+ + @param base the evdns_base to which to apply this operation + @return the number of configured nameservers + @see evdns_base_nameserver_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_count_nameservers(struct evdns_base *base); + +/** + Remove all configured nameservers, and suspend all pending resolves. + + Resolves will not necessarily be re-attempted until evdns_base_resume() is called. + + @param base the evdns_base to which to apply this operation + @return 0 if successful, or -1 if an error occurred + @see evdns_base_resume() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_clear_nameservers_and_suspend(struct evdns_base *base); + + +/** + Resume normal operation and continue any suspended resolve requests. + + Re-attempt resolves left in limbo after an earlier call to + evdns_base_clear_nameservers_and_suspend(). + + @param base the evdns_base to which to apply this operation + @return 0 if successful, or -1 if an error occurred + @see evdns_base_clear_nameservers_and_suspend() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_resume(struct evdns_base *base); + +/** + Add a nameserver by string address. + + This function parses a n IPv4 or IPv6 address from a string and adds it as a + nameserver. It supports the following formats: + - [IPv6Address]:port + - [IPv6Address] + - IPv6Address + - IPv4Address:port + - IPv4Address + + If no port is specified, it defaults to 53. + + @param base the evdns_base to which to apply this operation + @return 0 if successful, or -1 if an error occurred + @see evdns_base_nameserver_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_nameserver_ip_add(struct evdns_base *base, + const char *ip_as_string); + +/** + Add a nameserver by sockaddr. + **/ +EVENT2_EXPORT_SYMBOL +int +evdns_base_nameserver_sockaddr_add(struct evdns_base *base, + const struct sockaddr *sa, ev_socklen_t len, unsigned flags); + +struct evdns_request; + +/** + Lookup an A record for a given name. + + @param base the evdns_base to which to apply this operation + @param name a DNS hostname + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return an evdns_request object if successful, or NULL if an error occurred. + @see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6(), evdns_cancel_request() + */ +EVENT2_EXPORT_SYMBOL +struct evdns_request *evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr); + +/** + Lookup an AAAA record for a given name. + + @param base the evdns_base to which to apply this operation + @param name a DNS hostname + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return an evdns_request object if successful, or NULL if an error occurred. + @see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6(), evdns_cancel_request() + */ +EVENT2_EXPORT_SYMBOL +struct evdns_request *evdns_base_resolve_ipv6(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr); + +struct in_addr; +struct in6_addr; + +/** + Lookup a PTR record for a given IP address. 
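A sketch of an A-record lookup with evdns_base_resolve_ipv4(); per the callback contract described earlier in this header, addresses points at count 4-byte entries when result is DNS_ERR_NONE. The dns_cb/lookup names are illustrative:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <event2/dns.h>

/* Sketch: print every IPv4 address returned for a name. */
static void dns_cb(int result, char type, int count, int ttl,
    void *addresses, void *arg)
{
    if (result != DNS_ERR_NONE || type != DNS_IPv4_A)
        return;
    for (int i = 0; i < count; i++) {
        struct in_addr a;
        char buf[INET_ADDRSTRLEN];
        memcpy(&a, (char *)addresses + i * 4, 4);
        printf("%s (ttl %d)\n", inet_ntop(AF_INET, &a, buf, sizeof(buf)), ttl);
    }
}

static struct evdns_request *lookup(struct evdns_base *dns, const char *name)
{
    return evdns_base_resolve_ipv4(dns, name, 0, dns_cb, NULL);
}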
+ + @param base the evdns_base to which to apply this operation + @param in an IPv4 address + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return an evdns_request object if successful, or NULL if an error occurred. + @see evdns_resolve_reverse_ipv6(), evdns_cancel_request() + */ +EVENT2_EXPORT_SYMBOL +struct evdns_request *evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr); + + +/** + Lookup a PTR record for a given IPv6 address. + + @param base the evdns_base to which to apply this operation + @param in an IPv6 address + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return an evdns_request object if successful, or NULL if an error occurred. + @see evdns_resolve_reverse_ipv6(), evdns_cancel_request() + */ +EVENT2_EXPORT_SYMBOL +struct evdns_request *evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr); + +/** + Cancels a pending DNS resolution request. + + @param base the evdns_base that was used to make the request + @param req the evdns_request that was returned by calling a resolve function + @see evdns_base_resolve_ipv4(), evdns_base_resolve_ipv6, evdns_base_resolve_reverse +*/ +EVENT2_EXPORT_SYMBOL +void evdns_cancel_request(struct evdns_base *base, struct evdns_request *req); + +/** + Set the value of a configuration option. + + The currently available configuration options are: + + ndots, timeout, max-timeouts, max-inflight, attempts, randomize-case, + bind-to, initial-probe-timeout, getaddrinfo-allow-skew, + so-rcvbuf, so-sndbuf. + + In versions before Libevent 2.0.3-alpha, the option name needed to end with + a colon. + + @param base the evdns_base to which to apply this operation + @param option the name of the configuration option to be modified + @param val the value to be set + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_set_option(struct evdns_base *base, const char *option, const char *val); + + +/** + Parse a resolv.conf file. + + The 'flags' parameter determines what information is parsed from the + resolv.conf file. See the man page for resolv.conf for the format of this + file. + + The following directives are not parsed from the file: sortlist, rotate, + no-check-names, inet6, debug. + + If this function encounters an error, the possible return values are: 1 = + failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of + memory, 5 = short read from file, 6 = no nameservers listed in the file + + @param base the evdns_base to which to apply this operation + @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC| + DNS_OPTION_HOSTSFILE|DNS_OPTIONS_ALL|DNS_OPTION_NAMESERVERS_NO_DEFAULT + @param filename the path to the resolv.conf file + @return 0 if successful, or various positive error codes if an error + occurred (see above) + @see resolv.conf(3), evdns_config_windows_nameservers() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename); + +/** + Load an /etc/hosts-style file from 'hosts_fname' into 'base'. 
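Putting evdns_callback_type together with the resolve functions above, a lookup might look like the sketch below. This is illustrative and not part of the header; it assumes the DNS_ERR_NONE and DNS_IPv4_A constants from the earlier part of this header, and an evdns_base configured as in the previous sketch.

#include <event2/dns.h>
#include <arpa/inet.h>
#include <stdio.h>

/* Matches evdns_callback_type: for an A lookup, 'addresses' points to
 * 'count' packed 4-byte IPv4 addresses. */
static void on_resolved(int result, char type, int count, int ttl,
    void *addresses, void *arg)
{
	const char *name = arg;

	if (result != DNS_ERR_NONE) {
		fprintf(stderr, "%s: %s\n", name, evdns_err_to_string(result));
		return;
	}
	if (type == DNS_IPv4_A) {
		for (int i = 0; i < count; i++) {
			char buf[INET_ADDRSTRLEN];

			inet_ntop(AF_INET, (const char *)addresses + 4 * i,
			    buf, sizeof(buf));
			printf("%s -> %s (ttl %d)\n", name, buf, ttl);
		}
	}
}

static struct evdns_request *lookup(struct evdns_base *dns, const char *name)
{
	/* flags = 0: let the configured search domains apply. */
	return evdns_base_resolve_ipv4(dns, name, 0, on_resolved, (void *)name);
}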
+ + If hosts_fname is NULL, add minimal entries for localhost, and nothing + else. + + Note that only evdns_getaddrinfo uses the /etc/hosts entries. + + This function does not replace previously loaded hosts entries; to do that, + call evdns_base_clear_host_addresses first. + + Return 0 on success, negative on failure. +*/ +EVENT2_EXPORT_SYMBOL +int evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname); + +#if defined(EVENT_IN_DOXYGEN_) || defined(_WIN32) +/** + Obtain nameserver information using the Windows API. + + Attempt to configure a set of nameservers based on platform settings on + a win32 host. Preferentially tries to use GetNetworkParams; if that fails, + looks in the registry. + + @return 0 if successful, or -1 if an error occurred + @see evdns_resolv_conf_parse() + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_config_windows_nameservers(struct evdns_base *); +#define EVDNS_BASE_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED +#endif + + +/** + Clear the list of search domains. + */ +EVENT2_EXPORT_SYMBOL +void evdns_base_search_clear(struct evdns_base *base); + + +/** + Add a domain to the list of search domains + + @param domain the domain to be added to the search list + */ +EVENT2_EXPORT_SYMBOL +void evdns_base_search_add(struct evdns_base *base, const char *domain); + + +/** + Set the 'ndots' parameter for searches. + + Sets the number of dots which, when found in a name, causes + the first query to be without any search domain. + + @param ndots the new ndots parameter + */ +EVENT2_EXPORT_SYMBOL +void evdns_base_search_ndots_set(struct evdns_base *base, const int ndots); + +/** + A callback that is invoked when a log message is generated + + @param is_warning indicates if the log message is a 'warning' + @param msg the content of the log message + */ +typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg); + + +/** + Set the callback function to handle DNS log messages. If this + callback is not set, evdns log messages are handled with the regular + Libevent logging system. + + @param fn the callback to be invoked when a log message is generated + */ +EVENT2_EXPORT_SYMBOL +void evdns_set_log_fn(evdns_debug_log_fn_type fn); + +/** + Set a callback that will be invoked to generate transaction IDs. By + default, we pick transaction IDs based on the current clock time, which + is bad for security. + + @param fn the new callback, or NULL to use the default. + + NOTE: This function has no effect in Libevent 2.0.4-alpha and later, + since Libevent now provides its own secure RNG. + */ +EVENT2_EXPORT_SYMBOL +void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void)); + +/** + Set a callback used to generate random bytes. By default, we use + the same function as passed to evdns_set_transaction_id_fn to generate + bytes two at a time. If a function is provided here, it's also used + to generate transaction IDs. + + NOTE: This function has no effect in Libevent 2.0.4-alpha and later, + since Libevent now provides its own secure RNG. +*/ +EVENT2_EXPORT_SYMBOL +void evdns_set_random_bytes_fn(void (*fn)(char *, size_t)); + +/* + * Functions used to implement a DNS server. + */ + +struct evdns_server_request; +struct evdns_server_question; + +/** + A callback to implement a DNS server. The callback function receives a DNS + request. 
It should then optionally add a number of answers to the reply + using the evdns_server_request_add_*_reply functions, before calling either + evdns_server_request_respond to send the reply back, or + evdns_server_request_drop to decline to answer the request. + + @param req A newly received request + @param user_data A pointer that was passed to + evdns_add_server_port_with_base(). + */ +typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *); +#define EVDNS_ANSWER_SECTION 0 +#define EVDNS_AUTHORITY_SECTION 1 +#define EVDNS_ADDITIONAL_SECTION 2 + +#define EVDNS_TYPE_A 1 +#define EVDNS_TYPE_NS 2 +#define EVDNS_TYPE_CNAME 5 +#define EVDNS_TYPE_SOA 6 +#define EVDNS_TYPE_PTR 12 +#define EVDNS_TYPE_MX 15 +#define EVDNS_TYPE_TXT 16 +#define EVDNS_TYPE_AAAA 28 + +#define EVDNS_QTYPE_AXFR 252 +#define EVDNS_QTYPE_ALL 255 + +#define EVDNS_CLASS_INET 1 + +/* flags that can be set in answers; as part of the err parameter */ +#define EVDNS_FLAGS_AA 0x400 +#define EVDNS_FLAGS_RD 0x080 + +/** Create a new DNS server port. + + @param base The event base to handle events for the server port. + @param socket A UDP socket to accept DNS requests. + @param flags Always 0 for now. + @param callback A function to invoke whenever we get a DNS request + on the socket. + @param user_data Data to pass to the callback. + @return an evdns_server_port structure for this server port or NULL if + an error occurred. + */ +EVENT2_EXPORT_SYMBOL +struct evdns_server_port *evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type callback, void *user_data); +/** Close down a DNS server port, and free associated structures. */ +EVENT2_EXPORT_SYMBOL +void evdns_close_server_port(struct evdns_server_port *port); + +/** Sets some flags in a reply we're building. + Allows setting of the AA or RD flags + */ +EVENT2_EXPORT_SYMBOL +void evdns_server_request_set_flags(struct evdns_server_request *req, int flags); + +/* Functions to add an answer to an in-progress DNS reply. + */ +EVENT2_EXPORT_SYMBOL +int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data); +EVENT2_EXPORT_SYMBOL +int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl); +EVENT2_EXPORT_SYMBOL +int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl); +EVENT2_EXPORT_SYMBOL +int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl); +EVENT2_EXPORT_SYMBOL +int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl); + +/** + Send back a response to a DNS request, and free the request structure. +*/ +EVENT2_EXPORT_SYMBOL +int evdns_server_request_respond(struct evdns_server_request *req, int err); +/** + Free a DNS request without sending back a reply. +*/ +EVENT2_EXPORT_SYMBOL +int evdns_server_request_drop(struct evdns_server_request *req); +struct sockaddr; +/** + Get the address that made a DNS request. + */ +EVENT2_EXPORT_SYMBOL +int evdns_server_request_get_requesting_addr(struct evdns_server_request *req, struct sockaddr *sa, int addr_len); + +/** Callback for evdns_getaddrinfo. 
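For the server-side API above, a request callback might look like this sketch (illustrative, not part of the header). It relies on the struct evdns_server_request / evdns_server_question fields declared in event2/dns_struct.h, which this changeset also adds, and answers every INET A query with a fixed placeholder address.

#include <event2/dns.h>
#include <event2/dns_struct.h>	/* request/question fields */
#include <arpa/inet.h>

/* Answer A (and ANY) questions with 127.0.0.1; drop everything else. */
static void on_dns_request(struct evdns_server_request *req, void *user_data)
{
	ev_uint32_t addr = htonl(0x7f000001);	/* placeholder answer */
	int answered = 0;

	for (int i = 0; i < req->nquestions; i++) {
		const struct evdns_server_question *q = req->questions[i];

		if (q->dns_question_class != EVDNS_CLASS_INET)
			continue;
		if (q->type == EVDNS_TYPE_A || q->type == EVDNS_QTYPE_ALL) {
			evdns_server_request_add_a_reply(req, q->name, 1,
			    &addr, 600);
			answered = 1;
		}
	}

	if (answered)
		evdns_server_request_respond(req, 0);
	else
		evdns_server_request_drop(req);
}

/* Attach it to a bound, non-blocking UDP socket 'fd':
 *	evdns_add_server_port_with_base(base, fd, 0, on_dns_request, NULL); */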
*/ +typedef void (*evdns_getaddrinfo_cb)(int result, struct evutil_addrinfo *res, void *arg); + +struct evdns_base; +struct evdns_getaddrinfo_request; +/** Make a non-blocking getaddrinfo request using the dns_base in 'dns_base'. + * + * If we can answer the request immediately (with an error or not!), then we + * invoke cb immediately and return NULL. Otherwise we return + * an evdns_getaddrinfo_request and invoke cb later. + * + * When the callback is invoked, we pass as its first argument the error code + * that getaddrinfo would return (or 0 for no error). As its second argument, + * we pass the evutil_addrinfo structures we found (or NULL on error). We + * pass 'arg' as the third argument. + * + * Limitations: + * + * - The AI_V4MAPPED and AI_ALL flags are not currently implemented. + * - For ai_socktype, we only handle SOCKTYPE_STREAM, SOCKTYPE_UDP, and 0. + * - For ai_protocol, we only handle IPPROTO_TCP, IPPROTO_UDP, and 0. + */ +EVENT2_EXPORT_SYMBOL +struct evdns_getaddrinfo_request *evdns_getaddrinfo( + struct evdns_base *dns_base, + const char *nodename, const char *servname, + const struct evutil_addrinfo *hints_in, + evdns_getaddrinfo_cb cb, void *arg); + +/* Cancel an in-progress evdns_getaddrinfo. This MUST NOT be called after the + * getaddrinfo's callback has been invoked. The resolves will be canceled, + * and the callback will be invoked with the error EVUTIL_EAI_CANCEL. */ +EVENT2_EXPORT_SYMBOL +void evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *req); + +/** + Retrieve the address of the 'idx'th configured nameserver. + + @param base The evdns_base to examine. + @param idx The index of the nameserver to get the address of. + @param sa A location to receive the server's address. + @param len The number of bytes available at sa. + + @return the number of bytes written into sa on success. On failure, returns + -1 if idx is greater than the number of configured nameservers, or a + value greater than 'len' if len was not high enough. + */ +EVENT2_EXPORT_SYMBOL +int evdns_base_get_nameserver_addr(struct evdns_base *base, int idx, + struct sockaddr *sa, ev_socklen_t len); + +#ifdef __cplusplus +} +#endif + +#endif /* !EVENT2_DNS_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/dns_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/dns_compat.h new file mode 100644 index 00000000..a58c4b29 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/dns_compat.h @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
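A sketch of the evdns_getaddrinfo() flow documented above (illustrative only; the host name and port are placeholders). Note the caveat from the comment: the callback may run before evdns_getaddrinfo() returns, in which case the function returns NULL rather than a request handle.

#include <event2/dns.h>
#include <event2/util.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <stdio.h>

static void on_gai(int result, struct evutil_addrinfo *res, void *arg)
{
	if (result != 0) {
		fprintf(stderr, "lookup failed: %s\n",
		    evutil_gai_strerror(result));
		return;
	}
	for (struct evutil_addrinfo *ai = res; ai != NULL; ai = ai->ai_next) {
		/* ai->ai_addr / ai->ai_addrlen hold a ready-to-use sockaddr. */
	}
	evutil_freeaddrinfo(res);
}

static void start_lookup(struct evdns_base *dns)
{
	struct evutil_addrinfo hints;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = IPPROTO_TCP;

	/* May invoke on_gai() synchronously and return NULL. */
	evdns_getaddrinfo(dns, "example.net", "443", &hints, on_gai, NULL);
}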
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_DNS_COMPAT_H_INCLUDED_ +#define EVENT2_DNS_COMPAT_H_INCLUDED_ + +/** @file event2/dns_compat.h + + Potentially non-threadsafe versions of the functions in dns.h: provided + only for backwards compatibility. + + + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include +#include + +/** + Initialize the asynchronous DNS library. + + This function initializes support for non-blocking name resolution by + calling evdns_resolv_conf_parse() on UNIX and + evdns_config_windows_nameservers() on Windows. + + @deprecated This function is deprecated because it always uses the current + event base, and is easily confused by multiple calls to event_init(), and + so is not safe for multithreaded use. Additionally, it allocates a global + structure that only one thread can use. The replacement is + evdns_base_new(). + + @return 0 if successful, or -1 if an error occurred + @see evdns_shutdown() + */ +EVENT2_EXPORT_SYMBOL +int evdns_init(void); + +struct evdns_base; +/** + Return the global evdns_base created by event_init() and used by the other + deprecated functions. + + @deprecated This function is deprecated because use of the global + evdns_base is error-prone. + */ +EVENT2_EXPORT_SYMBOL +struct evdns_base *evdns_get_global_base(void); + +/** + Shut down the asynchronous DNS resolver and terminate all active requests. + + If the 'fail_requests' option is enabled, all active requests will return + an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise, + the requests will be silently discarded. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_shutdown(). + + @param fail_requests if zero, active requests will be aborted; if non-zero, + active requests will return DNS_ERR_SHUTDOWN. + @see evdns_init() + */ +EVENT2_EXPORT_SYMBOL +void evdns_shutdown(int fail_requests); + +/** + Add a nameserver. + + The address should be an IPv4 address in network byte order. + The type of address is chosen so that it matches in_addr.s_addr. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_nameserver_add(). + + @param address an IP address in network byte order + @return 0 if successful, or -1 if an error occurred + @see evdns_nameserver_ip_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_nameserver_add(unsigned long int address); + +/** + Get the number of configured nameservers. + + This returns the number of configured nameservers (not necessarily the + number of running nameservers). This is useful for double-checking + whether our calls to the various nameserver configuration functions + have been successful. 
+ + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_count_nameservers(). + + @return the number of configured nameservers + @see evdns_nameserver_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_count_nameservers(void); + +/** + Remove all configured nameservers, and suspend all pending resolves. + + Resolves will not necessarily be re-attempted until evdns_resume() is called. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_clear_nameservers_and_suspend(). + + @return 0 if successful, or -1 if an error occurred + @see evdns_resume() + */ +EVENT2_EXPORT_SYMBOL +int evdns_clear_nameservers_and_suspend(void); + +/** + Resume normal operation and continue any suspended resolve requests. + + Re-attempt resolves left in limbo after an earlier call to + evdns_clear_nameservers_and_suspend(). + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_resume(). + + @return 0 if successful, or -1 if an error occurred + @see evdns_clear_nameservers_and_suspend() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resume(void); + +/** + Add a nameserver. + + This wraps the evdns_nameserver_add() function by parsing a string as an IP + address and adds it as a nameserver. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_nameserver_ip_add(). + + @return 0 if successful, or -1 if an error occurred + @see evdns_nameserver_add() + */ +EVENT2_EXPORT_SYMBOL +int evdns_nameserver_ip_add(const char *ip_as_string); + +/** + Lookup an A record for a given name. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_resolve_ipv4(). + + @param name a DNS hostname + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return 0 if successful, or -1 if an error occurred + @see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr); + +/** + Lookup an AAAA record for a given name. + + @param name a DNS hostname + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return 0 if successful, or -1 if an error occurred + @see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr); + +struct in_addr; +struct in6_addr; + +/** + Lookup a PTR record for a given IP address. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_resolve_reverse(). + + @param in an IPv4 address + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. 
+ @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return 0 if successful, or -1 if an error occurred + @see evdns_resolve_reverse_ipv6() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr); + +/** + Lookup a PTR record for a given IPv6 address. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_resolve_reverse_ipv6(). + + @param in an IPv6 address + @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query. + @param callback a callback function to invoke when the request is completed + @param ptr an argument to pass to the callback function + @return 0 if successful, or -1 if an error occurred + @see evdns_resolve_reverse_ipv6() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr); + +/** + Set the value of a configuration option. + + The currently available configuration options are: + + ndots, timeout, max-timeouts, max-inflight, and attempts + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_set_option(). + + @param option the name of the configuration option to be modified + @param val the value to be set + @param flags Ignored. + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int evdns_set_option(const char *option, const char *val, int flags); + +/** + Parse a resolv.conf file. + + The 'flags' parameter determines what information is parsed from the + resolv.conf file. See the man page for resolv.conf for the format of this + file. + + The following directives are not parsed from the file: sortlist, rotate, + no-check-names, inet6, debug. + + If this function encounters an error, the possible return values are: 1 = + failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of + memory, 5 = short read from file, 6 = no nameservers listed in the file + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_resolv_conf_parse(). + + @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC| + DNS_OPTIONS_ALL + @param filename the path to the resolv.conf file + @return 0 if successful, or various positive error codes if an error + occurred (see above) + @see resolv.conf(3), evdns_config_windows_nameservers() + */ +EVENT2_EXPORT_SYMBOL +int evdns_resolv_conf_parse(int flags, const char *const filename); + +/** + Clear the list of search domains. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_search_clear(). + */ +EVENT2_EXPORT_SYMBOL +void evdns_search_clear(void); + +/** + Add a domain to the list of search domains + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_search_add(). + + @param domain the domain to be added to the search list + */ +EVENT2_EXPORT_SYMBOL +void evdns_search_add(const char *domain); + +/** + Set the 'ndots' parameter for searches. 
+ + Sets the number of dots which, when found in a name, causes + the first query to be without any search domain. + + @deprecated This function is deprecated because it does not allow the + caller to specify which evdns_base it applies to. The recommended + function is evdns_base_search_ndots_set(). + + @param ndots the new ndots parameter + */ +EVENT2_EXPORT_SYMBOL +void evdns_search_ndots_set(const int ndots); + +/** + As evdns_server_new_with_base. + + @deprecated This function is deprecated because it does not allow the + caller to specify which even_base it uses. The recommended + function is evdns_add_server_port_with_base(). + +*/ +EVENT2_EXPORT_SYMBOL +struct evdns_server_port * +evdns_add_server_port(evutil_socket_t socket, int flags, + evdns_request_callback_fn_type callback, void *user_data); + +#ifdef _WIN32 +EVENT2_EXPORT_SYMBOL +int evdns_config_windows_nameservers(void); +#define EVDNS_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_COMPAT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/dns_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/dns_struct.h new file mode 100644 index 00000000..593a8a70 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/dns_struct.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_DNS_STRUCT_H_INCLUDED_ +#define EVENT2_DNS_STRUCT_H_INCLUDED_ + +/** @file event2/dns_struct.h + + Data structures for dns. Using these structures may hurt forward + compatibility with later versions of Libevent: be careful! + + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include + +/* + * Structures used to implement a DNS server. 
+ */ + +struct evdns_server_request { + int flags; + int nquestions; + struct evdns_server_question **questions; +}; +struct evdns_server_question { + int type; +#ifdef __cplusplus + int dns_question_class; +#else + /* You should refer to this field as "dns_question_class". The + * name "class" works in C for backward compatibility, and will be + * removed in a future version. (1.5 or later). */ + int class; +#define dns_question_class class +#endif + char name[1]; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_DNS_STRUCT_H_INCLUDED_ */ + diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/event.h b/probe-busybox/libevent-2.1.11-stable/include/event2/event.h new file mode 100644 index 00000000..a6b6144a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/event.h @@ -0,0 +1,1672 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_EVENT_H_INCLUDED_ +#define EVENT2_EVENT_H_INCLUDED_ + +/** + @mainpage + + @section intro Introduction + + Libevent is an event notification library for developing scalable network + servers. The Libevent API provides a mechanism to execute a callback + function when a specific event occurs on a file descriptor or after a + timeout has been reached. Furthermore, Libevent also support callbacks due + to signals or regular timeouts. + + Libevent is meant to replace the event loop found in event driven network + servers. An application just needs to call event_base_dispatch() and then add or + remove events dynamically without having to change the event loop. + + + Currently, Libevent supports /dev/poll, kqueue(2), select(2), poll(2), + epoll(4), and evports. The internal event mechanism is completely + independent of the exposed event API, and a simple update of Libevent can + provide new functionality without having to redesign the applications. As a + result, Libevent allows for portable application development and provides + the most scalable event notification mechanism available on an operating + system. Libevent can also be used for multithreaded programs. 
Libevent + should compile on Linux, *BSD, Mac OS X, Solaris and, Windows. + + @section usage Standard usage + + Every program that uses Libevent must include the + header, and pass the -levent flag to the linker. (You can instead link + -levent_core if you only want the main event and buffered IO-based code, + and don't want to link any protocol code.) + + @section setup Library setup + + Before you call any other Libevent functions, you need to set up the + library. If you're going to use Libevent from multiple threads in a + multithreaded application, you need to initialize thread support -- + typically by using evthread_use_pthreads() or + evthread_use_windows_threads(). See for more + information. + + This is also the point where you can replace Libevent's memory + management functions with event_set_mem_functions, and enable debug mode + with event_enable_debug_mode(). + + @section base Creating an event base + + Next, you need to create an event_base structure, using event_base_new() + or event_base_new_with_config(). The event_base is responsible for + keeping track of which events are "pending" (that is to say, being + watched to see if they become active) and which events are "active". + Every event is associated with a single event_base. + + @section event Event notification + + For each file descriptor that you wish to monitor, you must create an + event structure with event_new(). (You may also declare an event + structure and call event_assign() to initialize the members of the + structure.) To enable notification, you add the structure to the list + of monitored events by calling event_add(). The event structure must + remain allocated as long as it is active, so it should generally be + allocated on the heap. + + @section loop Dispatching events. + + Finally, you call event_base_dispatch() to loop and dispatch events. + You can also use event_base_loop() for more fine-grained control. + + Currently, only one thread can be dispatching a given event_base at a + time. If you want to run events in multiple threads at once, you can + either have a single event_base whose events add work to a work queue, + or you can create multiple event_base objects. + + @section bufferevent I/O Buffers + + Libevent provides a buffered I/O abstraction on top of the regular event + callbacks. This abstraction is called a bufferevent. A bufferevent + provides input and output buffers that get filled and drained + automatically. The user of a buffered event no longer deals directly + with the I/O, but instead is reading from input and writing to output + buffers. + + Once initialized via bufferevent_socket_new(), the bufferevent structure + can be used repeatedly with bufferevent_enable() and + bufferevent_disable(). Instead of reading and writing directly to a + socket, you would call bufferevent_read() and bufferevent_write(). + + When read enabled the bufferevent will try to read from the file descriptor + and call the read callback. The write callback is executed whenever the + output buffer is drained below the write low watermark, which is 0 by + default. + + See for more information. + + @section timers Timers + + Libevent can also be used to create timers that invoke a callback after a + certain amount of time has expired. The evtimer_new() macro returns + an event struct to use as a timer. To activate the timer, call + evtimer_add(). Timers can be deactivated by calling evtimer_del(). 
+ (These macros are thin wrappers around event_new(), event_add(), + and event_del(); you can also use those instead.) + + @section evdns Asynchronous DNS resolution + + Libevent provides an asynchronous DNS resolver that should be used instead + of the standard DNS resolver functions. See the + functions for more detail. + + @section evhttp Event-driven HTTP servers + + Libevent provides a very simple event-driven HTTP server that can be + embedded in your program and used to service HTTP requests. + + To use this capability, you need to include the header in your + program. See that header for more information. + + @section evrpc A framework for RPC servers and clients + + Libevent provides a framework for creating RPC servers and clients. It + takes care of marshaling and unmarshaling all data structures. + + @section api API Reference + + To browse the complete documentation of the libevent API, click on any of + the following links. + + event2/event.h + The primary libevent header + + event2/thread.h + Functions for use by multithreaded programs + + event2/buffer.h and event2/bufferevent.h + Buffer management for network reading and writing + + event2/util.h + Utility functions for portable nonblocking network code + + event2/dns.h + Asynchronous DNS resolution + + event2/http.h + An embedded libevent-based HTTP server + + event2/rpc.h + A framework for creating RPC servers and clients + + */ + +/** @file event2/event.h + + Core functions for waiting for and receiving events, and using event bases. +*/ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +#include + +/* For int types. */ +#include + +/** + * Structure to hold information and state for a Libevent dispatch loop. + * + * The event_base lies at the center of Libevent; every application will + * have one. It keeps track of all pending and active events, and + * notifies your application of the active ones. + * + * This is an opaque structure; you can allocate one using + * event_base_new() or event_base_new_with_config(). + * + * @see event_base_new(), event_base_free(), event_base_loop(), + * event_base_new_with_config() + */ +struct event_base +#ifdef EVENT_IN_DOXYGEN_ +{/*Empty body so that doxygen will generate documentation here.*/} +#endif +; + +/** + * @struct event + * + * Structure to represent a single event. + * + * An event can have some underlying condition it represents: a socket + * becoming readable or writeable (or both), or a signal becoming raised. + * (An event that represents no underlying condition is still useful: you + * can use one to implement a timer, or to communicate between threads.) + * + * Generally, you can create events with event_new(), then make them + * pending with event_add(). As your event_base runs, it will run the + * callbacks of an events whose conditions are triggered. When you no + * longer want the event, free it with event_free(). + * + * In more depth: + * + * An event may be "pending" (one whose condition we are watching), + * "active" (one whose condition has triggered and whose callback is about + * to run), neither, or both. Events come into existence via + * event_assign() or event_new(), and are then neither active nor pending. + * + * To make an event pending, pass it to event_add(). When doing so, you + * can also set a timeout for the event. 
+ * + * Events become active during an event_base_loop() call when either their + * condition has triggered, or when their timeout has elapsed. You can + * also activate an event manually using event_active(). The even_base + * loop will run the callbacks of active events; after it has done so, it + * marks them as no longer active. + * + * You can make an event non-pending by passing it to event_del(). This + * also makes the event non-active. + * + * Events can be "persistent" or "non-persistent". A non-persistent event + * becomes non-pending as soon as it is triggered: thus, it only runs at + * most once per call to event_add(). A persistent event remains pending + * even when it becomes active: you'll need to event_del() it manually in + * order to make it non-pending. When a persistent event with a timeout + * becomes active, its timeout is reset: this means you can use persistent + * events to implement periodic timeouts. + * + * This should be treated as an opaque structure; you should never read or + * write any of its fields directly. For backward compatibility with old + * code, it is defined in the event2/event_struct.h header; including this + * header may make your code incompatible with other versions of Libevent. + * + * @see event_new(), event_free(), event_assign(), event_get_assignment(), + * event_add(), event_del(), event_active(), event_pending(), + * event_get_fd(), event_get_base(), event_get_events(), + * event_get_callback(), event_get_callback_arg(), + * event_priority_set() + */ +struct event +#ifdef EVENT_IN_DOXYGEN_ +{/*Empty body so that doxygen will generate documentation here.*/} +#endif +; + +/** + * Configuration for an event_base. + * + * There are many options that can be used to alter the behavior and + * implementation of an event_base. To avoid having to pass them all in a + * complex many-argument constructor, we provide an abstract data type + * where you set up configuration information before passing it to + * event_base_new_with_config(). + * + * @see event_config_new(), event_config_free(), event_base_new_with_config(), + * event_config_avoid_method(), event_config_require_features(), + * event_config_set_flag(), event_config_set_num_cpus_hint() + */ +struct event_config +#ifdef EVENT_IN_DOXYGEN_ +{/*Empty body so that doxygen will generate documentation here.*/} +#endif +; + +/** + * Enable some relatively expensive debugging checks in Libevent that + * would normally be turned off. Generally, these checks cause code that + * would otherwise crash mysteriously to fail earlier with an assertion + * failure. Note that this method MUST be called before any events or + * event_bases have been created. + * + * Debug mode can currently catch the following errors: + * An event is re-assigned while it is added + * Any function is called on a non-assigned event + * + * Note that debugging mode uses memory to track every event that has been + * initialized (via event_assign, event_set, or event_new) but not yet + * released (via event_free or event_debug_unassign). If you want to use + * debug mode, and you find yourself running out of memory, you will need + * to use event_debug_unassign to explicitly stop tracking events that + * are no longer considered set-up. + * + * @see event_debug_unassign() + */ +EVENT2_EXPORT_SYMBOL +void event_enable_debug_mode(void); + +/** + * When debugging mode is enabled, informs Libevent that an event should no + * longer be considered as assigned. When debugging mode is not enabled, does + * nothing. 
+ * + * This function must only be called on a non-added event. + * + * @see event_enable_debug_mode() + */ +EVENT2_EXPORT_SYMBOL +void event_debug_unassign(struct event *); + +/** + * Create and return a new event_base to use with the rest of Libevent. + * + * @return a new event_base on success, or NULL on failure. + * + * @see event_base_free(), event_base_new_with_config() + */ +EVENT2_EXPORT_SYMBOL +struct event_base *event_base_new(void); + +/** + Reinitialize the event base after a fork + + Some event mechanisms do not survive across fork. The event base needs + to be reinitialized with the event_reinit() function. + + @param base the event base that needs to be re-initialized + @return 0 if successful, or -1 if some events could not be re-added. + @see event_base_new() +*/ +EVENT2_EXPORT_SYMBOL +int event_reinit(struct event_base *base); + +/** + Event dispatching loop + + This loop will run the event base until either there are no more pending or + active, or until something calls event_base_loopbreak() or + event_base_loopexit(). + + @param base the event_base structure returned by event_base_new() or + event_base_new_with_config() + @return 0 if successful, -1 if an error occurred, or 1 if we exited because + no events were pending or active. + @see event_base_loop() + */ +EVENT2_EXPORT_SYMBOL +int event_base_dispatch(struct event_base *); + +/** + Get the kernel event notification mechanism used by Libevent. + + @param eb the event_base structure returned by event_base_new() + @return a string identifying the kernel event mechanism (kqueue, epoll, etc.) + */ +EVENT2_EXPORT_SYMBOL +const char *event_base_get_method(const struct event_base *); + +/** + Gets all event notification mechanisms supported by Libevent. + + This functions returns the event mechanism in order preferred by + Libevent. Note that this list will include all backends that + Libevent has compiled-in support for, and will not necessarily check + your OS to see whether it has the required resources. + + @return an array with pointers to the names of support methods. + The end of the array is indicated by a NULL pointer. If an + error is encountered NULL is returned. +*/ +EVENT2_EXPORT_SYMBOL +const char **event_get_supported_methods(void); + +/** Query the current monotonic time from a the timer for a struct + * event_base. + */ +EVENT2_EXPORT_SYMBOL +int event_gettime_monotonic(struct event_base *base, struct timeval *tp); + +/** + @name event type flag + + Flags to pass to event_base_get_num_events() to specify the kinds of events + we want to aggregate counts for +*/ +/**@{*/ +/** count the number of active events, which have been triggered.*/ +#define EVENT_BASE_COUNT_ACTIVE 1U +/** count the number of virtual events, which is used to represent an internal + * condition, other than a pending event, that keeps the loop from exiting. */ +#define EVENT_BASE_COUNT_VIRTUAL 2U +/** count the number of events which have been added to event base, including + * internal events. */ +#define EVENT_BASE_COUNT_ADDED 4U +/**@}*/ + +/** + Gets the number of events in event_base, as specified in the flags. + + Since event base has some internal events added to make some of its + functionalities work, EVENT_BASE_COUNT_ADDED may return more than the + number of events you added using event_add(). + + If you pass EVENT_BASE_COUNT_ACTIVE and EVENT_BASE_COUNT_ADDED together, an + active event will be counted twice. However, this might not be the case in + future libevent versions. 
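The life cycle sketched in the mainpage comment (create a base, create and add an event, dispatch) looks roughly like the following. This is an illustrative sketch, not part of the header; it uses event_new(), event_add(), event_free() and the EV_READ/EV_PERSIST flags that are declared further down in this file.

#include <event2/event.h>
#include <unistd.h>
#include <stdio.h>

/* Copy whatever arrives on 'fd' to stdout; break the loop on EOF/error. */
static void on_readable(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	char buf[512];
	ssize_t n = read(fd, buf, sizeof(buf));

	if (n <= 0)
		event_base_loopbreak(base);
	else
		fwrite(buf, 1, (size_t)n, stdout);
}

static int run_loop(evutil_socket_t fd)
{
	struct event_base *base = event_base_new();
	struct event *ev;
	int rc;

	if (base == NULL)
		return -1;

	/* Persistent read event: stays pending until event_free(). */
	ev = event_new(base, fd, EV_READ | EV_PERSIST, on_readable, base);
	event_add(ev, NULL);

	rc = event_base_dispatch(base);	/* runs until loopbreak or no events */

	event_free(ev);
	event_base_free(base);
	return rc;
}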
The return value is an indication of the work + load, but the user shouldn't rely on the exact value as this may change in + the future. + + @param eb the event_base structure returned by event_base_new() + @param flags a bitwise combination of the kinds of events to aggregate + counts for + @return the number of events specified in the flags +*/ +EVENT2_EXPORT_SYMBOL +int event_base_get_num_events(struct event_base *, unsigned int); + +/** + Get the maximum number of events in a given event_base as specified in the + flags. + + @param eb the event_base structure returned by event_base_new() + @param flags a bitwise combination of the kinds of events to aggregate + counts for + @param clear option used to reset the maximum count. + @return the number of events specified in the flags + */ +EVENT2_EXPORT_SYMBOL +int event_base_get_max_events(struct event_base *, unsigned int, int); + +/** + Allocates a new event configuration object. + + The event configuration object can be used to change the behavior of + an event base. + + @return an event_config object that can be used to store configuration, or + NULL if an error is encountered. + @see event_base_new_with_config(), event_config_free(), event_config +*/ +EVENT2_EXPORT_SYMBOL +struct event_config *event_config_new(void); + +/** + Deallocates all memory associated with an event configuration object + + @param cfg the event configuration object to be freed. +*/ +EVENT2_EXPORT_SYMBOL +void event_config_free(struct event_config *cfg); + +/** + Enters an event method that should be avoided into the configuration. + + This can be used to avoid event mechanisms that do not support certain + file descriptor types, or for debugging to avoid certain event + mechanisms. An application can make use of multiple event bases to + accommodate incompatible file descriptor types. + + @param cfg the event configuration object + @param method the name of the event method to avoid + @return 0 on success, -1 on failure. +*/ +EVENT2_EXPORT_SYMBOL +int event_config_avoid_method(struct event_config *cfg, const char *method); + +/** + A flag used to describe which features an event_base (must) provide. + + Because of OS limitations, not every Libevent backend supports every + possible feature. You can use this type with + event_config_require_features() to tell Libevent to only proceed if your + event_base implements a given feature, and you can receive this type from + event_base_get_features() to see which features are available. +*/ +enum event_method_feature { + /** Require an event method that allows edge-triggered events with EV_ET. */ + EV_FEATURE_ET = 0x01, + /** Require an event method where having one event triggered among + * many is [approximately] an O(1) operation. This excludes (for + * example) select and poll, which are approximately O(N) for N + * equal to the total number of possible events. */ + EV_FEATURE_O1 = 0x02, + /** Require an event method that allows file descriptors as well as + * sockets. */ + EV_FEATURE_FDS = 0x04, + /** Require an event method that allows you to use EV_CLOSED to detect + * connection close without the necessity of reading all the pending data. + * + * Methods that do support EV_CLOSED may not be able to provide support on + * all kernel versions. + **/ + EV_FEATURE_EARLY_CLOSE = 0x08 +}; + +/** + A flag passed to event_config_set_flag(). + + These flags change the behavior of an allocated event_base. 
+ + @see event_config_set_flag(), event_base_new_with_config(), + event_method_feature + */ +enum event_base_config_flag { + /** Do not allocate a lock for the event base, even if we have + locking set up. + + Setting this option will make it unsafe and nonfunctional to call + functions on the base concurrently from multiple threads. + */ + EVENT_BASE_FLAG_NOLOCK = 0x01, + /** Do not check the EVENT_* environment variables when configuring + an event_base */ + EVENT_BASE_FLAG_IGNORE_ENV = 0x02, + /** Windows only: enable the IOCP dispatcher at startup + + If this flag is set then bufferevent_socket_new() and + evconn_listener_new() will use IOCP-backed implementations + instead of the usual select-based one on Windows. + */ + EVENT_BASE_FLAG_STARTUP_IOCP = 0x04, + /** Instead of checking the current time every time the event loop is + ready to run timeout callbacks, check after each timeout callback. + */ + EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08, + + /** If we are using the epoll backend, this flag says that it is + safe to use Libevent's internal change-list code to batch up + adds and deletes in order to try to do as few syscalls as + possible. Setting this flag can make your code run faster, but + it may trigger a Linux bug: it is not safe to use this flag + if you have any fds cloned by dup() or its variants. Doing so + will produce strange and hard-to-diagnose bugs. + + This flag can also be activated by setting the + EVENT_EPOLL_USE_CHANGELIST environment variable. + + This flag has no effect if you wind up using a backend other than + epoll. + */ + EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST = 0x10, + + /** Ordinarily, Libevent implements its time and timeout code using + the fastest monotonic timer that we have. If this flag is set, + however, we use less efficient more precise timer, assuming one is + present. + */ + EVENT_BASE_FLAG_PRECISE_TIMER = 0x20 +}; + +/** + Return a bitmask of the features implemented by an event base. This + will be a bitwise OR of one or more of the values of + event_method_feature + + @see event_method_feature + */ +EVENT2_EXPORT_SYMBOL +int event_base_get_features(const struct event_base *base); + +/** + Enters a required event method feature that the application demands. + + Note that not every feature or combination of features is supported + on every platform. Code that requests features should be prepared + to handle the case where event_base_new_with_config() returns NULL, as in: +
+     event_config_require_features(cfg, EV_FEATURE_ET);
+     base = event_base_new_with_config(cfg);
+     if (base == NULL) {
+       // We can't get edge-triggered behavior here.
+       event_config_require_features(cfg, 0);
+       base = event_base_new_with_config(cfg);
+     }
+   
+ + @param cfg the event configuration object + @param feature a bitfield of one or more event_method_feature values. + Replaces values from previous calls to this function. + @return 0 on success, -1 on failure. + @see event_method_feature, event_base_new_with_config() +*/ +EVENT2_EXPORT_SYMBOL +int event_config_require_features(struct event_config *cfg, int feature); + +/** + * Sets one or more flags to configure what parts of the eventual event_base + * will be initialized, and how they'll work. + * + * @see event_base_config_flags, event_base_new_with_config() + **/ +EVENT2_EXPORT_SYMBOL +int event_config_set_flag(struct event_config *cfg, int flag); + +/** + * Records a hint for the number of CPUs in the system. This is used for + * tuning thread pools, etc, for optimal performance. In Libevent 2.0, + * it is only on Windows, and only when IOCP is in use. + * + * @param cfg the event configuration object + * @param cpus the number of cpus + * @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int event_config_set_num_cpus_hint(struct event_config *cfg, int cpus); + +/** + * Record an interval and/or a number of callbacks after which the event base + * should check for new events. By default, the event base will run as many + * events are as activated at the highest activated priority before checking + * for new events. If you configure it by setting max_interval, it will check + * the time after each callback, and not allow more than max_interval to + * elapse before checking for new events. If you configure it by setting + * max_callbacks to a value >= 0, it will run no more than max_callbacks + * callbacks before checking for new events. + * + * This option can decrease the latency of high-priority events, and + * avoid priority inversions where multiple low-priority events keep us from + * polling for high-priority events, but at the expense of slightly decreasing + * the throughput. Use it with caution! + * + * @param cfg The event_base configuration object. + * @param max_interval An interval after which Libevent should stop running + * callbacks and check for more events, or NULL if there should be + * no such interval. + * @param max_callbacks A number of callbacks after which Libevent should + * stop running callbacks and check for more events, or -1 if there + * should be no such limit. + * @param min_priority A priority below which max_interval and max_callbacks + * should not be enforced. If this is set to 0, they are enforced + * for events of every priority; if it's set to 1, they're enforced + * for events of priority 1 and above, and so on. + * @return 0 on success, -1 on failure. + **/ +EVENT2_EXPORT_SYMBOL +int event_config_set_max_dispatch_interval(struct event_config *cfg, + const struct timeval *max_interval, int max_callbacks, + int min_priority); + +/** + Initialize the event API. + + Use event_base_new_with_config() to initialize a new event base, taking + the specified configuration under consideration. The configuration object + can currently be used to avoid certain event notification mechanisms. + + @param cfg the event configuration object + @return an initialized event_base that can be used to registering events, + or NULL if no event base can be created with the requested event_config. + @see event_base_new(), event_base_free(), event_init(), event_assign() +*/ +EVENT2_EXPORT_SYMBOL +struct event_base *event_base_new_with_config(const struct event_config *); + +/** + Deallocate all memory associated with an event_base, and free the base. 
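A short sketch of the event_config workflow described above (illustrative only; the avoided backend and the chosen feature/flag are arbitrary examples).

#include <event2/event.h>

/* Build an event_base that skips select(), insists on O(1) dispatch, and
 * re-checks the clock after every timeout callback. */
static struct event_base *make_tuned_base(void)
{
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	if (cfg == NULL)
		return NULL;

	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);

	base = event_base_new_with_config(cfg);	/* NULL if nothing qualifies */
	event_config_free(cfg);	/* the base does not keep a reference */
	return base;
}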
+ + Note that this function will not close any fds or free any memory passed + to event_new as the argument to callback. + + If there are any pending finalizer callbacks, this function will invoke + them. + + @param eb an event_base to be freed + */ +EVENT2_EXPORT_SYMBOL +void event_base_free(struct event_base *); + +/** + As event_base_free, but do not run finalizers. + */ +EVENT2_EXPORT_SYMBOL +void event_base_free_nofinalize(struct event_base *); + +/** @name Log severities + */ +/**@{*/ +#define EVENT_LOG_DEBUG 0 +#define EVENT_LOG_MSG 1 +#define EVENT_LOG_WARN 2 +#define EVENT_LOG_ERR 3 +/**@}*/ + +/* Obsolete names: these are deprecated, but older programs might use them. + * They violate the reserved-identifier namespace. */ +#define _EVENT_LOG_DEBUG EVENT_LOG_DEBUG +#define _EVENT_LOG_MSG EVENT_LOG_MSG +#define _EVENT_LOG_WARN EVENT_LOG_WARN +#define _EVENT_LOG_ERR EVENT_LOG_ERR + +/** + A callback function used to intercept Libevent's log messages. + + @see event_set_log_callback + */ +typedef void (*event_log_cb)(int severity, const char *msg); +/** + Redirect Libevent's log messages. + + @param cb a function taking two arguments: an integer severity between + EVENT_LOG_DEBUG and EVENT_LOG_ERR, and a string. If cb is NULL, + then the default log is used. + + NOTE: The function you provide *must not* call any other libevent + functionality. Doing so can produce undefined behavior. + */ +EVENT2_EXPORT_SYMBOL +void event_set_log_callback(event_log_cb cb); + +/** + A function to be called if Libevent encounters a fatal internal error. + + @see event_set_fatal_callback + */ +typedef void (*event_fatal_cb)(int err); + +/** + Override Libevent's behavior in the event of a fatal internal error. + + By default, Libevent will call exit(1) if a programming error makes it + impossible to continue correct operation. This function allows you to supply + another callback instead. Note that if the function is ever invoked, + something is wrong with your program, or with Libevent: any subsequent calls + to Libevent may result in undefined behavior. + + Libevent will (almost) always log an EVENT_LOG_ERR message before calling + this function; look at the last log message to see why Libevent has died. + */ +EVENT2_EXPORT_SYMBOL +void event_set_fatal_callback(event_fatal_cb cb); + +#define EVENT_DBG_ALL 0xffffffffu +#define EVENT_DBG_NONE 0 + +/** + Turn on debugging logs and have them sent to the default log handler. + + This is a global setting; if you are going to call it, you must call this + before any calls that create an event-base. You must call it before any + multithreaded use of Libevent. + + Debug logs are verbose. + + @param which Controls which debug messages are turned on. This option is + unused for now; for forward compatibility, you must pass in the constant + "EVENT_DBG_ALL" to turn debugging logs on, or "EVENT_DBG_NONE" to turn + debugging logs off. + */ +EVENT2_EXPORT_SYMBOL +void event_enable_debug_logging(ev_uint32_t which); + +/** + Associate a different event base with an event. + + The event to be associated must not be currently active or pending. + + @param eb the event base + @param ev the event + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int event_base_set(struct event_base *, struct event *); + +/** @name Loop flags + + These flags control the behavior of event_base_loop(). + */ +/**@{*/ +/** Block until we have an active event, then exit once all active events + * have had their callbacks run. 
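As a concrete illustration of event_set_log_callback() above (not part of the header): a handler that tags the severity and, per the warning in the comment, calls no Libevent functions itself.

#include <event2/event.h>
#include <stdio.h>

/* Forward Libevent log messages to stderr with a severity tag. */
static void log_to_stderr(int severity, const char *msg)
{
	const char *tag;

	switch (severity) {
	case EVENT_LOG_DEBUG: tag = "debug"; break;
	case EVENT_LOG_MSG:   tag = "msg";   break;
	case EVENT_LOG_WARN:  tag = "warn";  break;
	case EVENT_LOG_ERR:   tag = "error"; break;
	default:              tag = "?";     break;
	}
	fprintf(stderr, "[libevent %s] %s\n", tag, msg);
}

/* Install once, early:
 *	event_set_log_callback(log_to_stderr);
 * Pass NULL later to restore the default handler. */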
*/ +#define EVLOOP_ONCE 0x01 +/** Do not block: see which events are ready now, run the callbacks + * of the highest-priority ones, then exit. */ +#define EVLOOP_NONBLOCK 0x02 +/** Do not exit the loop because we have no pending events. Instead, keep + * running until event_base_loopexit() or event_base_loopbreak() makes us + * stop. + */ +#define EVLOOP_NO_EXIT_ON_EMPTY 0x04 +/**@}*/ + +/** + Wait for events to become active, and run their callbacks. + + This is a more flexible version of event_base_dispatch(). + + By default, this loop will run the event base until either there are no more + pending or active events, or until something calls event_base_loopbreak() or + event_base_loopexit(). You can override this behavior with the 'flags' + argument. + + @param eb the event_base structure returned by event_base_new() or + event_base_new_with_config() + @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK + @return 0 if successful, -1 if an error occurred, or 1 if we exited because + no events were pending or active. + @see event_base_loopexit(), event_base_dispatch(), EVLOOP_ONCE, + EVLOOP_NONBLOCK + */ +EVENT2_EXPORT_SYMBOL +int event_base_loop(struct event_base *, int); + +/** + Exit the event loop after the specified time + + The next event_base_loop() iteration after the given timer expires will + complete normally (handling all queued events) then exit without + blocking for events again. + + Subsequent invocations of event_base_loop() will proceed normally. + + @param eb the event_base structure returned by event_init() + @param tv the amount of time after which the loop should terminate, + or NULL to exit after running all currently active events. + @return 0 if successful, or -1 if an error occurred + @see event_base_loopbreak() + */ +EVENT2_EXPORT_SYMBOL +int event_base_loopexit(struct event_base *, const struct timeval *); + +/** + Abort the active event_base_loop() immediately. + + event_base_loop() will abort the loop after the next event is completed; + event_base_loopbreak() is typically invoked from this event's callback. + This behavior is analogous to the "break;" statement. + + Subsequent invocations of event_base_loop() will proceed normally. + + @param eb the event_base structure returned by event_init() + @return 0 if successful, or -1 if an error occurred + @see event_base_loopexit() + */ +EVENT2_EXPORT_SYMBOL +int event_base_loopbreak(struct event_base *); + +/** + Tell the active event_base_loop() to scan for new events immediately. + + Calling this function makes the currently active event_base_loop() + start the loop over again (scanning for new events) after the current + event callback finishes. If the event loop is not running, this + function has no effect. + + event_base_loopbreak() is typically invoked from this event's callback. + This behavior is analogous to the "continue;" statement. + + Subsequent invocations of event loop will proceed normally. + + @param eb the event_base structure returned by event_init() + @return 0 if successful, or -1 if an error occurred + @see event_base_loopbreak() + */ +EVENT2_EXPORT_SYMBOL +int event_base_loopcontinue(struct event_base *); + +/** + Checks if the event loop was told to exit by event_base_loopexit(). + + This function will return true for an event_base at every point after + event_loopexit() is called, until the event loop is next entered. 
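+
+  A minimal sketch (not part of the upstream comment; "base" stands for an
+  existing event_base):
+
+      struct timeval tv = { 5, 0 };
+      event_base_loopexit(base, &tv);
+      event_base_dispatch(base);
+      int exited = event_base_got_exit(base);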
+ + @param eb the event_base structure returned by event_init() + @return true if event_base_loopexit() was called on this event base, + or 0 otherwise + @see event_base_loopexit() + @see event_base_got_break() + */ +EVENT2_EXPORT_SYMBOL +int event_base_got_exit(struct event_base *); + +/** + Checks if the event loop was told to abort immediately by event_base_loopbreak(). + + This function will return true for an event_base at every point after + event_base_loopbreak() is called, until the event loop is next entered. + + @param eb the event_base structure returned by event_init() + @return true if event_base_loopbreak() was called on this event base, + or 0 otherwise + @see event_base_loopbreak() + @see event_base_got_exit() + */ +EVENT2_EXPORT_SYMBOL +int event_base_got_break(struct event_base *); + +/** + * @name event flags + * + * Flags to pass to event_new(), event_assign(), event_pending(), and + * anything else with an argument of the form "short events" + */ +/**@{*/ +/** Indicates that a timeout has occurred. It's not necessary to pass + * this flag to event_for new()/event_assign() to get a timeout. */ +#define EV_TIMEOUT 0x01 +/** Wait for a socket or FD to become readable */ +#define EV_READ 0x02 +/** Wait for a socket or FD to become writeable */ +#define EV_WRITE 0x04 +/** Wait for a POSIX signal to be raised*/ +#define EV_SIGNAL 0x08 +/** + * Persistent event: won't get removed automatically when activated. + * + * When a persistent event with a timeout becomes activated, its timeout + * is reset to 0. + */ +#define EV_PERSIST 0x10 +/** Select edge-triggered behavior, if supported by the backend. */ +#define EV_ET 0x20 +/** + * If this option is provided, then event_del() will not block in one thread + * while waiting for the event callback to complete in another thread. + * + * To use this option safely, you may need to use event_finalize() or + * event_free_finalize() in order to safely tear down an event in a + * multithreaded application. See those functions for more information. + **/ +#define EV_FINALIZE 0x40 +/** + * Detects connection close events. You can use this to detect when a + * connection has been closed, without having to read all the pending data + * from a connection. + * + * Not all backends support EV_CLOSED. To detect or require it, use the + * feature flag EV_FEATURE_EARLY_CLOSE. + **/ +#define EV_CLOSED 0x80 +/**@}*/ + +/** + @name evtimer_* macros + + Aliases for working with one-shot timer events + If you need EV_PERSIST timer use event_*() functions. 
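+
+  Illustrative sketch (not part of the upstream comment; "base" and
+  "timeout_cb" are placeholders):
+
+      struct event *t = evtimer_new(base, timeout_cb, NULL);
+      struct timeval one_sec = { 1, 0 };
+      evtimer_add(t, &one_sec);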
+ */ +/**@{*/ +#define evtimer_assign(ev, b, cb, arg) \ + event_assign((ev), (b), -1, 0, (cb), (arg)) +#define evtimer_new(b, cb, arg) event_new((b), -1, 0, (cb), (arg)) +#define evtimer_add(ev, tv) event_add((ev), (tv)) +#define evtimer_del(ev) event_del(ev) +#define evtimer_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv)) +#define evtimer_initialized(ev) event_initialized(ev) +/**@}*/ + +/** + @name evsignal_* macros + + Aliases for working with signal events + */ +/**@{*/ +#define evsignal_add(ev, tv) event_add((ev), (tv)) +#define evsignal_assign(ev, b, x, cb, arg) \ + event_assign((ev), (b), (x), EV_SIGNAL|EV_PERSIST, cb, (arg)) +#define evsignal_new(b, x, cb, arg) \ + event_new((b), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg)) +#define evsignal_del(ev) event_del(ev) +#define evsignal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv)) +#define evsignal_initialized(ev) event_initialized(ev) +/**@}*/ + +/** + @name evuser_* macros + + Aliases for working with user-triggered events + If you need EV_PERSIST event use event_*() functions. + */ +/**@{*/ +#define evuser_new(b, cb, arg) event_new((b), -1, 0, (cb), (arg)) +#define evuser_del(ev) event_del(ev) +#define evuser_pending(ev, tv) event_pending((ev), 0, (tv)) +#define evuser_initialized(ev) event_initialized(ev) +#define evuser_trigger(ev) event_active((ev), 0, 0) +/**@}*/ + +/** + A callback function for an event. + + It receives three arguments: + + @param fd An fd or signal + @param events One or more EV_* flags + @param arg A user-supplied argument. + + @see event_new() + */ +typedef void (*event_callback_fn)(evutil_socket_t, short, void *); + +/** + Return a value used to specify that the event itself must be used as the callback argument. + + The function event_new() takes a callback argument which is passed + to the event's callback function. To specify that the argument to be + passed to the callback function is the event that event_new() returns, + pass in the return value of event_self_cbarg() as the callback argument + for event_new(). + + For example: +
+      struct event *ev = event_new(base, sock, events, callback, event_self_cbarg());
+  
+ + For consistency with event_new(), it is possible to pass the return value + of this function as the callback argument for event_assign() – this + achieves the same result as passing the event in directly. + + @return a value to be passed as the callback argument to event_new() or + event_assign(). + @see event_new(), event_assign() + */ +EVENT2_EXPORT_SYMBOL +void *event_self_cbarg(void); + +/** + Allocate and assign a new event structure, ready to be added. + + The function event_new() returns a new event that can be used in + future calls to event_add() and event_del(). The fd and events + arguments determine which conditions will trigger the event; the + callback and callback_arg arguments tell Libevent what to do when the + event becomes active. + + If events contains one of EV_READ, EV_WRITE, or EV_READ|EV_WRITE, then + fd is a file descriptor or socket that should get monitored for + readiness to read, readiness to write, or readiness for either operation + (respectively). If events contains EV_SIGNAL, then fd is a signal + number to wait for. If events contains none of those flags, then the + event can be triggered only by a timeout or by manual activation with + event_active(): In this case, fd must be -1. + + The EV_PERSIST flag can also be passed in the events argument: it makes + event_add() persistent until event_del() is called. + + The EV_ET flag is compatible with EV_READ and EV_WRITE, and supported + only by certain backends. It tells Libevent to use edge-triggered + events. + + The EV_TIMEOUT flag has no effect here. + + It is okay to have multiple events all listening on the same fds; but + they must either all be edge-triggered, or all not be edge triggered. + + When the event becomes active, the event loop will run the provided + callback function, with three arguments. The first will be the provided + fd value. The second will be a bitfield of the events that triggered: + EV_READ, EV_WRITE, or EV_SIGNAL. Here the EV_TIMEOUT flag indicates + that a timeout occurred, and EV_ET indicates that an edge-triggered + event occurred. The third event will be the callback_arg pointer that + you provide. + + @param base the event base to which the event should be attached. + @param fd the file descriptor or signal to be monitored, or -1. + @param events desired events to monitor: bitfield of EV_READ, EV_WRITE, + EV_SIGNAL, EV_PERSIST, EV_ET. + @param callback callback function to be invoked when the event occurs + @param callback_arg an argument to be passed to the callback function + + @return a newly allocated struct event that must later be freed with + event_free() or NULL if an error occurred. + @see event_free(), event_add(), event_del(), event_assign() + */ +EVENT2_EXPORT_SYMBOL +struct event *event_new(struct event_base *, evutil_socket_t, short, event_callback_fn, void *); + + +/** + Prepare a new, already-allocated event structure to be added. + + The function event_assign() prepares the event structure ev to be used + in future calls to event_add() and event_del(). Unlike event_new(), it + doesn't allocate memory itself: it requires that you have already + allocated a struct event, probably on the heap. Doing this will + typically make your code depend on the size of the event structure, and + thereby create incompatibility with future versions of Libevent. + + The easiest way to avoid this problem is just to use event_new() and + event_free() instead. 
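+
+  For example (illustrative, not part of the upstream comment; "base", "fd"
+  and "read_cb" are placeholders):
+
+      struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, read_cb, NULL);
+      event_add(ev, NULL);
+      event_base_dispatch(base);
+      event_free(ev);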
+ + A slightly harder way to future-proof your code is to use + event_get_struct_event_size() to determine the required size of an event + at runtime. + + Note that it is NOT safe to call this function on an event that is + active or pending. Doing so WILL corrupt internal data structures in + Libevent, and lead to strange, hard-to-diagnose bugs. You _can_ use + event_assign to change an existing event, but only if it is not active + or pending! + + The arguments for this function, and the behavior of the events that it + makes, are as for event_new(). + + @param ev an event struct to be modified + @param base the event base to which ev should be attached. + @param fd the file descriptor to be monitored + @param events desired events to monitor; can be EV_READ and/or EV_WRITE + @param callback callback function to be invoked when the event occurs + @param callback_arg an argument to be passed to the callback function + + @return 0 if success, or -1 on invalid arguments. + + @see event_new(), event_add(), event_del(), event_base_once(), + event_get_struct_event_size() + */ +EVENT2_EXPORT_SYMBOL +int event_assign(struct event *, struct event_base *, evutil_socket_t, short, event_callback_fn, void *); + +/** + Deallocate a struct event * returned by event_new(). + + If the event is pending or active, this function makes it non-pending + and non-active first. + */ +EVENT2_EXPORT_SYMBOL +void event_free(struct event *); + +/** + * Callback type for event_finalize and event_free_finalize(). + **/ +typedef void (*event_finalize_callback_fn)(struct event *, void *); +/** + @name Finalization functions + + These functions are used to safely tear down an event in a multithreaded + application. If you construct your events with EV_FINALIZE to avoid + deadlocks, you will need a way to remove an event in the certainty that + it will definitely not be running its callback when you deallocate it + and its callback argument. + + To do this, call one of event_finalize() or event_free_finalize with + 0 for its first argument, the event to tear down as its second argument, + and a callback function as its third argument. The callback will be + invoked as part of the event loop, with the event's priority. + + After you call a finalizer function, event_add() and event_active() will + no longer work on the event, and event_del() will produce a no-op. You + must not try to change the event's fields with event_assign() or + event_set() while the finalize callback is in progress. Once the + callback has been invoked, you should treat the event structure as + containing uninitialized memory. + + The event_free_finalize() function frees the event after it's finalized; + event_finalize() does not. + + A finalizer callback must not make events pending or active. It must not + add events, activate events, or attempt to "resuscitate" the event being + finalized in any way. + + @return 0 on success, -1 on failure. + */ +/**@{*/ +EVENT2_EXPORT_SYMBOL +int event_finalize(unsigned, struct event *, event_finalize_callback_fn); +EVENT2_EXPORT_SYMBOL +int event_free_finalize(unsigned, struct event *, event_finalize_callback_fn); +/**@}*/ + +/** + Schedule a one-time event + + The function event_base_once() is similar to event_new(). However, it + schedules a callback to be called exactly once, and does not require the + caller to prepare an event structure. + + Note that in Libevent 2.0 and earlier, if the event is never triggered, the + internal memory used to hold it will never be freed. 
In Libevent 2.1, + the internal memory will get freed by event_base_free() if the event + is never triggered. The 'arg' value, however, will not get freed in either + case--you'll need to free that on your own if you want it to go away. + + @param base an event_base + @param fd a file descriptor to monitor, or -1 for no fd. + @param events event(s) to monitor; can be any of EV_READ | + EV_WRITE, or EV_TIMEOUT + @param callback callback function to be invoked when the event occurs + @param arg an argument to be passed to the callback function + @param timeout the maximum amount of time to wait for the event. NULL + makes an EV_READ/EV_WRITE event make forever; NULL makes an + EV_TIMEOUT event success immediately. + @return 0 if successful, or -1 if an error occurred + */ +EVENT2_EXPORT_SYMBOL +int event_base_once(struct event_base *, evutil_socket_t, short, event_callback_fn, void *, const struct timeval *); + +/** + Add an event to the set of pending events. + + The function event_add() schedules the execution of the event 'ev' when the + condition specified by event_assign() or event_new() occurs, or when the time + specified in timeout has elapsed. If a timeout is NULL, no timeout + occurs and the function will only be + called if a matching event occurs. The event in the + ev argument must be already initialized by event_assign() or event_new() + and may not be used + in calls to event_assign() until it is no longer pending. + + If the event in the ev argument already has a scheduled timeout, calling + event_add() replaces the old timeout with the new one if tv is non-NULL. + + @param ev an event struct initialized via event_assign() or event_new() + @param timeout the maximum amount of time to wait for the event, or NULL + to wait forever + @return 0 if successful, or -1 if an error occurred + @see event_del(), event_assign(), event_new() + */ +EVENT2_EXPORT_SYMBOL +int event_add(struct event *ev, const struct timeval *timeout); + +/** + Remove a timer from a pending event without removing the event itself. + + If the event has a scheduled timeout, this function unschedules it but + leaves the event otherwise pending. + + @param ev an event struct initialized via event_assign() or event_new() + @return 0 on success, or -1 if an error occurred. +*/ +EVENT2_EXPORT_SYMBOL +int event_remove_timer(struct event *ev); + +/** + Remove an event from the set of monitored events. + + The function event_del() will cancel the event in the argument ev. If the + event has already executed or has never been added the call will have no + effect. + + @param ev an event struct to be removed from the working set + @return 0 if successful, or -1 if an error occurred + @see event_add() + */ +EVENT2_EXPORT_SYMBOL +int event_del(struct event *); + +/** + As event_del(), but never blocks while the event's callback is running + in another thread, even if the event was constructed without the + EV_FINALIZE flag. + */ +EVENT2_EXPORT_SYMBOL +int event_del_noblock(struct event *ev); +/** + As event_del(), but always blocks while the event's callback is running + in another thread, even if the event was constructed with the + EV_FINALIZE flag. + */ +EVENT2_EXPORT_SYMBOL +int event_del_block(struct event *ev); + +/** + Make an event active. + + You can use this function on a pending or a non-pending event to make it + active, so that its callback will be run by event_base_dispatch() or + event_base_loop(). 
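+
+  For example (illustrative, not part of the upstream comment; "ev" is an
+  event created elsewhere):
+
+      event_active(ev, EV_READ, 0);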
+ + One common use in multithreaded programs is to wake the thread running + event_base_loop() from another thread. + + @param ev an event to make active. + @param res a set of flags to pass to the event's callback. + @param ncalls an obsolete argument: this is ignored. + **/ +EVENT2_EXPORT_SYMBOL +void event_active(struct event *ev, int res, short ncalls); + +/** + Checks if a specific event is pending or scheduled. + + @param ev an event struct previously passed to event_add() + @param events the requested event type; any of EV_TIMEOUT|EV_READ| + EV_WRITE|EV_SIGNAL + @param tv if this field is not NULL, and the event has a timeout, + this field is set to hold the time at which the timeout will + expire. + + @return true if the event is pending on any of the events in 'what', (that + is to say, it has been added), or 0 if the event is not added. + */ +EVENT2_EXPORT_SYMBOL +int event_pending(const struct event *ev, short events, struct timeval *tv); + +/** + If called from within the callback for an event, returns that event. + + The behavior of this function is not defined when called from outside the + callback function for an event. + */ +EVENT2_EXPORT_SYMBOL +struct event *event_base_get_running_event(struct event_base *base); + +/** + Test if an event structure might be initialized. + + The event_initialized() function can be used to check if an event has been + initialized. + + Warning: This function is only useful for distinguishing a zeroed-out + piece of memory from an initialized event, it can easily be confused by + uninitialized memory. Thus, it should ONLY be used to distinguish an + initialized event from zero. + + @param ev an event structure to be tested + @return 1 if the structure might be initialized, or 0 if it has not been + initialized + */ +EVENT2_EXPORT_SYMBOL +int event_initialized(const struct event *ev); + +/** + Get the signal number assigned to a signal event +*/ +#define event_get_signal(ev) ((int)event_get_fd(ev)) + +/** + Get the socket or signal assigned to an event, or -1 if the event has + no socket. +*/ +EVENT2_EXPORT_SYMBOL +evutil_socket_t event_get_fd(const struct event *ev); + +/** + Get the event_base associated with an event. +*/ +EVENT2_EXPORT_SYMBOL +struct event_base *event_get_base(const struct event *ev); + +/** + Return the events (EV_READ, EV_WRITE, etc) assigned to an event. +*/ +EVENT2_EXPORT_SYMBOL +short event_get_events(const struct event *ev); + +/** + Return the callback assigned to an event. +*/ +EVENT2_EXPORT_SYMBOL +event_callback_fn event_get_callback(const struct event *ev); + +/** + Return the callback argument assigned to an event. +*/ +EVENT2_EXPORT_SYMBOL +void *event_get_callback_arg(const struct event *ev); + +/** + Return the priority of an event. + @see event_priority_init(), event_get_priority() +*/ +EVENT2_EXPORT_SYMBOL +int event_get_priority(const struct event *ev); + +/** + Extract _all_ of arguments given to construct a given event. The + event_base is copied into *base_out, the fd is copied into *fd_out, and so + on. + + If any of the "_out" arguments is NULL, it will be ignored. + */ +EVENT2_EXPORT_SYMBOL +void event_get_assignment(const struct event *event, + struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, + event_callback_fn *callback_out, void **arg_out); + +/** + Return the size of struct event that the Libevent library was compiled + with. 
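+
+  One pattern this enables (illustrative, not part of the upstream comment;
+  "base", "fd" and "read_cb" are placeholders) is allocating event storage
+  of the size reported at run time rather than relying on
+  sizeof(struct event):
+
+      struct event *ev = malloc(event_get_struct_event_size());
+      event_assign(ev, base, fd, EV_READ, read_cb, NULL);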
+ + This will be NO GREATER than sizeof(struct event) if you're running with + the same version of Libevent that your application was built with, but + otherwise might not. + + Note that it might be SMALLER than sizeof(struct event) if some future + version of Libevent adds extra padding to the end of struct event. + We might do this to help ensure ABI-compatibility between different + versions of Libevent. + */ +EVENT2_EXPORT_SYMBOL +size_t event_get_struct_event_size(void); + +/** + Get the Libevent version. + + Note that this will give you the version of the library that you're + currently linked against, not the version of the headers that you've + compiled against. + + @return a string containing the version number of Libevent +*/ +EVENT2_EXPORT_SYMBOL +const char *event_get_version(void); + +/** + Return a numeric representation of Libevent's version. + + Note that this will give you the version of the library that you're + currently linked against, not the version of the headers you've used to + compile. + + The format uses one byte each for the major, minor, and patchlevel parts of + the version number. The low-order byte is unused. For example, version + 2.0.1-alpha has a numeric representation of 0x02000100 +*/ +EVENT2_EXPORT_SYMBOL +ev_uint32_t event_get_version_number(void); + +/** As event_get_version, but gives the version of Libevent's headers. */ +#define LIBEVENT_VERSION EVENT__VERSION +/** As event_get_version_number, but gives the version number of Libevent's + * headers. */ +#define LIBEVENT_VERSION_NUMBER EVENT__NUMERIC_VERSION + +/** Largest number of priorities that Libevent can support. */ +#define EVENT_MAX_PRIORITIES 256 +/** + Set the number of different event priorities + + By default Libevent schedules all active events with the same priority. + However, some time it is desirable to process some events with a higher + priority than others. For that reason, Libevent supports strict priority + queues. Active events with a lower priority are always processed before + events with a higher priority. + + The number of different priorities can be set initially with the + event_base_priority_init() function. This function should be called + before the first call to event_base_dispatch(). The + event_priority_set() function can be used to assign a priority to an + event. By default, Libevent assigns the middle priority to all events + unless their priority is explicitly set. + + Note that urgent-priority events can starve less-urgent events: after + running all urgent-priority callbacks, Libevent checks for more urgent + events again, before running less-urgent events. Less-urgent events + will not have their callbacks run until there are no events more urgent + than them that want to be active. + + @param eb the event_base structure returned by event_base_new() + @param npriorities the maximum number of priorities + @return 0 if successful, or -1 if an error occurred + @see event_priority_set() + */ +EVENT2_EXPORT_SYMBOL +int event_base_priority_init(struct event_base *, int); + +/** + Get the number of different event priorities. + + @param eb the event_base structure returned by event_base_new() + @return Number of different event priorities + @see event_base_priority_init() +*/ +EVENT2_EXPORT_SYMBOL +int event_base_get_npriorities(struct event_base *eb); + +/** + Assign a priority to an event. 
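+
+  Illustrative sketch (not part of the upstream comment; "base", "ev_urgent"
+  and "ev_normal" are placeholders):
+
+      event_base_priority_init(base, 2);
+      event_priority_set(ev_urgent, 0);
+      event_priority_set(ev_normal, 1);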
+ + @param ev an event struct + @param priority the new priority to be assigned + @return 0 if successful, or -1 if an error occurred + @see event_priority_init(), event_get_priority() + */ +EVENT2_EXPORT_SYMBOL +int event_priority_set(struct event *, int); + +/** + Prepare an event_base to use a large number of timeouts with the same + duration. + + Libevent's default scheduling algorithm is optimized for having a large + number of timeouts with their durations more or less randomly + distributed. But if you have a large number of timeouts that all have + the same duration (for example, if you have a large number of + connections that all have a 10-second timeout), then you can improve + Libevent's performance by telling Libevent about it. + + To do this, call this function with the common duration. It will return a + pointer to a different, opaque timeout value. (Don't depend on its actual + contents!) When you use this timeout value in event_add(), Libevent will + schedule the event more efficiently. + + (This optimization probably will not be worthwhile until you have thousands + or tens of thousands of events with the same timeout.) + */ +EVENT2_EXPORT_SYMBOL +const struct timeval *event_base_init_common_timeout(struct event_base *base, + const struct timeval *duration); + +#if !defined(EVENT__DISABLE_MM_REPLACEMENT) || defined(EVENT_IN_DOXYGEN_) +/** + Override the functions that Libevent uses for memory management. + + Usually, Libevent uses the standard libc functions malloc, realloc, and + free to allocate memory. Passing replacements for those functions to + event_set_mem_functions() overrides this behavior. + + Note that all memory returned from Libevent will be allocated by the + replacement functions rather than by malloc() and realloc(). Thus, if you + have replaced those functions, it will not be appropriate to free() memory + that you get from Libevent. Instead, you must use the free_fn replacement + that you provided. + + Note also that if you are going to call this function, you should do so + before any call to any Libevent function that does allocation. + Otherwise, those functions will allocate their memory using malloc(), but + then later free it using your provided free_fn. + + @param malloc_fn A replacement for malloc. + @param realloc_fn A replacement for realloc + @param free_fn A replacement for free. + **/ +EVENT2_EXPORT_SYMBOL +void event_set_mem_functions( + void *(*malloc_fn)(size_t sz), + void *(*realloc_fn)(void *ptr, size_t sz), + void (*free_fn)(void *ptr)); +/** This definition is present if Libevent was built with support for + event_set_mem_functions() */ +#define EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED +#endif + +/** + Writes a human-readable description of all inserted and/or active + events to a provided stdio stream. + + This is intended for debugging; its format is not guaranteed to be the same + between libevent versions. + + @param base An event_base on which to scan the events. + @param output A stdio file to write on. + */ +EVENT2_EXPORT_SYMBOL +void event_base_dump_events(struct event_base *, FILE *); + + +/** + Activates all pending events for the given fd and event mask. + + This function activates pending events only. Events which have not been + added will not become active. + + @param base the event_base on which to activate the events. + @param fd An fd to active events on. + @param events One or more of EV_{READ,WRITE,TIMEOUT}. 
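+
+  For example (illustrative, not part of the upstream comment):
+
+      event_base_active_by_fd(base, fd, EV_READ);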
+ */ +EVENT2_EXPORT_SYMBOL +void event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events); + +/** + Activates all pending signals with a given signal number + + This function activates pending events only. Events which have not been + added will not become active. + + @param base the event_base on which to activate the events. + @param fd The signal to active events on. + */ +EVENT2_EXPORT_SYMBOL +void event_base_active_by_signal(struct event_base *base, int sig); + +/** + * Callback for iterating events in an event base via event_base_foreach_event + */ +typedef int (*event_base_foreach_event_cb)(const struct event_base *, const struct event *, void *); + +/** + Iterate over all added or active events events in an event loop, and invoke + a given callback on each one. + + The callback must not call any function that modifies the event base, that + modifies any event in the event base, or that adds or removes any event to + the event base. Doing so is unsupported and will lead to undefined + behavior -- likely, to crashes. + + event_base_foreach_event() holds a lock on the event_base() for the whole + time it's running: slow callbacks are not advisable. + + Note that Libevent adds some events of its own to make pieces of its + functionality work. You must not assume that the only events you'll + encounter will be the ones you added yourself. + + The callback function must return 0 to continue iteration, or some other + integer to stop iterating. + + @param base An event_base on which to scan the events. + @param fn A callback function to receive the events. + @param arg An argument passed to the callback function. + @return 0 if we iterated over every event, or the value returned by the + callback function if the loop exited early. +*/ +EVENT2_EXPORT_SYMBOL +int event_base_foreach_event(struct event_base *base, event_base_foreach_event_cb fn, void *arg); + + +/** Sets 'tv' to the current time (as returned by gettimeofday()), + looking at the cached value in 'base' if possible, and calling + gettimeofday() or clock_gettime() as appropriate if there is no + cached time. + + Generally, this value will only be cached while actually + processing event callbacks, and may be very inaccurate if your + callbacks take a long time to execute. + + Returns 0 on success, negative on failure. + */ +EVENT2_EXPORT_SYMBOL +int event_base_gettimeofday_cached(struct event_base *base, + struct timeval *tv); + +/** Update cached_tv in the 'base' to the current time + * + * You can use this function is useful for selectively increasing + * the accuracy of the cached time value in 'base' during callbacks + * that take a long time to execute. + * + * This function has no effect if the base is currently not in its + * event loop, or if timeval caching is disabled via + * EVENT_BASE_FLAG_NO_CACHE_TIME. + * + * @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int event_base_update_cache_time(struct event_base *base); + +/** Release up all globally-allocated resources allocated by Libevent. + + This function does not free developer-controlled resources like + event_bases, events, bufferevents, listeners, and so on. It only releases + resources like global locks that there is no other way to free. + + It is not actually necessary to call this function before exit: every + resource that it frees would be released anyway on exit. It mainly exists + so that resource-leak debugging tools don't see Libevent as holding + resources at exit. 
+ + You should only call this function when no other Libevent functions will + be invoked -- e.g., when cleanly exiting a program. + */ +EVENT2_EXPORT_SYMBOL +void libevent_global_shutdown(void); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/event_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/event_compat.h new file mode 100644 index 00000000..5110175a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/event_compat.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_EVENT_COMPAT_H_INCLUDED_ +#define EVENT2_EVENT_COMPAT_H_INCLUDED_ + +/** @file event2/event_compat.h + + Potentially non-threadsafe versions of the functions in event.h: provided + only for backwards compatibility. + + In the oldest versions of Libevent, event_base was not a first-class + structure. Instead, there was a single event base that every function + manipulated. Later, when separate event bases were added, the old functions + that didn't take an event_base argument needed to work by manipulating the + "current" event base. This could lead to thread-safety issues, and obscure, + hard-to-diagnose bugs. + + @deprecated All functions in this file are by definition deprecated. + */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include + +/** + Initialize the event API. + + The event API needs to be initialized with event_init() before it can be + used. Sets the global current base that gets used for events that have no + base associated with them. + + @deprecated This function is deprecated because it replaces the "current" + event_base, and is totally unsafe for multithreaded use. The replacement + is event_base_new(). + + @see event_base_set(), event_base_new() + */ +EVENT2_EXPORT_SYMBOL +struct event_base *event_init(void); + +/** + Loop to process events. 
+ + Like event_base_dispatch(), but uses the "current" base. + + @deprecated This function is deprecated because it is easily confused by + multiple calls to event_init(), and because it is not safe for + multithreaded use. The replacement is event_base_dispatch(). + + @see event_base_dispatch(), event_init() + */ +EVENT2_EXPORT_SYMBOL +int event_dispatch(void); + +/** + Handle events. + + This function behaves like event_base_loop(), but uses the "current" base + + @deprecated This function is deprecated because it uses the event base from + the last call to event_init, and is therefore not safe for multithreaded + use. The replacement is event_base_loop(). + + @see event_base_loop(), event_init() +*/ +EVENT2_EXPORT_SYMBOL +int event_loop(int); + + +/** + Exit the event loop after the specified time. + + This function behaves like event_base_loopexit(), except that it uses the + "current" base. + + @deprecated This function is deprecated because it uses the event base from + the last call to event_init, and is therefore not safe for multithreaded + use. The replacement is event_base_loopexit(). + + @see event_init, event_base_loopexit() + */ +EVENT2_EXPORT_SYMBOL +int event_loopexit(const struct timeval *); + + +/** + Abort the active event_loop() immediately. + + This function behaves like event_base_loopbreakt(), except that it uses the + "current" base. + + @deprecated This function is deprecated because it uses the event base from + the last call to event_init, and is therefore not safe for multithreaded + use. The replacement is event_base_loopbreak(). + + @see event_base_loopbreak(), event_init() + */ +EVENT2_EXPORT_SYMBOL +int event_loopbreak(void); + +/** + Schedule a one-time event to occur. + + @deprecated This function is obsolete, and has been replaced by + event_base_once(). Its use is deprecated because it relies on the + "current" base configured by event_init(). + + @see event_base_once() + */ +EVENT2_EXPORT_SYMBOL +int event_once(evutil_socket_t , short, + void (*)(evutil_socket_t, short, void *), void *, const struct timeval *); + + +/** + Get the kernel event notification mechanism used by Libevent. + + @deprecated This function is obsolete, and has been replaced by + event_base_get_method(). Its use is deprecated because it relies on the + "current" base configured by event_init(). + + @see event_base_get_method() + */ +EVENT2_EXPORT_SYMBOL +const char *event_get_method(void); + + +/** + Set the number of different event priorities. + + @deprecated This function is deprecated because it is easily confused by + multiple calls to event_init(), and because it is not safe for + multithreaded use. The replacement is event_base_priority_init(). + + @see event_base_priority_init() + */ +EVENT2_EXPORT_SYMBOL +int event_priority_init(int); + +/** + Prepare an event structure to be added. + + @deprecated event_set() is not recommended for new code, because it requires + a subsequent call to event_base_set() to be safe under most circumstances. + Use event_assign() or event_new() instead. + */ +EVENT2_EXPORT_SYMBOL +void event_set(struct event *, evutil_socket_t, short, void (*)(evutil_socket_t, short, void *), void *); + +#define evtimer_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg)) +#define evsignal_set(ev, x, cb, arg) \ + event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg)) + + +/** + @name timeout_* macros + + @deprecated These macros are deprecated because their naming is inconsistent + with the rest of Libevent. Use the evtimer_* macros instead. 
+ @{ + */ +#define timeout_add(ev, tv) event_add((ev), (tv)) +#define timeout_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg)) +#define timeout_del(ev) event_del(ev) +#define timeout_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv)) +#define timeout_initialized(ev) event_initialized(ev) +/**@}*/ + +/** + @name signal_* macros + + @deprecated These macros are deprecated because their naming is inconsistent + with the rest of Libevent. Use the evsignal_* macros instead. + @{ + */ +#define signal_add(ev, tv) event_add((ev), (tv)) +#define signal_set(ev, x, cb, arg) \ + event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg)) +#define signal_del(ev) event_del(ev) +#define signal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv)) +#define signal_initialized(ev) event_initialized(ev) +/**@}*/ + +#ifndef EVENT_FD +/* These macros are obsolete; use event_get_fd and event_get_signal instead. */ +#define EVENT_FD(ev) ((int)event_get_fd(ev)) +#define EVENT_SIGNAL(ev) event_get_signal(ev) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_COMPAT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/event_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/event_struct.h new file mode 100644 index 00000000..1c8b71b6 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/event_struct.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_EVENT_STRUCT_H_INCLUDED_ +#define EVENT2_EVENT_STRUCT_H_INCLUDED_ + +/** @file event2/event_struct.h + + Structures used by event.h. Using these structures directly WILL harm + forward compatibility: be careful. + + No field declared in this file should be used directly in user code. Except + for historical reasons, these fields would not be exposed at all. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. 
*/ +#include + +/* For evkeyvalq */ +#include + +#define EVLIST_TIMEOUT 0x01 +#define EVLIST_INSERTED 0x02 +#define EVLIST_SIGNAL 0x04 +#define EVLIST_ACTIVE 0x08 +#define EVLIST_INTERNAL 0x10 +#define EVLIST_ACTIVE_LATER 0x20 +#define EVLIST_FINALIZING 0x40 +#define EVLIST_INIT 0x80 + +#define EVLIST_ALL 0xff + +/* Fix so that people don't have to run with */ +#ifndef TAILQ_ENTRY +#define EVENT_DEFINED_TQENTRY_ +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} +#endif /* !TAILQ_ENTRY */ + +#ifndef TAILQ_HEAD +#define EVENT_DEFINED_TQHEAD_ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; \ + struct type **tqh_last; \ +} +#endif + +/* Fix so that people don't have to run with */ +#ifndef LIST_ENTRY +#define EVENT_DEFINED_LISTENTRY_ +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} +#endif /* !LIST_ENTRY */ + +#ifndef LIST_HEAD +#define EVENT_DEFINED_LISTHEAD_ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ + } +#endif /* !LIST_HEAD */ + +struct event; + +struct event_callback { + TAILQ_ENTRY(event_callback) evcb_active_next; + short evcb_flags; + ev_uint8_t evcb_pri; /* smaller numbers are higher priority */ + ev_uint8_t evcb_closure; + /* allows us to adopt for different types of events */ + union { + void (*evcb_callback)(evutil_socket_t, short, void *); + void (*evcb_selfcb)(struct event_callback *, void *); + void (*evcb_evfinalize)(struct event *, void *); + void (*evcb_cbfinalize)(struct event_callback *, void *); + } evcb_cb_union; + void *evcb_arg; +}; + +struct event_base; +struct event { + struct event_callback ev_evcallback; + + /* for managing timeouts */ + union { + TAILQ_ENTRY(event) ev_next_with_common_timeout; + int min_heap_idx; + } ev_timeout_pos; + evutil_socket_t ev_fd; + + struct event_base *ev_base; + + union { + /* used for io events */ + struct { + LIST_ENTRY (event) ev_io_next; + struct timeval ev_timeout; + } ev_io; + + /* used by signal events */ + struct { + LIST_ENTRY (event) ev_signal_next; + short ev_ncalls; + /* Allows deletes in callback */ + short *ev_pncalls; + } ev_signal; + } ev_; + + short ev_events; + short ev_res; /* result passed to event callback */ + struct timeval ev_timeout; +}; + +TAILQ_HEAD (event_list, event); + +#ifdef EVENT_DEFINED_TQENTRY_ +#undef TAILQ_ENTRY +#endif + +#ifdef EVENT_DEFINED_TQHEAD_ +#undef TAILQ_HEAD +#endif + +LIST_HEAD (event_dlist, event); + +#ifdef EVENT_DEFINED_LISTENTRY_ +#undef LIST_ENTRY +#endif + +#ifdef EVENT_DEFINED_LISTHEAD_ +#undef LIST_HEAD +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_STRUCT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/http.h b/probe-busybox/libevent-2.1.11-stable/include/event2/http.h new file mode 100644 index 00000000..2a41303e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/http.h @@ -0,0 +1,1192 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_HTTP_H_INCLUDED_ +#define EVENT2_HTTP_H_INCLUDED_ + +/* For int types. */ +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* In case we haven't included the right headers yet. */ +struct evbuffer; +struct event_base; +struct bufferevent; +struct evhttp_connection; + +/** @file event2/http.h + * + * Basic support for HTTP serving. + * + * As Libevent is a library for dealing with event notification and most + * interesting applications are networked today, I have often found the + * need to write HTTP code. The following prototypes and definitions provide + * an application with a minimal interface for making HTTP requests and for + * creating a very simple HTTP server. + */ + +/* Response codes */ +#define HTTP_OK 200 /**< request completed ok */ +#define HTTP_NOCONTENT 204 /**< request does not have content */ +#define HTTP_MOVEPERM 301 /**< the uri moved permanently */ +#define HTTP_MOVETEMP 302 /**< the uri moved temporarily */ +#define HTTP_NOTMODIFIED 304 /**< page was not modified from last */ +#define HTTP_BADREQUEST 400 /**< invalid http request was made */ +#define HTTP_NOTFOUND 404 /**< could not find content for uri */ +#define HTTP_BADMETHOD 405 /**< method not allowed for this uri */ +#define HTTP_ENTITYTOOLARGE 413 /**< */ +#define HTTP_EXPECTATIONFAILED 417 /**< we can't handle this expectation */ +#define HTTP_INTERNAL 500 /**< internal error */ +#define HTTP_NOTIMPLEMENTED 501 /**< not implemented */ +#define HTTP_SERVUNAVAIL 503 /**< the server is not available */ + +struct evhttp; +struct evhttp_request; +struct evkeyvalq; +struct evhttp_bound_socket; +struct evconnlistener; +struct evdns_base; + +/** + * Create a new HTTP server. + * + * @param base (optional) the event base to receive the HTTP events + * @return a pointer to a newly initialized evhttp server structure or NULL + * on error + * @see evhttp_free() + */ +EVENT2_EXPORT_SYMBOL +struct evhttp *evhttp_new(struct event_base *base); + +/** + * Binds an HTTP server on the specified address and port. + * + * Can be called multiple times to bind the same http server + * to multiple different ports. + * + * @param http a pointer to an evhttp object + * @param address a string containing the IP address to listen(2) on + * @param port the port number to listen on + * @return 0 on success, -1 on failure. 
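+ *
+ * Illustrative sketch (not part of the upstream comment; "base" is an
+ * existing event_base):
+ *
+ *     struct evhttp *http = evhttp_new(base);
+ *     evhttp_bind_socket(http, "0.0.0.0", 8080);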
+ * @see evhttp_accept_socket() + */ +EVENT2_EXPORT_SYMBOL +int evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port); + +/** + * Like evhttp_bind_socket(), but returns a handle for referencing the socket. + * + * The returned pointer is not valid after \a http is freed. + * + * @param http a pointer to an evhttp object + * @param address a string containing the IP address to listen(2) on + * @param port the port number to listen on + * @return Handle for the socket on success, NULL on failure. + * @see evhttp_bind_socket(), evhttp_del_accept_socket() + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_bound_socket *evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port); + +/** + * Makes an HTTP server accept connections on the specified socket. + * + * This may be useful to create a socket and then fork multiple instances + * of an http server, or when a socket has been communicated via file + * descriptor passing in situations where an http servers does not have + * permissions to bind to a low-numbered port. + * + * Can be called multiple times to have the http server listen to + * multiple different sockets. + * + * @param http a pointer to an evhttp object + * @param fd a socket fd that is ready for accepting connections + * @return 0 on success, -1 on failure. + * @see evhttp_bind_socket() + */ +EVENT2_EXPORT_SYMBOL +int evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd); + +/** + * Like evhttp_accept_socket(), but returns a handle for referencing the socket. + * + * The returned pointer is not valid after \a http is freed. + * + * @param http a pointer to an evhttp object + * @param fd a socket fd that is ready for accepting connections + * @return Handle for the socket on success, NULL on failure. + * @see evhttp_accept_socket(), evhttp_del_accept_socket() + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_bound_socket *evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd); + +/** + * The most low-level evhttp_bind/accept method: takes an evconnlistener, and + * returns an evhttp_bound_socket. The listener will be freed when the bound + * socket is freed. + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_bound_socket *evhttp_bind_listener(struct evhttp *http, struct evconnlistener *listener); + +/** + * Return the listener used to implement a bound socket. + */ +EVENT2_EXPORT_SYMBOL +struct evconnlistener *evhttp_bound_socket_get_listener(struct evhttp_bound_socket *bound); + +typedef void evhttp_bound_socket_foreach_fn(struct evhttp_bound_socket *, void *); +/** + * Applies the function specified in the first argument to all + * evhttp_bound_sockets associated with "http". The user must not + * attempt to free or remove any connections, sockets or listeners + * in the callback "function". + * + * @param http pointer to an evhttp object + * @param function function to apply to every bound socket + * @param argument pointer value passed to function for every socket iterated + */ +EVENT2_EXPORT_SYMBOL +void evhttp_foreach_bound_socket(struct evhttp *http, evhttp_bound_socket_foreach_fn *function, void *argument); + +/** + * Makes an HTTP server stop accepting connections on the specified socket + * + * This may be useful when a socket has been sent via file descriptor passing + * and is no longer needed by the current process. + * + * If you created this bound socket with evhttp_bind_socket_with_handle or + * evhttp_accept_socket_with_handle, this function closes the fd you provided. 
+ * If you created this bound socket with evhttp_bind_listener, this function + * frees the listener you provided. + * + * \a bound_socket is an invalid pointer after this call returns. + * + * @param http a pointer to an evhttp object + * @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle + * @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle() + */ +EVENT2_EXPORT_SYMBOL +void evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound_socket); + +/** + * Get the raw file descriptor referenced by an evhttp_bound_socket. + * + * @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle + * @return the file descriptor used by the bound socket + * @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle() + */ +EVENT2_EXPORT_SYMBOL +evutil_socket_t evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound_socket); + +/** + * Free the previously created HTTP server. + * + * Works only if no requests are currently being served. + * + * @param http the evhttp server object to be freed + * @see evhttp_start() + */ +EVENT2_EXPORT_SYMBOL +void evhttp_free(struct evhttp* http); + +/** XXX Document. */ +EVENT2_EXPORT_SYMBOL +void evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size); +/** XXX Document. */ +EVENT2_EXPORT_SYMBOL +void evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size); + +/** + Set the value to use for the Content-Type header when none was provided. If + the content type string is NULL, the Content-Type header will not be + automatically added. + + @param http the http server on which to set the default content type + @param content_type the value for the Content-Type header +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_set_default_content_type(struct evhttp *http, + const char *content_type); + +/** + Sets the what HTTP methods are supported in requests accepted by this + server, and passed to user callbacks. + + If not supported they will generate a "405 Method not allowed" response. + + By default this includes the following methods: GET, POST, HEAD, PUT, DELETE + + @param http the http server on which to set the methods + @param methods bit mask constructed from evhttp_cmd_type values +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_set_allowed_methods(struct evhttp* http, ev_uint16_t methods); + +/** + Set a callback for a specified URI + + @param http the http sever on which to set the callback + @param path the path for which to invoke the callback + @param cb the callback function that gets invoked on requesting path + @param cb_arg an additional context argument for the callback + @return 0 on success, -1 if the callback existed already, -2 on failure +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_set_cb(struct evhttp *http, const char *path, + void (*cb)(struct evhttp_request *, void *), void *cb_arg); + +/** Removes the callback for a specified URI */ +EVENT2_EXPORT_SYMBOL +int evhttp_del_cb(struct evhttp *, const char *); + +/** + Set a callback for all requests that are not caught by specific callbacks + + Invokes the specified callback for all requests that do not match any of + the previously specified request paths. This is catchall for requests not + specifically configured with evhttp_set_cb(). 
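+
+  Illustrative sketch (not part of the upstream comment; "status_cb" and
+  "default_cb" are placeholder callbacks):
+
+      evhttp_set_cb(http, "/status", status_cb, NULL);
+      evhttp_set_gencb(http, default_cb, NULL);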
+ + @param http the evhttp server object for which to set the callback + @param cb the callback to invoke for any unmatched requests + @param arg an context argument for the callback +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_set_gencb(struct evhttp *http, + void (*cb)(struct evhttp_request *, void *), void *arg); + +/** + Set a callback used to create new bufferevents for connections + to a given evhttp object. + + You can use this to override the default bufferevent type -- for example, + to make this evhttp object use SSL bufferevents rather than unencrypted + ones. + + New bufferevents must be allocated with no fd set on them. + + @param http the evhttp server object for which to set the callback + @param cb the callback to invoke for incoming connections + @param arg an context argument for the callback + */ +EVENT2_EXPORT_SYMBOL +void evhttp_set_bevcb(struct evhttp *http, + struct bufferevent *(*cb)(struct event_base *, void *), void *arg); + +/** + Adds a virtual host to the http server. + + A virtual host is a newly initialized evhttp object that has request + callbacks set on it via evhttp_set_cb() or evhttp_set_gencb(). It + most not have any listing sockets associated with it. + + If the virtual host has not been removed by the time that evhttp_free() + is called on the main http server, it will be automatically freed, too. + + It is possible to have hierarchical vhosts. For example: A vhost + with the pattern *.example.com may have other vhosts with patterns + foo.example.com and bar.example.com associated with it. + + @param http the evhttp object to which to add a virtual host + @param pattern the glob pattern against which the hostname is matched. + The match is case insensitive and follows otherwise regular shell + matching. + @param vhost the virtual host to add the regular http server. + @return 0 on success, -1 on failure + @see evhttp_remove_virtual_host() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_add_virtual_host(struct evhttp* http, const char *pattern, + struct evhttp* vhost); + +/** + Removes a virtual host from the http server. + + @param http the evhttp object from which to remove the virtual host + @param vhost the virtual host to remove from the regular http server. + @return 0 on success, -1 on failure + @see evhttp_add_virtual_host() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost); + +/** + Add a server alias to an http object. The http object can be a virtual + host or the main server. + + @param http the evhttp object + @param alias the alias to add + @see evhttp_add_remove_alias() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_add_server_alias(struct evhttp *http, const char *alias); + +/** + Remove a server alias from an http object. + + @param http the evhttp object + @param alias the alias to remove + @see evhttp_add_server_alias() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_remove_server_alias(struct evhttp *http, const char *alias); + +/** + * Set the timeout for an HTTP request. + * + * @param http an evhttp object + * @param timeout_in_secs the timeout, in seconds + */ +EVENT2_EXPORT_SYMBOL +void evhttp_set_timeout(struct evhttp *http, int timeout_in_secs); + +/** + * Set the timeout for an HTTP request. 
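+ *
+ * Illustrative usage (not part of the upstream comment):
+ *
+ *     struct timeval tv = { 30, 0 };
+ *     evhttp_set_timeout_tv(http, &tv);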
+ * + * @param http an evhttp object + * @param tv the timeout, or NULL + */ +EVENT2_EXPORT_SYMBOL +void evhttp_set_timeout_tv(struct evhttp *http, const struct timeval* tv); + +/* Read the client's entire body, and only then respond with an error if the + * body exceeds max_body_size */ +#define EVHTTP_SERVER_LINGERING_CLOSE 0x0001 +/** + * Set connection flags for HTTP server. + * + * @see EVHTTP_SERVER_* + * @return 0 on success, otherwise non zero (for example, if the flag is not + * supported). + */ +EVENT2_EXPORT_SYMBOL +int evhttp_set_flags(struct evhttp *http, int flags); + +/* Request/Response functionality */ + +/** + * Send an HTML error message to the client. + * + * @param req a request object + * @param error the HTTP error code + * @param reason a brief explanation of the error. If this is NULL, we'll + * just use the standard meaning of the error code. + */ +EVENT2_EXPORT_SYMBOL +void evhttp_send_error(struct evhttp_request *req, int error, + const char *reason); + +/** + * Send an HTML reply to the client. + * + * The body of the reply consists of the data in databuf. After calling + * evhttp_send_reply() databuf will be empty, but the buffer is still + * owned by the caller and needs to be deallocated by the caller if + * necessary. + * + * @param req a request object + * @param code the HTTP response code to send + * @param reason a brief message to send with the response code + * @param databuf the body of the response + */ +EVENT2_EXPORT_SYMBOL +void evhttp_send_reply(struct evhttp_request *req, int code, + const char *reason, struct evbuffer *databuf); + +/* Low-level response interface, for streaming/chunked replies */ + +/** + Initiate a reply that uses Transfer-Encoding chunked. + + This allows the caller to stream the reply back to the client and is + useful when either not all of the reply data is immediately available + or when sending very large replies. + + The caller needs to supply data chunks with evhttp_send_reply_chunk() + and complete the reply by calling evhttp_send_reply_end(). + + @param req a request object + @param code the HTTP response code to send + @param reason a brief message to send with the response code +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_send_reply_start(struct evhttp_request *req, int code, + const char *reason); + +/** + Send another data chunk as part of an ongoing chunked reply. + + The reply chunk consists of the data in databuf. After calling + evhttp_send_reply_chunk() databuf will be empty, but the buffer is + still owned by the caller and needs to be deallocated by the caller + if necessary. + + @param req a request object + @param databuf the data chunk to send as part of the reply. +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_send_reply_chunk(struct evhttp_request *req, + struct evbuffer *databuf); + +/** + Send another data chunk as part of an ongoing chunked reply. + + The reply chunk consists of the data in databuf. After calling + evhttp_send_reply_chunk() databuf will be empty, but the buffer is + still owned by the caller and needs to be deallocated by the caller + if necessary. + + @param req a request object + @param databuf the data chunk to send as part of the reply. + @param cb the callback function + @param arg the callback's argument. +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_send_reply_chunk_with_cb(struct evhttp_request *, struct evbuffer *, + void (*cb)(struct evhttp_connection *, void *), void *arg); + +/** + Complete a chunked reply, freeing the request as appropriate.
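+
+  A sketch of the full chunked sequence (assuming <event2/buffer.h>); note
+  that a real handler would normally emit later chunks from callbacks or
+  timers rather than all at once:
+
+      static void
+      stream_cb(struct evhttp_request *req, void *arg)
+      {
+              struct evbuffer *chunk = evbuffer_new();
+              int i;
+
+              evhttp_send_reply_start(req, HTTP_OK, "OK");
+              for (i = 0; i < 3; i++) {
+                      evbuffer_add_printf(chunk, "part %d\n", i);
+                      evhttp_send_reply_chunk(req, chunk);
+              }
+              evhttp_send_reply_end(req);
+              evbuffer_free(chunk);
+      }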
+ + @param req a request object +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_send_reply_end(struct evhttp_request *req); + +/* + * Interfaces for making requests + */ + +/** The different request types supported by evhttp. These are as specified + * in RFC2616, except for PATCH which is specified by RFC5789. + * + * By default, only some of these methods are accepted and passed to user + * callbacks; use evhttp_set_allowed_methods() to change which methods + * are allowed. + */ +enum evhttp_cmd_type { + EVHTTP_REQ_GET = 1 << 0, + EVHTTP_REQ_POST = 1 << 1, + EVHTTP_REQ_HEAD = 1 << 2, + EVHTTP_REQ_PUT = 1 << 3, + EVHTTP_REQ_DELETE = 1 << 4, + EVHTTP_REQ_OPTIONS = 1 << 5, + EVHTTP_REQ_TRACE = 1 << 6, + EVHTTP_REQ_CONNECT = 1 << 7, + EVHTTP_REQ_PATCH = 1 << 8 +}; + +/** a request object can represent either a request or a reply */ +enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE }; + +/** + * Create and return a connection object that can be used to for making HTTP + * requests. The connection object tries to resolve address and establish the + * connection when it is given an http request object. + * + * @param base the event_base to use for handling the connection + * @param dnsbase the dns_base to use for resolving host names; if not + * specified host name resolution will block. + * @param bev a bufferevent to use for connecting to the server; if NULL, a + * socket-based bufferevent will be created. This buffrevent will be freed + * when the connection closes. It must have no fd set on it. + * @param address the address to which to connect + * @param port the port to connect to + * @return an evhttp_connection object that can be used for making requests or + * NULL on error + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_connection *evhttp_connection_base_bufferevent_new( + struct event_base *base, struct evdns_base *dnsbase, struct bufferevent* bev, const char *address, ev_uint16_t port); + +/** + * Return the bufferevent that an evhttp_connection is using. + */ +EVENT2_EXPORT_SYMBOL +struct bufferevent* evhttp_connection_get_bufferevent(struct evhttp_connection *evcon); + +/** + * Return the HTTP server associated with this connection, or NULL. + */ +EVENT2_EXPORT_SYMBOL +struct evhttp *evhttp_connection_get_server(struct evhttp_connection *evcon); + +/** + * Creates a new request object that needs to be filled in with the request + * parameters. The callback is executed when the request completed or an + * error occurred. + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_request *evhttp_request_new( + void (*cb)(struct evhttp_request *, void *), void *arg); + +/** + * Enable delivery of chunks to requestor. + * @param cb will be called after every read of data with the same argument + * as the completion callback. Will never be called on an empty + * response. May drain the input buffer; it will be drained + * automatically on return. + */ +EVENT2_EXPORT_SYMBOL +void evhttp_request_set_chunked_cb(struct evhttp_request *, + void (*cb)(struct evhttp_request *, void *)); + +/** + * Register callback for additional parsing of request headers. + * @param cb will be called after receiving and parsing the full header. + * It allows analyzing the header and possibly closing the connection + * by returning a value < 0. 
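+ *
+ * A sketch of such a callback (the header name X-Token is only an
+ * example): it closes the connection unless the header is present.
+ *
+ *     static int
+ *     require_token_cb(struct evhttp_request *req, void *arg)
+ *     {
+ *             const char *tok = evhttp_find_header(
+ *                 evhttp_request_get_input_headers(req), "X-Token");
+ *             return tok != NULL ? 0 : -1;
+ *     }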
+ */ +EVENT2_EXPORT_SYMBOL +void evhttp_request_set_header_cb(struct evhttp_request *, + int (*cb)(struct evhttp_request *, void *)); + +/** + * The different error types supported by evhttp + * + * @see evhttp_request_set_error_cb() + */ +enum evhttp_request_error { + /** + * Timeout reached, also @see evhttp_connection_set_timeout() + */ + EVREQ_HTTP_TIMEOUT, + /** + * EOF reached + */ + EVREQ_HTTP_EOF, + /** + * Error while reading header, or invalid header + */ + EVREQ_HTTP_INVALID_HEADER, + /** + * Error encountered while reading or writing + */ + EVREQ_HTTP_BUFFER_ERROR, + /** + * The evhttp_cancel_request() called on this request. + */ + EVREQ_HTTP_REQUEST_CANCEL, + /** + * Body is greater then evhttp_connection_set_max_body_size() + */ + EVREQ_HTTP_DATA_TOO_LONG +}; +/** + * Set a callback for errors + * @see evhttp_request_error for error types. + * + * On error, both the error callback and the regular callback will be called, + * error callback is called before the regular callback. + **/ +EVENT2_EXPORT_SYMBOL +void evhttp_request_set_error_cb(struct evhttp_request *, + void (*)(enum evhttp_request_error, void *)); + +/** + * Set a callback to be called on request completion of evhttp_send_* function. + * + * The callback function will be called on the completion of the request after + * the output data has been written and before the evhttp_request object + * is destroyed. This can be useful for tracking resources associated with a + * request (ex: timing metrics). + * + * @param req a request object + * @param cb callback function that will be called on request completion + * @param cb_arg an additional context argument for the callback + */ +EVENT2_EXPORT_SYMBOL +void evhttp_request_set_on_complete_cb(struct evhttp_request *req, + void (*cb)(struct evhttp_request *, void *), void *cb_arg); + +/** Frees the request object and removes associated events. */ +EVENT2_EXPORT_SYMBOL +void evhttp_request_free(struct evhttp_request *req); + +/** + * Create and return a connection object that can be used to for making HTTP + * requests. The connection object tries to resolve address and establish the + * connection when it is given an http request object. + * + * @param base the event_base to use for handling the connection + * @param dnsbase the dns_base to use for resolving host names; if not + * specified host name resolution will block. + * @param address the address to which to connect + * @param port the port to connect to + * @return an evhttp_connection object that can be used for making requests or + * NULL on error + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_connection *evhttp_connection_base_new( + struct event_base *base, struct evdns_base *dnsbase, + const char *address, ev_uint16_t port); + +/** + * Set family hint for DNS requests. + */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_family(struct evhttp_connection *evcon, + int family); + +/* reuse connection address on retry */ +#define EVHTTP_CON_REUSE_CONNECTED_ADDR 0x0008 +/* Try to read error, since server may already send and close + * connection, but if at that time we have some data to send then we + * can send get EPIPE and fail, while we can read that HTTP error. */ +#define EVHTTP_CON_READ_ON_WRITE_ERROR 0x0010 +/* @see EVHTTP_SERVER_LINGERING_CLOSE */ +#define EVHTTP_CON_LINGERING_CLOSE 0x0020 +/* Padding for public flags, @see EVHTTP_CON_* in http-internal.h */ +#define EVHTTP_CON_PUBLIC_FLAGS_END 0x100000 +/** + * Set connection flags. 
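+ *
+ * For example (a sketch; evcon is an existing evhttp_connection):
+ *
+ *     evhttp_connection_set_flags(evcon,
+ *         EVHTTP_CON_REUSE_CONNECTED_ADDR | EVHTTP_CON_READ_ON_WRITE_ERROR);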
+ * + * @see EVHTTP_CON_* + * @return 0 on success, otherwise non zero (for example if flag doesn't + * supported). + */ +EVENT2_EXPORT_SYMBOL +int evhttp_connection_set_flags(struct evhttp_connection *evcon, + int flags); + +/** Takes ownership of the request object + * + * Can be used in a request callback to keep onto the request until + * evhttp_request_free() is explicitly called by the user. + */ +EVENT2_EXPORT_SYMBOL +void evhttp_request_own(struct evhttp_request *req); + +/** Returns 1 if the request is owned by the user */ +EVENT2_EXPORT_SYMBOL +int evhttp_request_is_owned(struct evhttp_request *req); + +/** + * Returns the connection object associated with the request or NULL + * + * The user needs to either free the request explicitly or call + * evhttp_send_reply_end(). + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_connection *evhttp_request_get_connection(struct evhttp_request *req); + +/** + * Returns the underlying event_base for this connection + */ +EVENT2_EXPORT_SYMBOL +struct event_base *evhttp_connection_get_base(struct evhttp_connection *req); + +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon, + ev_ssize_t new_max_headers_size); + +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_max_body_size(struct evhttp_connection* evcon, + ev_ssize_t new_max_body_size); + +/** Frees an http connection */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_free(struct evhttp_connection *evcon); + +/** Disowns a given connection object + * + * Can be used to tell libevent to free the connection object after + * the last request has completed or failed. + */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_free_on_completion(struct evhttp_connection *evcon); + +/** sets the ip address from which http connections are made */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_local_address(struct evhttp_connection *evcon, + const char *address); + +/** sets the local port from which http connections are made */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_local_port(struct evhttp_connection *evcon, + ev_uint16_t port); + +/** Sets the timeout in seconds for events related to this connection */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_timeout(struct evhttp_connection *evcon, + int timeout_in_secs); + +/** Sets the timeout for events related to this connection. Takes a struct + * timeval. */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_timeout_tv(struct evhttp_connection *evcon, + const struct timeval *tv); + +/** Sets the delay before retrying requests on this connection. This is only + * used if evhttp_connection_set_retries is used to make the number of retries + * at least one. Each retry after the first is twice as long as the one before + * it. */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_initial_retry_tv(struct evhttp_connection *evcon, + const struct timeval *tv); + +/** Sets the retry limit for this connection - -1 repeats indefinitely */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_retries(struct evhttp_connection *evcon, + int retry_max); + +/** Set a callback for connection close. */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_closecb(struct evhttp_connection *evcon, + void (*)(struct evhttp_connection *, void *), void *); + +/** Get the remote address and port associated with this connection. */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_get_peer(struct evhttp_connection *evcon, + char **address, ev_uint16_t *port); + +/** Get the remote address associated with this connection. 
+ * extracted from getpeername() OR from nameserver. + * + * @return NULL if getpeername() return non success, + * or connection is not connected, + * otherwise it return pointer to struct sockaddr_storage */ +EVENT2_EXPORT_SYMBOL +const struct sockaddr* +evhttp_connection_get_addr(struct evhttp_connection *evcon); + +/** + Make an HTTP request over the specified connection. + + The connection gets ownership of the request. On failure, the + request object is no longer valid as it has been freed. + + @param evcon the evhttp_connection object over which to send the request + @param req the previously created and configured request object + @param type the request type EVHTTP_REQ_GET, EVHTTP_REQ_POST, etc. + @param uri the URI associated with the request + @return 0 on success, -1 on failure + @see evhttp_cancel_request() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_make_request(struct evhttp_connection *evcon, + struct evhttp_request *req, + enum evhttp_cmd_type type, const char *uri); + +/** + Cancels a pending HTTP request. + + Cancels an ongoing HTTP request. The callback associated with this request + is not executed and the request object is freed. If the request is + currently being processed, e.g. it is ongoing, the corresponding + evhttp_connection object is going to get reset. + + A request cannot be canceled if its callback has executed already. A request + may be canceled reentrantly from its chunked callback. + + @param req the evhttp_request to cancel; req becomes invalid after this call. +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_cancel_request(struct evhttp_request *req); + +/** + * A structure to hold a parsed URI or Relative-Ref conforming to RFC3986. + */ +struct evhttp_uri; + +/** Returns the request URI */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_request_get_uri(const struct evhttp_request *req); +/** Returns the request URI (parsed) */ +EVENT2_EXPORT_SYMBOL +const struct evhttp_uri *evhttp_request_get_evhttp_uri(const struct evhttp_request *req); +/** Returns the request command */ +EVENT2_EXPORT_SYMBOL +enum evhttp_cmd_type evhttp_request_get_command(const struct evhttp_request *req); + +EVENT2_EXPORT_SYMBOL +int evhttp_request_get_response_code(const struct evhttp_request *req); +EVENT2_EXPORT_SYMBOL +const char * evhttp_request_get_response_code_line(const struct evhttp_request *req); + +/** Returns the input headers */ +EVENT2_EXPORT_SYMBOL +struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req); +/** Returns the output headers */ +EVENT2_EXPORT_SYMBOL +struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req); +/** Returns the input buffer */ +EVENT2_EXPORT_SYMBOL +struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req); +/** Returns the output buffer */ +EVENT2_EXPORT_SYMBOL +struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req); +/** Returns the host associated with the request. If a client sends an absolute + URI, the host part of that is preferred. Otherwise, the input headers are + searched for a Host: header. NULL is returned if no absolute URI or Host: + header is provided. */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_request_get_host(struct evhttp_request *req); + +/* Interfaces for dealing with HTTP headers */ + +/** + Finds the value belonging to a header. + + @param headers the evkeyvalq object in which to find the header + @param key the name of the header to find + @returns a pointer to the value for the header or NULL if the header + could not be found. 
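+
+  For example, a sketch of falling back to a default when a request carries
+  no Content-Type header:
+
+      const char *ctype = evhttp_find_header(
+          evhttp_request_get_input_headers(req), "Content-Type");
+      if (ctype == NULL)
+              ctype = "application/octet-stream";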
+ @see evhttp_add_header(), evhttp_remove_header() +*/ +EVENT2_EXPORT_SYMBOL +const char *evhttp_find_header(const struct evkeyvalq *headers, + const char *key); + +/** + Removes a header from a list of existing headers. + + @param headers the evkeyvalq object from which to remove a header + @param key the name of the header to remove + @returns 0 if the header was removed, -1 otherwise. + @see evhttp_find_header(), evhttp_add_header() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_remove_header(struct evkeyvalq *headers, const char *key); + +/** + Adds a header to a list of existing headers. + + @param headers the evkeyvalq object to which to add a header + @param key the name of the header + @param value the value belonging to the header + @returns 0 on success, -1 otherwise. + @see evhttp_find_header(), evhttp_clear_headers() +*/ +EVENT2_EXPORT_SYMBOL +int evhttp_add_header(struct evkeyvalq *headers, const char *key, const char *value); + +/** + Removes all headers from the header list. + + @param headers the evkeyvalq object from which to remove all headers +*/ +EVENT2_EXPORT_SYMBOL +void evhttp_clear_headers(struct evkeyvalq *headers); + +/* Miscellaneous utility functions */ + + +/** + Helper function to encode a string for inclusion in a URI. All + characters are replaced by their hex-escaped (%22) equivalents, + except for characters explicitly unreserved by RFC3986 -- that is, + ASCII alphanumeric characters, hyphen, dot, underscore, and tilde. + + The returned string must be freed by the caller. + + @param str an unencoded string + @return a newly allocated URI-encoded string or NULL on failure + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_encode_uri(const char *str); + +/** + As evhttp_encode_uri, but if 'size' is nonnegative, treat the string + as being 'size' bytes long. This allows you to encode strings that + may contain 0-valued bytes. + + The returned string must be freed by the caller. + + @param str an unencoded string + @param size the length of the string to encode, or -1 if the string + is NUL-terminated + @param space_to_plus if true, space characters in 'str' are encoded + as +, not %20. + @return a newly allocate URI-encoded string, or NULL on failure. + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_uriencode(const char *str, ev_ssize_t size, int space_to_plus); + +/** + Helper function to sort of decode a URI-encoded string. Unlike + evhttp_uridecode, it decodes all plus characters that appear + _after_ the first question mark character, but no plusses that occur + before. This is not a good way to decode URIs in whole or in part. + + The returned string must be freed by the caller + + @deprecated This function is deprecated; you probably want to use + evhttp_uridecode instead. + + @param uri an encoded URI + @return a newly allocated unencoded URI or NULL on failure + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_decode_uri(const char *uri); + +/** + Helper function to decode a URI-escaped string or HTTP parameter. + + If 'decode_plus' is 1, then we decode the string as an HTTP parameter + value, and convert all plus ('+') characters to spaces. If + 'decode_plus' is 0, we leave all plus characters unchanged. + + The returned string must be freed by the caller. + + @param uri a URI-encode encoded URI + @param decode_plus determines whether we convert '+' to space. 
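+  (For instance, a sketch: with decode_plus set to 1,
+
+      char *plain = evhttp_uridecode("some%20thing+else", 1, NULL);
+
+  yields a newly allocated string "some thing else", which the caller
+  must free().)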
+ @param size_out if size_out is not NULL, *size_out is set to the size of the + returned string + @return a newly allocated unencoded URI or NULL on failure + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_uridecode(const char *uri, int decode_plus, + size_t *size_out); + +/** + Helper function to parse out arguments in a query. + + Parsing a URI like + + http://foo.com/?q=test&s=some+thing + + will result in two entries in the key value queue. + + The first entry is: key="q", value="test" + The second entry is: key="s", value="some thing" + + @deprecated This function is deprecated as of Libevent 2.0.9. Use + evhttp_uri_parse and evhttp_parse_query_str instead. + + @param uri the request URI + @param headers the head of the evkeyval queue + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evhttp_parse_query(const char *uri, struct evkeyvalq *headers); + +/** + Helper function to parse out arguments from the query portion of an + HTTP URI. + + Parsing a query string like + + q=test&s=some+thing + + will result in two entries in the key value queue. + + The first entry is: key="q", value="test" + The second entry is: key="s", value="some thing" + + @param uri the query portion of the URI + @param headers the head of the evkeyval queue + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evhttp_parse_query_str(const char *uri, struct evkeyvalq *headers); + +/** + * Escape HTML character entities in a string. + * + * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;, + * &#039; and &amp; correspondingly. + * + * The returned string needs to be freed by the caller. + * + * @param html an unescaped HTML string + * @return an escaped HTML string or NULL on error + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_htmlescape(const char *html); + +/** + * Return a new empty evhttp_uri with no fields set. + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_uri *evhttp_uri_new(void); + +/** + * Changes the flags set on a given URI. See EVHTTP_URI_* for + * a list of flags. + **/ +EVENT2_EXPORT_SYMBOL +void evhttp_uri_set_flags(struct evhttp_uri *uri, unsigned flags); + +/** Return the scheme of an evhttp_uri, or NULL if no scheme has + * been set and the evhttp_uri contains a Relative-Ref. */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_scheme(const struct evhttp_uri *uri); +/** + * Return the userinfo part of an evhttp_uri, or NULL if it has no userinfo + * set. + */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_userinfo(const struct evhttp_uri *uri); +/** + * Return the host part of an evhttp_uri, or NULL if it has no host set. + * The host may either be a regular hostname (conforming to the RFC 3986 + * "regname" production), or an IPv4 address, or the empty string, or a + * bracketed IPv6 address, or a bracketed 'IP-Future' address. + * + * Note that having a NULL host means that the URI has no authority + * section, but having an empty-string host means that the URI has an + * authority section with no host part. For example, + * "mailto:user@example.com" has a host of NULL, but "file:///etc/motd" + * has a host of "". + */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_host(const struct evhttp_uri *uri); +/** Return the port part of an evhttp_uri, or -1 if there is no port set.
*/ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_get_port(const struct evhttp_uri *uri); +/** Return the path part of an evhttp_uri, or NULL if it has no path set */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_path(const struct evhttp_uri *uri); +/** Return the query part of an evhttp_uri (excluding the leading "?"), or + * NULL if it has no query set */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_query(const struct evhttp_uri *uri); +/** Return the fragment part of an evhttp_uri (excluding the leading "#"), + * or NULL if it has no fragment set */ +EVENT2_EXPORT_SYMBOL +const char *evhttp_uri_get_fragment(const struct evhttp_uri *uri); + +/** Set the scheme of an evhttp_uri, or clear the scheme if scheme==NULL. + * Returns 0 on success, -1 if scheme is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme); +/** Set the userinfo of an evhttp_uri, or clear the userinfo if userinfo==NULL. + * Returns 0 on success, -1 if userinfo is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo); +/** Set the host of an evhttp_uri, or clear the host if host==NULL. + * Returns 0 on success, -1 if host is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_host(struct evhttp_uri *uri, const char *host); +/** Set the port of an evhttp_uri, or clear the port if port==-1. + * Returns 0 on success, -1 if port is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_port(struct evhttp_uri *uri, int port); +/** Set the path of an evhttp_uri, or clear the path if path==NULL. + * Returns 0 on success, -1 if path is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_path(struct evhttp_uri *uri, const char *path); +/** Set the query of an evhttp_uri, or clear the query if query==NULL. + * The query should not include a leading "?". + * Returns 0 on success, -1 if query is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_query(struct evhttp_uri *uri, const char *query); +/** Set the fragment of an evhttp_uri, or clear the fragment if fragment==NULL. + * The fragment should not include a leading "#". + * Returns 0 on success, -1 if fragment is not well-formed. */ +EVENT2_EXPORT_SYMBOL +int evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment); + +/** + * Helper function to parse a URI-Reference as specified by RFC3986. + * + * This function matches the URI-Reference production from RFC3986, + * which includes both URIs like + * + * scheme://[[userinfo]@]foo.com[:port]]/[path][?query][#fragment] + * + * and relative-refs like + * + * [path][?query][#fragment] + * + * Any optional elements portions not present in the original URI are + * left set to NULL in the resulting evhttp_uri. If no port is + * specified, the port is set to -1. + * + * Note that no decoding is performed on percent-escaped characters in + * the string; if you want to parse them, use evhttp_uridecode or + * evhttp_parse_query_str as appropriate. + * + * Note also that most URI schemes will have additional constraints that + * this function does not know about, and cannot check. For example, + * mailto://www.example.com/cgi-bin/fortune.pl is not a reasonable + * mailto url, http://www.example.com:99999/ is not a reasonable HTTP + * URL, and ftp:username@example.com is not a reasonable FTP URL. + * Nevertheless, all of these URLs conform to RFC3986, and this function + * accepts all of them as valid. 
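+ *
+ * A brief sketch of typical use (hypothetical URI):
+ *
+ *     struct evhttp_uri *uri =
+ *         evhttp_uri_parse("http://user@example.com:8080/a/b?q=1#top");
+ *     if (uri != NULL) {
+ *             const char *host = evhttp_uri_get_host(uri);
+ *             int port = evhttp_uri_get_port(uri);
+ *             const char *query = evhttp_uri_get_query(uri);
+ *             evhttp_uri_free(uri);
+ *     }
+ *
+ * Here host is "example.com", port is 8080, and query is "q=1"; the
+ * userinfo, path, and fragment parts are available through the other
+ * accessors above.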
+ * + * @param source_uri the request URI + * @param flags Zero or more EVHTTP_URI_* flags to affect the behavior + * of the parser. + * @return uri container to hold parsed data, or NULL if there is error + * @see evhttp_uri_free() + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_uri *evhttp_uri_parse_with_flags(const char *source_uri, + unsigned flags); + +/** Tolerate URIs that do not conform to RFC3986. + * + * Unfortunately, some HTTP clients generate URIs that, according to RFC3986, + * are not conformant URIs. If you need to support these URIs, you can + * do so by passing this flag to evhttp_uri_parse_with_flags. + * + * Currently, these changes are: + *
+ *   - Nonconformant URIs are allowed to contain otherwise unreasonable + *     characters in their path, query, and fragment components. + *
+ */ +#define EVHTTP_URI_NONCONFORMANT 0x01 + +/** Alias for evhttp_uri_parse_with_flags(source_uri, 0) */ +EVENT2_EXPORT_SYMBOL +struct evhttp_uri *evhttp_uri_parse(const char *source_uri); + +/** + * Free all memory allocated for a parsed uri. Only use this for URIs + * generated by evhttp_uri_parse. + * + * @param uri container with parsed data + * @see evhttp_uri_parse() + */ +EVENT2_EXPORT_SYMBOL +void evhttp_uri_free(struct evhttp_uri *uri); + +/** + * Join together the uri parts from parsed data to form a URI-Reference. + * + * Note that no escaping of reserved characters is done on the members + * of the evhttp_uri, so the generated string might not be a valid URI + * unless the members of evhttp_uri are themselves valid. + * + * @param uri container with parsed data + * @param buf destination buffer + * @param limit destination buffer size + * @return an joined uri as string or NULL on error + * @see evhttp_uri_parse() + */ +EVENT2_EXPORT_SYMBOL +char *evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_HTTP_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/http_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/http_compat.h new file mode 100644 index 00000000..794a5810 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/http_compat.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_HTTP_COMPAT_H_INCLUDED_ +#define EVENT2_HTTP_COMPAT_H_INCLUDED_ + +/** @file event2/http_compat.h + + Potentially non-threadsafe versions of the functions in http.h: provided + only for backwards compatibility. + + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. 
*/ +#include + +/** + * Start an HTTP server on the specified address and port + * + * @deprecated It does not allow an event base to be specified + * + * @param address the address to which the HTTP server should be bound + * @param port the port number on which the HTTP server should listen + * @return a pointer to a newly initialized evhttp server structure + * or NULL on error + */ +EVENT2_EXPORT_SYMBOL +struct evhttp *evhttp_start(const char *address, ev_uint16_t port); + +/** + * A connection object that can be used to for making HTTP requests. The + * connection object tries to establish the connection when it is given an + * http request object. + * + * @deprecated It does not allow an event base to be specified + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_connection *evhttp_connection_new( + const char *address, ev_uint16_t port); + +/** + * Associates an event base with the connection - can only be called + * on a freshly created connection object that has not been used yet. + * + * @deprecated XXXX Why? + */ +EVENT2_EXPORT_SYMBOL +void evhttp_connection_set_base(struct evhttp_connection *evcon, + struct event_base *base); + + +/** Returns the request URI */ +#define evhttp_request_uri evhttp_request_get_uri + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_COMPAT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/http_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/http_struct.h new file mode 100644 index 00000000..4bf5b1ff --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/http_struct.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_HTTP_STRUCT_H_INCLUDED_ +#define EVENT2_HTTP_STRUCT_H_INCLUDED_ + +/** @file event2/http_struct.h + + Data structures for http. Using these structures may hurt forward + compatibility with later versions of Libevent: be careful! 
+ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include + +/** + * the request structure that a server receives. + * WARNING: expect this structure to change. I will try to provide + * reasonable accessors. + */ +struct evhttp_request { +#if defined(TAILQ_ENTRY) + TAILQ_ENTRY(evhttp_request) next; +#else +struct { + struct evhttp_request *tqe_next; + struct evhttp_request **tqe_prev; +} next; +#endif + + /* the connection object that this request belongs to */ + struct evhttp_connection *evcon; + int flags; +/** The request obj owns the evhttp connection and needs to free it */ +#define EVHTTP_REQ_OWN_CONNECTION 0x0001 +/** Request was made via a proxy */ +#define EVHTTP_PROXY_REQUEST 0x0002 +/** The request object is owned by the user; the user must free it */ +#define EVHTTP_USER_OWNED 0x0004 +/** The request will be used again upstack; freeing must be deferred */ +#define EVHTTP_REQ_DEFER_FREE 0x0008 +/** The request should be freed upstack */ +#define EVHTTP_REQ_NEEDS_FREE 0x0010 + + struct evkeyvalq *input_headers; + struct evkeyvalq *output_headers; + + /* address of the remote host and the port connection came from */ + char *remote_host; + ev_uint16_t remote_port; + + /* cache of the hostname for evhttp_request_get_host */ + char *host_cache; + + enum evhttp_request_kind kind; + enum evhttp_cmd_type type; + + size_t headers_size; + size_t body_size; + + char *uri; /* uri after HTTP request was parsed */ + struct evhttp_uri *uri_elems; /* uri elements */ + + char major; /* HTTP Major number */ + char minor; /* HTTP Minor number */ + + int response_code; /* HTTP Response code */ + char *response_code_line; /* Readable response */ + + struct evbuffer *input_buffer; /* read data */ + ev_int64_t ntoread; + unsigned chunked:1, /* a chunked request */ + userdone:1; /* the user has sent all data */ + + struct evbuffer *output_buffer; /* outgoing post or data */ + + /* Callback */ + void (*cb)(struct evhttp_request *, void *); + void *cb_arg; + + /* + * Chunked data callback - call for each completed chunk if + * specified. If not specified, all the data is delivered via + * the regular callback. + */ + void (*chunk_cb)(struct evhttp_request *, void *); + + /* + * Callback added for forked-daapd so they can collect ICY + * (shoutcast) metadata from the http header. If return + * int is negative the connection will be closed. + */ + int (*header_cb)(struct evhttp_request *, void *); + + /* + * Error callback - called when error is occured. + * @see evhttp_request_error for error types. + * + * @see evhttp_request_set_error_cb() + */ + void (*error_cb)(enum evhttp_request_error, void *); + + /* + * Send complete callback - called when the request is actually + * sent and completed. 
+ */ + void (*on_complete_cb)(struct evhttp_request *, void *); + void *on_complete_cb_arg; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_HTTP_STRUCT_H_INCLUDED_ */ + diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/keyvalq_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/keyvalq_struct.h new file mode 100644 index 00000000..bffa54b3 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/keyvalq_struct.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_KEYVALQ_STRUCT_H_INCLUDED_ +#define EVENT2_KEYVALQ_STRUCT_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Fix so that people don't have to run with */ +/* XXXX This code is duplicated with event_struct.h */ +#ifndef TAILQ_ENTRY +#define EVENT_DEFINED_TQENTRY_ +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} +#endif /* !TAILQ_ENTRY */ + +#ifndef TAILQ_HEAD +#define EVENT_DEFINED_TQHEAD_ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; \ + struct type **tqh_last; \ +} +#endif + +/* + * Key-Value pairs. Can be used for HTTP headers but also for + * query argument parsing. 
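+ *
+ * A sketch of iterating over a parsed query (assuming <event2/http.h>,
+ * <stdio.h>, and the full TAILQ_* macros from <sys/queue.h>):
+ *
+ *     struct evkeyvalq params;
+ *     struct evkeyval *kv;
+ *
+ *     if (evhttp_parse_query_str("q=test&s=some+thing", &params) == 0) {
+ *             TAILQ_FOREACH(kv, &params, next)
+ *                     printf("%s = %s\n", kv->key, kv->value);
+ *             evhttp_clear_headers(&params);
+ *     }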
+ */ +struct evkeyval { + TAILQ_ENTRY(evkeyval) next; + + char *key; + char *value; +}; + +TAILQ_HEAD (evkeyvalq, evkeyval); + +/* XXXX This code is duplicated with event_struct.h */ +#ifdef EVENT_DEFINED_TQENTRY_ +#undef TAILQ_ENTRY +#endif + +#ifdef EVENT_DEFINED_TQHEAD_ +#undef TAILQ_HEAD +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/listener.h b/probe-busybox/libevent-2.1.11-stable/include/event2/listener.h new file mode 100644 index 00000000..789a27c2 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/listener.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_LISTENER_H_INCLUDED_ +#define EVENT2_LISTENER_H_INCLUDED_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +struct sockaddr; +struct evconnlistener; + +/** + A callback that we invoke when a listener has a new connection. + + @param listener The evconnlistener + @param fd The new file descriptor + @param addr The source address of the connection + @param socklen The length of addr + @param user_arg the pointer passed to evconnlistener_new() + */ +typedef void (*evconnlistener_cb)(struct evconnlistener *, evutil_socket_t, struct sockaddr *, int socklen, void *); + +/** + A callback that we invoke when a listener encounters a non-retriable error. + + @param listener The evconnlistener + @param user_arg the pointer passed to evconnlistener_new() + */ +typedef void (*evconnlistener_errorcb)(struct evconnlistener *, void *); + +/** Flag: Indicates that we should not make incoming sockets nonblocking + * before passing them to the callback. */ +#define LEV_OPT_LEAVE_SOCKETS_BLOCKING (1u<<0) +/** Flag: Indicates that freeing the listener should close the underlying + * socket. 
*/ +#define LEV_OPT_CLOSE_ON_FREE (1u<<1) +/** Flag: Indicates that we should set the close-on-exec flag, if possible */ +#define LEV_OPT_CLOSE_ON_EXEC (1u<<2) +/** Flag: Indicates that we should disable the timeout (if any) between when + * this socket is closed and when we can listen again on the same port. */ +#define LEV_OPT_REUSEABLE (1u<<3) +/** Flag: Indicates that the listener should be locked so it's safe to use + * from multiple threadcs at once. */ +#define LEV_OPT_THREADSAFE (1u<<4) +/** Flag: Indicates that the listener should be created in disabled + * state. Use evconnlistener_enable() to enable it later. */ +#define LEV_OPT_DISABLED (1u<<5) +/** Flag: Indicates that the listener should defer accept() until data is + * available, if possible. Ignored on platforms that do not support this. + * + * This option can help performance for protocols where the client transmits + * immediately after connecting. Do not use this option if your protocol + * _doesn't_ start out with the client transmitting data, since in that case + * this option will sometimes cause the kernel to never tell you about the + * connection. + * + * This option is only supported by evconnlistener_new_bind(): it can't + * work with evconnlistener_new_fd(), since the listener needs to be told + * to use the option before it is actually bound. + */ +#define LEV_OPT_DEFERRED_ACCEPT (1u<<6) +/** Flag: Indicates that we ask to allow multiple servers (processes or + * threads) to bind to the same port if they each set the option. + * + * SO_REUSEPORT is what most people would expect SO_REUSEADDR to be, however + * SO_REUSEPORT does not imply SO_REUSEADDR. + * + * This is only available on Linux and kernel 3.9+ + */ +#define LEV_OPT_REUSEABLE_PORT (1u<<7) +/** Flag: Indicates that the listener wants to work only in IPv6 socket. + * + * According to RFC3493 and most Linux distributions, default value is to + * work in IPv4-mapped mode. If there is a requirement to bind same port + * on same ip addresses but different handlers for both IPv4 and IPv6, + * it is required to set IPV6_V6ONLY socket option to be sure that the + * code works as expected without affected by bindv6only sysctl setting in + * system. + * + * This socket option also supported by Windows. + */ +#define LEV_OPT_BIND_IPV6ONLY (1u<<8) + +/** + Allocate a new evconnlistener object to listen for incoming TCP connections + on a given file descriptor. + + @param base The event base to associate the listener with. + @param cb A callback to be invoked when a new connection arrives. If the + callback is NULL, the listener will be treated as disabled until the + callback is set. + @param ptr A user-supplied pointer to give to the callback. + @param flags Any number of LEV_OPT_* flags + @param backlog Passed to the listen() call to determine the length of the + acceptable connection backlog. Set to -1 for a reasonable default. + Set to 0 if the socket is already listening. + @param fd The file descriptor to listen on. It must be a nonblocking + file descriptor, and it should already be bound to an appropriate + port and address. +*/ +EVENT2_EXPORT_SYMBOL +struct evconnlistener *evconnlistener_new(struct event_base *base, + evconnlistener_cb cb, void *ptr, unsigned flags, int backlog, + evutil_socket_t fd); +/** + Allocate a new evconnlistener object to listen for incoming TCP connections + on a given address. + + @param base The event base to associate the listener with. + @param cb A callback to be invoked when a new connection arrives. 
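+      A sketch of such a callback (assuming <event2/bufferevent.h> for the
+      bufferevent calls):
+
+          static void
+          accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
+              struct sockaddr *addr, int socklen, void *ctx)
+          {
+                  struct event_base *base = evconnlistener_get_base(listener);
+                  struct bufferevent *bev = bufferevent_socket_new(base, fd,
+                      BEV_OPT_CLOSE_ON_FREE);
+                  bufferevent_enable(bev, EV_READ | EV_WRITE);
+          }
+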
If the + callback is NULL, the listener will be treated as disabled until the + callback is set. + @param ptr A user-supplied pointer to give to the callback. + @param flags Any number of LEV_OPT_* flags + @param backlog Passed to the listen() call to determine the length of the + acceptable connection backlog. Set to -1 for a reasonable default. + @param addr The address to listen for connections on. + @param socklen The length of the address. + */ +EVENT2_EXPORT_SYMBOL +struct evconnlistener *evconnlistener_new_bind(struct event_base *base, + evconnlistener_cb cb, void *ptr, unsigned flags, int backlog, + const struct sockaddr *sa, int socklen); +/** + Disable and deallocate an evconnlistener. + */ +EVENT2_EXPORT_SYMBOL +void evconnlistener_free(struct evconnlistener *lev); +/** + Re-enable an evconnlistener that has been disabled. + */ +EVENT2_EXPORT_SYMBOL +int evconnlistener_enable(struct evconnlistener *lev); +/** + Stop listening for connections on an evconnlistener. + */ +EVENT2_EXPORT_SYMBOL +int evconnlistener_disable(struct evconnlistener *lev); + +/** Return an evconnlistener's associated event_base. */ +EVENT2_EXPORT_SYMBOL +struct event_base *evconnlistener_get_base(struct evconnlistener *lev); + +/** Return the socket that an evconnlistner is listening on. */ +EVENT2_EXPORT_SYMBOL +evutil_socket_t evconnlistener_get_fd(struct evconnlistener *lev); + +/** Change the callback on the listener to cb and its user_data to arg. + */ +EVENT2_EXPORT_SYMBOL +void evconnlistener_set_cb(struct evconnlistener *lev, + evconnlistener_cb cb, void *arg); + +/** Set an evconnlistener's error callback. */ +EVENT2_EXPORT_SYMBOL +void evconnlistener_set_error_cb(struct evconnlistener *lev, + evconnlistener_errorcb errorcb); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/rpc.h b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc.h new file mode 100644 index 00000000..1bc31d57 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc.h @@ -0,0 +1,626 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_RPC_H_INCLUDED_ +#define EVENT2_RPC_H_INCLUDED_ + +/* For int types. */ +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** @file rpc.h + * + * This header files provides basic support for an RPC server and client. + * + * To support RPCs in a server, every supported RPC command needs to be + * defined and registered. + * + * EVRPC_HEADER(SendCommand, Request, Reply); + * + * SendCommand is the name of the RPC command. + * Request is the name of a structure generated by event_rpcgen.py. + * It contains all parameters relating to the SendCommand RPC. The + * server needs to fill in the Reply structure. + * Reply is the name of a structure generated by event_rpcgen.py. It + * contains the answer to the RPC. + * + * To register an RPC with an HTTP server, you need to first create an RPC + * base with: + * + * struct evrpc_base *base = evrpc_init(http); + * + * A specific RPC can then be registered with + * + * EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg); + * + * when the server receives an appropriately formatted RPC, the user callback + * is invoked. The callback needs to fill in the reply structure. + * + * void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg); + * + * To send the reply, call EVRPC_REQUEST_DONE(rpc); + * + * See the regression test for an example. + */ + +/** + Determines if the member has been set in the message + + @param msg the message to inspect + @param member the member variable to test for presences + @return 1 if it's present or 0 otherwise. +*/ +#define EVTAG_HAS(msg, member) \ + ((msg)->member##_set == 1) + +#ifndef EVENT2_RPC_COMPAT_H_INCLUDED_ + +/** + Assigns a value to the member in the message. + + @param msg the message to which to assign a value + @param member the name of the member variable + @param value the value to assign +*/ +#define EVTAG_ASSIGN(msg, member, value) \ + (*(msg)->base->member##_assign)((msg), (value)) +/** + Assigns a value to the member in the message. + + @param msg the message to which to assign a value + @param member the name of the member variable + @param value the value to assign + @param len the length of the value +*/ +#define EVTAG_ASSIGN_WITH_LEN(msg, member, value, len) \ + (*(msg)->base->member##_assign)((msg), (value), (len)) +/** + Returns the value for a member. + + @param msg the message from which to get the value + @param member the name of the member variable + @param pvalue a pointer to the variable to hold the value + @return 0 on success, -1 otherwise. +*/ +#define EVTAG_GET(msg, member, pvalue) \ + (*(msg)->base->member##_get)((msg), (pvalue)) +/** + Returns the value for a member. + + @param msg the message from which to get the value + @param member the name of the member variable + @param pvalue a pointer to the variable to hold the value + @param plen a pointer to the length of the value + @return 0 on success, -1 otherwise. 
+*/ +#define EVTAG_GET_WITH_LEN(msg, member, pvalue, plen) \ + (*(msg)->base->member##_get)((msg), (pvalue), (plen)) + +#endif /* EVENT2_RPC_COMPAT_H_INCLUDED_ */ + +/** + Adds a value to an array. +*/ +#define EVTAG_ARRAY_ADD_VALUE(msg, member, value) \ + (*(msg)->base->member##_add)((msg), (value)) +/** + Allocates a new entry in the array and returns it. +*/ +#define EVTAG_ARRAY_ADD(msg, member) \ + (*(msg)->base->member##_add)(msg) +/** + Gets a variable at the specified offset from the array. +*/ +#define EVTAG_ARRAY_GET(msg, member, offset, pvalue) \ + (*(msg)->base->member##_get)((msg), (offset), (pvalue)) +/** + Returns the number of entries in the array. +*/ +#define EVTAG_ARRAY_LEN(msg, member) ((msg)->member##_length) + + +struct evbuffer; +struct event_base; +struct evrpc_req_generic; +struct evrpc_request_wrapper; +struct evrpc; + +/** The type of a specific RPC Message + * + * @param rpcname the name of the RPC message + */ +#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname + +struct evhttp_request; +struct evrpc_status; +struct evrpc_hook_meta; + +/** Creates the definitions and prototypes for an RPC + * + * You need to use EVRPC_HEADER to create structures and function prototypes + * needed by the server and client implementation. The structures have to be + * defined in an .rpc file and converted to source code via event_rpcgen.py + * + * @param rpcname the name of the RPC + * @param reqstruct the name of the RPC request structure + * @param replystruct the name of the RPC reply structure + * @see EVRPC_GENERATE() + */ +#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \ +EVRPC_STRUCT(rpcname) { \ + struct evrpc_hook_meta *hook_meta; \ + struct reqstruct* request; \ + struct rplystruct* reply; \ + struct evrpc* rpc; \ + struct evhttp_request* http_req; \ + struct evbuffer* rpc_data; \ +}; \ +EVENT2_EXPORT_SYMBOL \ +int evrpc_send_request_##rpcname(struct evrpc_pool *, \ + struct reqstruct *, struct rplystruct *, \ + void (*)(struct evrpc_status *, \ + struct reqstruct *, struct rplystruct *, void *cbarg), \ + void *); + +struct evrpc_pool; + +/** use EVRPC_GENERATE instead */ +EVENT2_EXPORT_SYMBOL +struct evrpc_request_wrapper *evrpc_make_request_ctx( + struct evrpc_pool *pool, void *request, void *reply, + const char *rpcname, + void (*req_marshal)(struct evbuffer*, void *), + void (*rpl_clear)(void *), + int (*rpl_unmarshal)(void *, struct evbuffer *), + void (*cb)(struct evrpc_status *, void *, void *, void *), + void *cbarg); + +/** Creates a context structure that contains rpc specific information. + * + * EVRPC_MAKE_CTX is used to populate a RPC specific context that + * contains information about marshaling the RPC data types. 
+ * + * @param rpcname the name of the RPC + * @param reqstruct the name of the RPC request structure + * @param replystruct the name of the RPC reply structure + * @param pool the evrpc_pool over which to make the request + * @param request a pointer to the RPC request structure object + * @param reply a pointer to the RPC reply structure object + * @param cb the callback function to call when the RPC has completed + * @param cbarg the argument to supply to the callback + */ +#define EVRPC_MAKE_CTX(rpcname, reqstruct, rplystruct, \ + pool, request, reply, cb, cbarg) \ + evrpc_make_request_ctx(pool, request, reply, \ + #rpcname, \ + (void (*)(struct evbuffer *, void *))reqstruct##_marshal, \ + (void (*)(void *))rplystruct##_clear, \ + (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal, \ + (void (*)(struct evrpc_status *, void *, void *, void *))cb, \ + cbarg) + +/** Generates the code for receiving and sending an RPC message + * + * EVRPC_GENERATE is used to create the code corresponding to sending + * and receiving a particular RPC message + * + * @param rpcname the name of the RPC + * @param reqstruct the name of the RPC request structure + * @param replystruct the name of the RPC reply structure + * @see EVRPC_HEADER() + */ +#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \ + int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \ + struct reqstruct *request, struct rplystruct *reply, \ + void (*cb)(struct evrpc_status *, \ + struct reqstruct *, struct rplystruct *, void *cbarg), \ + void *cbarg) { \ + return evrpc_send_request_generic(pool, request, reply, \ + (void (*)(struct evrpc_status *, void *, void *, void *))cb, \ + cbarg, \ + #rpcname, \ + (void (*)(struct evbuffer *, void *))reqstruct##_marshal, \ + (void (*)(void *))rplystruct##_clear, \ + (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal); \ +} + +/** Provides access to the HTTP request object underlying an RPC + * + * Access to the underlying http object; can be used to look at headers or + * for getting the remote ip address + * + * @param rpc_req the rpc request structure provided to the server callback + * @return an struct evhttp_request object that can be inspected for + * HTTP headers or sender information. + */ +#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req + +/** completes the server response to an rpc request */ +EVENT2_EXPORT_SYMBOL +void evrpc_request_done(struct evrpc_req_generic *req); + +/** accessors for request and reply */ +EVENT2_EXPORT_SYMBOL +void *evrpc_get_request(struct evrpc_req_generic *req); +EVENT2_EXPORT_SYMBOL +void *evrpc_get_reply(struct evrpc_req_generic *req); + +/** Creates the reply to an RPC request + * + * EVRPC_REQUEST_DONE is used to answer a request; the reply is expected + * to have been filled in. The request and reply pointers become invalid + * after this call has finished. 
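+ *
+ * A sketch of a server callback, using the hypothetical SendCommand RPC
+ * from the example at the top of this header (the reply field names depend
+ * on the .rpc definition):
+ *
+ *     void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg)
+ *     {
+ *             EVTAG_ASSIGN(rpc->reply, status, 0);
+ *             EVRPC_REQUEST_DONE(rpc);
+ *     }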
+ * + * @param rpc_req the rpc request structure provided to the server callback + */ +#define EVRPC_REQUEST_DONE(rpc_req) do { \ + struct evrpc_req_generic *req_ = (struct evrpc_req_generic *)(rpc_req); \ + evrpc_request_done(req_); \ +} while (0) + + +struct evrpc_base; +struct evhttp; + +/* functions to start up the rpc system */ + +/** Creates a new rpc base from which RPC requests can be received + * + * @param server a pointer to an existing HTTP server + * @return a newly allocated evrpc_base struct or NULL if an error occurred + * @see evrpc_free() + */ +EVENT2_EXPORT_SYMBOL +struct evrpc_base *evrpc_init(struct evhttp *server); + +/** + * Frees the evrpc base + * + * For now, you are responsible for making sure that no rpcs are ongoing. + * + * @param base the evrpc_base object to be freed + * @see evrpc_init + */ +EVENT2_EXPORT_SYMBOL +void evrpc_free(struct evrpc_base *base); + +/** register RPCs with the HTTP Server + * + * registers a new RPC with the HTTP server, each RPC needs to have + * a unique name under which it can be identified. + * + * @param base the evrpc_base structure in which the RPC should be + * registered. + * @param name the name of the RPC + * @param request the name of the RPC request structure + * @param reply the name of the RPC reply structure + * @param callback the callback that should be invoked when the RPC + * is received. The callback has the following prototype + * void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg) + * @param cbarg an additional parameter that can be passed to the callback. + * The parameter can be used to carry around state. + */ +#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \ + evrpc_register_generic(base, #name, \ + (void (*)(struct evrpc_req_generic *, void *))callback, cbarg, \ + (void *(*)(void *))request##_new_with_arg, NULL, \ + (void (*)(void *))request##_free, \ + (int (*)(void *, struct evbuffer *))request##_unmarshal, \ + (void *(*)(void *))reply##_new_with_arg, NULL, \ + (void (*)(void *))reply##_free, \ + (int (*)(void *))reply##_complete, \ + (void (*)(struct evbuffer *, void *))reply##_marshal) + +/** + Low level function for registering an RPC with a server. + + Use EVRPC_REGISTER() instead. + + @see EVRPC_REGISTER() +*/ +EVENT2_EXPORT_SYMBOL +int evrpc_register_rpc(struct evrpc_base *, struct evrpc *, + void (*)(struct evrpc_req_generic*, void *), void *); + +/** + * Unregisters an already registered RPC + * + * @param base the evrpc_base object from which to unregister an RPC + * @param name the name of the rpc to unregister + * @return -1 on error or 0 when successful. + * @see EVRPC_REGISTER() + */ +#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc((base), #name) + +EVENT2_EXPORT_SYMBOL +int evrpc_unregister_rpc(struct evrpc_base *base, const char *name); + +/* + * Client-side RPC support + */ + +struct evhttp_connection; +struct evrpc_status; + +/** launches an RPC and sends it to the server + * + * EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server. + * + * @param name the name of the RPC + * @param pool the evrpc_pool that contains the connection objects over which + * the request should be sent. + * @param request a pointer to the RPC request structure - it contains the + * data to be sent to the server. + * @param reply a pointer to the RPC reply structure. 
It is going to be filled + * if the request was answered successfully + * @param cb the callback to invoke when the RPC request has been answered + * @param cbarg an additional argument to be passed to the client + * @return 0 on success, -1 on failure + */ +#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \ + evrpc_send_request_##name((pool), (request), (reply), (cb), (cbarg)) + +/** + Makes an RPC request based on the provided context. + + This is a low-level function and should not be used directly + unless a custom context object is provided. Use EVRPC_MAKE_REQUEST() + instead. + + @param ctx a context from EVRPC_MAKE_CTX() + @returns 0 on success, -1 otherwise. + @see EVRPC_MAKE_REQUEST(), EVRPC_MAKE_CTX() +*/ +EVENT2_EXPORT_SYMBOL +int evrpc_make_request(struct evrpc_request_wrapper *ctx); + +/** creates an rpc connection pool + * + * a pool has a number of connections associated with it. + * rpc requests are always made via a pool. + * + * @param base a pointer to an struct event_based object; can be left NULL + * in singled-threaded applications + * @return a newly allocated struct evrpc_pool object or NULL if an error + * occurred + * @see evrpc_pool_free() + */ +EVENT2_EXPORT_SYMBOL +struct evrpc_pool *evrpc_pool_new(struct event_base *base); +/** frees an rpc connection pool + * + * @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new() + * @see evrpc_pool_new() + */ +EVENT2_EXPORT_SYMBOL +void evrpc_pool_free(struct evrpc_pool *pool); + +/** + * Adds a connection over which rpc can be dispatched to the pool. + * + * The connection object must have been newly created. + * + * @param pool the pool to which to add the connection + * @param evcon the connection to add to the pool. + */ +EVENT2_EXPORT_SYMBOL +void evrpc_pool_add_connection(struct evrpc_pool *pool, + struct evhttp_connection *evcon); + +/** + * Removes a connection from the pool. + * + * The connection object must have been newly created. + * + * @param pool the pool from which to remove the connection + * @param evcon the connection to remove from the pool. + */ +EVENT2_EXPORT_SYMBOL +void evrpc_pool_remove_connection(struct evrpc_pool *pool, + struct evhttp_connection *evcon); + +/** + * Sets the timeout in secs after which a request has to complete. The + * RPC is completely aborted if it does not complete by then. Setting + * the timeout to 0 means that it never timeouts and can be used to + * implement callback type RPCs. + * + * Any connection already in the pool will be updated with the new + * timeout. Connections added to the pool after set_timeout has be + * called receive the pool timeout only if no timeout has been set + * for the connection itself. + * + * @param pool a pointer to a struct evrpc_pool object + * @param timeout_in_secs the number of seconds after which a request should + * timeout and a failure be returned to the callback. + */ +EVENT2_EXPORT_SYMBOL +void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs); + +/** + * Hooks for changing the input and output of RPCs; this can be used to + * implement compression, authentication, encryption, ... + */ + +enum EVRPC_HOOK_TYPE { + EVRPC_INPUT, /**< apply the function to an input hook */ + EVRPC_OUTPUT /**< apply the function to an output hook */ +}; + +#ifndef _WIN32 +/** Deprecated alias for EVRPC_INPUT. Not available on windows, where it + * conflicts with platform headers. */ +#define INPUT EVRPC_INPUT +/** Deprecated alias for EVRPC_OUTPUT. 
Not available on windows, where it + * conflicts with platform headers. */ +#define OUTPUT EVRPC_OUTPUT +#endif + +/** + * Return value from hook processing functions + */ + +enum EVRPC_HOOK_RESULT { + EVRPC_TERMINATE = -1, /**< indicates the rpc should be terminated */ + EVRPC_CONTINUE = 0, /**< continue processing the rpc */ + EVRPC_PAUSE = 1 /**< pause processing request until resumed */ +}; + +/** adds a processing hook to either an rpc base or rpc pool + * + * If a hook returns TERMINATE, the processing is aborted. On CONTINUE, + * the request is immediately processed after the hook returns. If the + * hook returns PAUSE, request processing stops until evrpc_resume_request() + * has been called. + * + * The add functions return handles that can be used for removing hooks. + * + * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool + * @param hook_type either INPUT or OUTPUT + * @param cb the callback to call when the hook is activated + * @param cb_arg an additional argument for the callback + * @return a handle to the hook so it can be removed later + * @see evrpc_remove_hook() + */ +EVENT2_EXPORT_SYMBOL +void *evrpc_add_hook(void *vbase, + enum EVRPC_HOOK_TYPE hook_type, + int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *), + void *cb_arg); + +/** removes a previously added hook + * + * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool + * @param hook_type either INPUT or OUTPUT + * @param handle a handle returned by evrpc_add_hook() + * @return 1 on success or 0 on failure + * @see evrpc_add_hook() + */ +EVENT2_EXPORT_SYMBOL +int evrpc_remove_hook(void *vbase, + enum EVRPC_HOOK_TYPE hook_type, + void *handle); + +/** resume a paused request + * + * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool + * @param ctx the context pointer provided to the original hook call + */ +EVENT2_EXPORT_SYMBOL +int evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res); + +/** adds meta data to request + * + * evrpc_hook_add_meta() allows hooks to add meta data to a request. for + * a client request, the meta data can be inserted by an outgoing request hook + * and retrieved by the incoming request hook. + * + * @param ctx the context provided to the hook call + * @param key a NUL-terminated c-string + * @param data the data to be associated with the key + * @param data_size the size of the data + */ +EVENT2_EXPORT_SYMBOL +void evrpc_hook_add_meta(void *ctx, const char *key, + const void *data, size_t data_size); + +/** retrieves meta data previously associated + * + * evrpc_hook_find_meta() can be used to retrieve meta data associated to a + * request by a previous hook. + * @param ctx the context provided to the hook call + * @param key a NUL-terminated c-string + * @param data pointer to a data pointer that will contain the retrieved data + * @param data_size pointer to the size of the data + * @return 0 on success or -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evrpc_hook_find_meta(void *ctx, const char *key, + void **data, size_t *data_size); + +/** + * returns the connection object associated with the request + * + * @param ctx the context provided to the hook call + * @return a pointer to the evhttp_connection object or NULL if an error + * occurred + */ +EVENT2_EXPORT_SYMBOL +struct evhttp_connection *evrpc_hook_get_connection(void *ctx); + +/** + Function for sending a generic RPC request. + + Do not call this function directly, use EVRPC_MAKE_REQUEST() instead. 
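Pulling the pool and hook functions above together, a hedged client-side sketch; the loopback host/port and the no-op input hook are illustrative only, and error-path cleanup is omitted.

#include <event2/event.h>
#include <event2/http.h>
#include <event2/buffer.h>
#include <event2/rpc.h>

static int
log_input_hook(void *ctx, struct evhttp_request *req,
    struct evbuffer *payload, void *arg)
{
	(void)ctx; (void)req; (void)payload; (void)arg;
	return EVRPC_CONTINUE;	/* keep processing this RPC */
}

static struct evrpc_pool *
make_pool(struct event_base *base)
{
	struct evrpc_pool *pool = evrpc_pool_new(base);
	struct evhttp_connection *conn =
	    evhttp_connection_base_new(base, NULL, "127.0.0.1", 8080);

	if (pool == NULL || conn == NULL)
		return NULL;	/* cleanup omitted in this sketch */

	evrpc_pool_add_connection(pool, conn);
	evrpc_pool_set_timeout(pool, 30);	/* abort unanswered requests after 30s */
	evrpc_add_hook(pool, EVRPC_INPUT, log_input_hook, NULL);
	return pool;
}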
+ + @see EVRPC_MAKE_REQUEST() + */ +EVENT2_EXPORT_SYMBOL +int evrpc_send_request_generic(struct evrpc_pool *pool, + void *request, void *reply, + void (*cb)(struct evrpc_status *, void *, void *, void *), + void *cb_arg, + const char *rpcname, + void (*req_marshal)(struct evbuffer *, void *), + void (*rpl_clear)(void *), + int (*rpl_unmarshal)(void *, struct evbuffer *)); + +/** + Function for registering a generic RPC with the RPC base. + + Do not call this function directly, use EVRPC_REGISTER() instead. + + @see EVRPC_REGISTER() + */ +EVENT2_EXPORT_SYMBOL +int evrpc_register_generic(struct evrpc_base *base, const char *name, + void (*callback)(struct evrpc_req_generic *, void *), void *cbarg, + void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *), + int (*req_unmarshal)(void *, struct evbuffer *), + void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *), + int (*rpl_complete)(void *), + void (*rpl_marshal)(struct evbuffer *, void *)); + +/** accessors for obscure and undocumented functionality */ +EVENT2_EXPORT_SYMBOL +struct evrpc_pool* evrpc_request_get_pool(struct evrpc_request_wrapper *ctx); +EVENT2_EXPORT_SYMBOL +void evrpc_request_set_pool(struct evrpc_request_wrapper *ctx, + struct evrpc_pool *pool); +EVENT2_EXPORT_SYMBOL +void evrpc_request_set_cb(struct evrpc_request_wrapper *ctx, + void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg), + void *cb_arg); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_RPC_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_compat.h new file mode 100644 index 00000000..8d8334d2 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_compat.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_RPC_COMPAT_H_INCLUDED_ +#define EVENT2_RPC_COMPAT_H_INCLUDED_ + +/** @file event2/rpc_compat.h + + Deprecated versions of the functions in rpc.h: provided only for + backwards compatibility. 
+ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** backwards compatible accessors that work only with gcc */ +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) + +#undef EVTAG_ASSIGN +#undef EVTAG_GET +#undef EVTAG_ADD + +#define EVTAG_ASSIGN(msg, member, args...) \ + (*(msg)->base->member##_assign)(msg, ## args) +#define EVTAG_GET(msg, member, args...) \ + (*(msg)->base->member##_get)(msg, ## args) +#define EVTAG_ADD(msg, member, args...) \ + (*(msg)->base->member##_add)(msg, ## args) +#endif +#define EVTAG_LEN(msg, member) ((msg)->member##_length) + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_EVENT_COMPAT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_struct.h b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_struct.h new file mode 100644 index 00000000..f3cb460a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/rpc_struct.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2006-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_RPC_STRUCT_H_INCLUDED_ +#define EVENT2_RPC_STRUCT_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** @file event2/rpc_struct.h + + Structures used by rpc.h. Using these structures directly may harm + forward compatibility: be careful! + + */ + +/* Fix so that people don't have to run with */ +#ifndef TAILQ_ENTRY +#define EVENT_DEFINED_TQENTRY_ +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} +#endif /* !TAILQ_ENTRY */ + +/** + * provides information about the completed RPC request. 
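For context on how these fields are consumed, a hedged sketch of a client completion callback; struct msg and struct kill stand in for rpcgen-generated types and are not part of this patch.

#include <event2/rpc_struct.h>

struct msg;	/* hypothetical generated request type */
struct kill;	/* hypothetical generated reply type */

static void
send_command_done(struct evrpc_status *status,
    struct msg *request, struct kill *reply, void *arg)
{
	(void)request; (void)reply; (void)arg;

	if (status->error != EVRPC_STATUS_ERR_NONE) {
		/* e.g. EVRPC_STATUS_ERR_TIMEOUT or EVRPC_STATUS_ERR_BADPAYLOAD;
		 * status->http_req may carry more detail. */
	}
}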
+ */ +struct evrpc_status { +#define EVRPC_STATUS_ERR_NONE 0 +#define EVRPC_STATUS_ERR_TIMEOUT 1 +#define EVRPC_STATUS_ERR_BADPAYLOAD 2 +#define EVRPC_STATUS_ERR_UNSTARTED 3 +#define EVRPC_STATUS_ERR_HOOKABORTED 4 + int error; + + /* for looking at headers or other information */ + struct evhttp_request *http_req; +}; + +/* the structure below needs to be synchronized with evrpc_req_generic */ + +/* Encapsulates a request */ +struct evrpc { + TAILQ_ENTRY(evrpc) next; + + /* the URI at which the request handler lives */ + const char* uri; + + /* creates a new request structure */ + void *(*request_new)(void *); + void *request_new_arg; + + /* frees the request structure */ + void (*request_free)(void *); + + /* unmarshals the buffer into the proper request structure */ + int (*request_unmarshal)(void *, struct evbuffer *); + + /* creates a new reply structure */ + void *(*reply_new)(void *); + void *reply_new_arg; + + /* frees the reply structure */ + void (*reply_free)(void *); + + /* verifies that the reply is valid */ + int (*reply_complete)(void *); + + /* marshals the reply into a buffer */ + void (*reply_marshal)(struct evbuffer*, void *); + + /* the callback invoked for each received rpc */ + void (*cb)(struct evrpc_req_generic *, void *); + void *cb_arg; + + /* reference for further configuration */ + struct evrpc_base *base; +}; + +#ifdef EVENT_DEFINED_TQENTRY_ +#undef TAILQ_ENTRY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_RPC_STRUCT_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/tag.h b/probe-busybox/libevent-2.1.11-stable/include/event2/tag.h new file mode 100644 index 00000000..2f73bfc0 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/tag.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_TAG_H_INCLUDED_ +#define EVENT2_TAG_H_INCLUDED_ + +/** @file event2/tag.h + + Helper functions for reading and writing tagged data onto buffers. 
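As a usage illustration of the tag helpers declared in this header, a hedged round-trip sketch; TAG_EXAMPLE is an arbitrary value.

#include <event2/buffer.h>
#include <event2/tag.h>
#include <event2/util.h>

#define TAG_EXAMPLE 0x01

static void
tag_roundtrip(void)
{
	struct evbuffer *buf = evbuffer_new();
	ev_uint32_t value = 0;

	if (buf == NULL)
		return;

	evtag_marshal_int(buf, TAG_EXAMPLE, 42);	/* tag, encoded length, payload */

	if (evtag_unmarshal_int(buf, TAG_EXAMPLE, &value) == -1) {
		/* missing or malformed tag */
	}
	/* on success, value == 42 */

	evbuffer_free(buf);
}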
+ + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif + +/* For int types. */ +#include + +struct evbuffer; + +/* + * Marshaling tagged data - We assume that all tags are inserted in their + * numeric order - so that unknown tags will always be higher than the + * known ones - and we can just ignore the end of an event buffer. + */ + +EVENT2_EXPORT_SYMBOL +void evtag_init(void); + +/** + Unmarshals the header and returns the length of the payload + + @param evbuf the buffer from which to unmarshal data + @param ptag a pointer in which the tag id is being stored + @returns -1 on failure or the number of bytes in the remaining payload. +*/ +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag); + +EVENT2_EXPORT_SYMBOL +void evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, const void *data, + ev_uint32_t len); +EVENT2_EXPORT_SYMBOL +void evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag, + struct evbuffer *data); + +/** + Encode an integer and store it in an evbuffer. + + We encode integers by nybbles; the first nibble contains the number + of significant nibbles - 1; this allows us to encode up to 64-bit + integers. This function is byte-order independent. + + @param evbuf evbuffer to store the encoded number + @param number a 32-bit integer + */ +EVENT2_EXPORT_SYMBOL +void evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number); +EVENT2_EXPORT_SYMBOL +void evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number); + +EVENT2_EXPORT_SYMBOL +void evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, + ev_uint32_t integer); +EVENT2_EXPORT_SYMBOL +void evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag, + ev_uint64_t integer); + +EVENT2_EXPORT_SYMBOL +void evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, + const char *string); + +EVENT2_EXPORT_SYMBOL +void evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, + struct timeval *tv); + +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, + struct evbuffer *dst); +EVENT2_EXPORT_SYMBOL +int evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag); +EVENT2_EXPORT_SYMBOL +int evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength); +EVENT2_EXPORT_SYMBOL +int evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength); +EVENT2_EXPORT_SYMBOL +int evtag_consume(struct evbuffer *evbuf); + +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag, + ev_uint32_t *pinteger); +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag, + ev_uint64_t *pinteger); + +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, + void *data, size_t len); + +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag, + char **pstring); + +EVENT2_EXPORT_SYMBOL +int evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag, + struct timeval *ptv); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_TAG_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/tag_compat.h b/probe-busybox/libevent-2.1.11-stable/include/event2/tag_compat.h new file mode 100644 index 00000000..a276c0d3 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/tag_compat.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 
2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_TAG_COMPAT_H_INCLUDED_ +#define EVENT2_TAG_COMPAT_H_INCLUDED_ + +/** @file event2/tag_compat.h + + Obsolete/deprecated functions from tag.h; provided only for backwards + compatibility. + */ + +/** + @name Misnamed functions + + @deprecated These macros are deprecated because their names don't follow + Libevent's naming conventions. Use evtag_encode_int and + evtag_encode_int64 instead. + + @{ +*/ +#define encode_int(evbuf, number) evtag_encode_int((evbuf), (number)) +#define encode_int64(evbuf, number) evtag_encode_int64((evbuf), (number)) +/**@}*/ + +#endif /* EVENT2_TAG_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/thread.h b/probe-busybox/libevent-2.1.11-stable/include/event2/thread.h new file mode 100644 index 00000000..b5199863 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/thread.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_THREAD_H_INCLUDED_ +#define EVENT2_THREAD_H_INCLUDED_ + +/** @file event2/thread.h + + Functions for multi-threaded applications using Libevent. + + When using a multi-threaded application in which multiple threads + add and delete events from a single event base, Libevent needs to + lock its data structures. + + Like the memory-management function hooks, all of the threading functions + _must_ be set up before an event_base is created if you want the base to + use them. + + Most programs will either be using Windows threads or Posix threads. You + can configure Libevent to use one of these event_use_windows_threads() or + event_use_pthreads() respectively. If you're using another threading + library, you'll need to configure threading functions manually using + evthread_set_lock_callbacks() and evthread_set_condition_callbacks(). + + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + @name Flags passed to lock functions + + @{ +*/ +/** A flag passed to a locking callback when the lock was allocated as a + * read-write lock, and we want to acquire or release the lock for writing. */ +#define EVTHREAD_WRITE 0x04 +/** A flag passed to a locking callback when the lock was allocated as a + * read-write lock, and we want to acquire or release the lock for reading. */ +#define EVTHREAD_READ 0x08 +/** A flag passed to a locking callback when we don't want to block waiting + * for the lock; if we can't get the lock immediately, we will instead + * return nonzero from the locking callback. */ +#define EVTHREAD_TRY 0x10 +/**@}*/ + +#if !defined(EVENT__DISABLE_THREAD_SUPPORT) || defined(EVENT_IN_DOXYGEN_) + +#define EVTHREAD_LOCK_API_VERSION 1 + +/** + @name Types of locks + + @{*/ +/** A recursive lock is one that can be acquired multiple times at once by the + * same thread. No other process can allocate the lock until the thread that + * has been holding it has unlocked it as many times as it locked it. */ +#define EVTHREAD_LOCKTYPE_RECURSIVE 1 +/* A read-write lock is one that allows multiple simultaneous readers, but + * where any one writer excludes all other writers and readers. */ +#define EVTHREAD_LOCKTYPE_READWRITE 2 +/**@}*/ + +/** This structure describes the interface a threading library uses for + * locking. It's used to tell evthread_set_lock_callbacks() how to use + * locking on this platform. + */ +struct evthread_lock_callbacks { + /** The current version of the locking API. Set this to + * EVTHREAD_LOCK_API_VERSION */ + int lock_api_version; + /** Which kinds of locks does this version of the locking API + * support? A bitfield of EVTHREAD_LOCKTYPE_RECURSIVE and + * EVTHREAD_LOCKTYPE_READWRITE. + * + * (Note that RECURSIVE locks are currently mandatory, and + * READWRITE locks are not currently used.) + **/ + unsigned supported_locktypes; + /** Function to allocate and initialize new lock of type 'locktype'. + * Returns NULL on failure. 
*/ + void *(*alloc)(unsigned locktype); + /** Funtion to release all storage held in 'lock', which was created + * with type 'locktype'. */ + void (*free)(void *lock, unsigned locktype); + /** Acquire an already-allocated lock at 'lock' with mode 'mode'. + * Returns 0 on success, and nonzero on failure. */ + int (*lock)(unsigned mode, void *lock); + /** Release a lock at 'lock' using mode 'mode'. Returns 0 on success, + * and nonzero on failure. */ + int (*unlock)(unsigned mode, void *lock); +}; + +/** Sets a group of functions that Libevent should use for locking. + * For full information on the required callback API, see the + * documentation for the individual members of evthread_lock_callbacks. + * + * Note that if you're using Windows or the Pthreads threading library, you + * probably shouldn't call this function; instead, use + * evthread_use_windows_threads() or evthread_use_posix_threads() if you can. + */ +EVENT2_EXPORT_SYMBOL +int evthread_set_lock_callbacks(const struct evthread_lock_callbacks *); + +#define EVTHREAD_CONDITION_API_VERSION 1 + +struct timeval; + +/** This structure describes the interface a threading library uses for + * condition variables. It's used to tell evthread_set_condition_callbacks + * how to use locking on this platform. + */ +struct evthread_condition_callbacks { + /** The current version of the conditions API. Set this to + * EVTHREAD_CONDITION_API_VERSION */ + int condition_api_version; + /** Function to allocate and initialize a new condition variable. + * Returns the condition variable on success, and NULL on failure. + * The 'condtype' argument will be 0 with this API version. + */ + void *(*alloc_condition)(unsigned condtype); + /** Function to free a condition variable. */ + void (*free_condition)(void *cond); + /** Function to signal a condition variable. If 'broadcast' is 1, all + * threads waiting on 'cond' should be woken; otherwise, only on one + * thread is worken. Should return 0 on success, -1 on failure. + * This function will only be called while holding the associated + * lock for the condition. + */ + int (*signal_condition)(void *cond, int broadcast); + /** Function to wait for a condition variable. The lock 'lock' + * will be held when this function is called; should be released + * while waiting for the condition to be come signalled, and + * should be held again when this function returns. + * If timeout is provided, it is interval of seconds to wait for + * the event to become signalled; if it is NULL, the function + * should wait indefinitely. + * + * The function should return -1 on error; 0 if the condition + * was signalled, or 1 on a timeout. */ + int (*wait_condition)(void *cond, void *lock, + const struct timeval *timeout); +}; + +/** Sets a group of functions that Libevent should use for condition variables. + * For full information on the required callback API, see the + * documentation for the individual members of evthread_condition_callbacks. + * + * Note that if you're using Windows or the Pthreads threading library, you + * probably shouldn't call this function; instead, use + * evthread_use_windows_threads() or evthread_use_pthreads() if you can. + */ +EVENT2_EXPORT_SYMBOL +int evthread_set_condition_callbacks( + const struct evthread_condition_callbacks *); + +/** + Sets the function for determining the thread id. + + @param base the event base for which to set the id function + @param id_fn the identify function Libevent should invoke to + determine the identity of a thread. 
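A hedged setup sketch for the common pthreads case described above; make_threadsafe_base() is an illustrative helper, not part of this patch.

#include <event2/thread.h>
#include <event2/event.h>

static struct event_base *
make_threadsafe_base(void)
{
#ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
	if (evthread_use_pthreads() == -1)
		return NULL;	/* could not install the pthread lock/condition callbacks */
#endif
	/* The base must be created after the threading callbacks are set up. */
	return event_base_new();
}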
+*/ +EVENT2_EXPORT_SYMBOL +void evthread_set_id_callback( + unsigned long (*id_fn)(void)); + +#if (defined(_WIN32) && !defined(EVENT__DISABLE_THREAD_SUPPORT)) || defined(EVENT_IN_DOXYGEN_) +/** Sets up Libevent for use with Windows builtin locking and thread ID + functions. Unavailable if Libevent is not built for Windows. + + @return 0 on success, -1 on failure. */ +EVENT2_EXPORT_SYMBOL +int evthread_use_windows_threads(void); +/** + Defined if Libevent was built with support for evthread_use_windows_threads() +*/ +#define EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED 1 +#endif + +#if defined(EVENT__HAVE_PTHREADS) || defined(EVENT_IN_DOXYGEN_) +/** Sets up Libevent for use with Pthreads locking and thread ID functions. + Unavailable if Libevent is not build for use with pthreads. Requires + libraries to link against Libevent_pthreads as well as Libevent. + + @return 0 on success, -1 on failure. */ +EVENT2_EXPORT_SYMBOL +int evthread_use_pthreads(void); +/** Defined if Libevent was built with support for evthread_use_pthreads() */ +#define EVTHREAD_USE_PTHREADS_IMPLEMENTED 1 + +#endif + +/** Enable debugging wrappers around the current lock callbacks. If Libevent + * makes one of several common locking errors, exit with an assertion failure. + * + * If you're going to call this function, you must do so before any locks are + * allocated. + **/ +EVENT2_EXPORT_SYMBOL +void evthread_enable_lock_debugging(void); + +/* Old (misspelled) version: This is deprecated; use + * evthread_enable_log_debugging instead. */ +EVENT2_EXPORT_SYMBOL +void evthread_enable_lock_debuging(void); + +#endif /* EVENT__DISABLE_THREAD_SUPPORT */ + +struct event_base; +/** Make sure it's safe to tell an event base to wake up from another thread + or a signal handler. + + You shouldn't need to call this by hand; configuring the base with thread + support should be necessary and sufficient. + + @return 0 on success, -1 on failure. + */ +EVENT2_EXPORT_SYMBOL +int evthread_make_base_notifiable(struct event_base *base); + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT2_THREAD_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/util.h b/probe-busybox/libevent-2.1.11-stable/include/event2/util.h new file mode 100644 index 00000000..e6df6289 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/util.h @@ -0,0 +1,882 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_UTIL_H_INCLUDED_ +#define EVENT2_UTIL_H_INCLUDED_ + +/** @file event2/util.h + + Common convenience functions for cross-platform portability and + related socket manipulations. + + */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef EVENT__HAVE_STDINT_H +#include +#elif defined(EVENT__HAVE_INTTYPES_H) +#include +#endif +#ifdef EVENT__HAVE_SYS_TYPES_H +#include +#endif +#ifdef EVENT__HAVE_STDDEF_H +#include +#endif +#ifdef _MSC_VER +#include +#endif +#include +#ifdef EVENT__HAVE_NETDB_H +#include +#endif + +#ifdef _WIN32 +#include +#ifdef EVENT__HAVE_GETADDRINFO +/* for EAI_* definitions. */ +#include +#endif +#else +#ifdef EVENT__HAVE_ERRNO_H +#include +#endif +#include +#endif + +#include + +/* Some openbsd autoconf versions get the name of this macro wrong. */ +#if defined(EVENT__SIZEOF_VOID__) && !defined(EVENT__SIZEOF_VOID_P) +#define EVENT__SIZEOF_VOID_P EVENT__SIZEOF_VOID__ +#endif + +/** + * @name Standard integer types. + * + * Integer type definitions for types that are supposed to be defined in the + * C99-specified stdint.h. Shamefully, some platforms do not include + * stdint.h, so we need to replace it. (If you are on a platform like this, + * your C headers are now over 10 years out of date. You should bug them to + * do something about this.) + * + * We define: + * + *
+ *   ev_uint64_t, ev_uint32_t, ev_uint16_t, ev_uint8_t --
+ *       unsigned integer types of exactly 64, 32, 16, and 8 bits
+ *       respectively.
+ *   ev_int64_t, ev_int32_t, ev_int16_t, ev_int8_t --
+ *       signed integer types of exactly 64, 32, 16, and 8 bits
+ *       respectively.
+ *   ev_uintptr_t, ev_intptr_t --
+ *       unsigned/signed integers large enough
+ *       to hold a pointer without loss of bits.
+ *   ev_ssize_t --
+ *       A signed type of the same size as size_t
+ *   ev_off_t --
+ *       A signed type typically used to represent offsets within a
+ *       (potentially large) file
+ * + * @{ + */ +#ifdef EVENT__HAVE_UINT64_T +#define ev_uint64_t uint64_t +#define ev_int64_t int64_t +#elif defined(_WIN32) +#define ev_uint64_t unsigned __int64 +#define ev_int64_t signed __int64 +#elif EVENT__SIZEOF_LONG_LONG == 8 +#define ev_uint64_t unsigned long long +#define ev_int64_t long long +#elif EVENT__SIZEOF_LONG == 8 +#define ev_uint64_t unsigned long +#define ev_int64_t long +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_uint64_t ... +#define ev_int64_t ... +#else +#error "No way to define ev_uint64_t" +#endif + +#ifdef EVENT__HAVE_UINT32_T +#define ev_uint32_t uint32_t +#define ev_int32_t int32_t +#elif defined(_WIN32) +#define ev_uint32_t unsigned int +#define ev_int32_t signed int +#elif EVENT__SIZEOF_LONG == 4 +#define ev_uint32_t unsigned long +#define ev_int32_t signed long +#elif EVENT__SIZEOF_INT == 4 +#define ev_uint32_t unsigned int +#define ev_int32_t signed int +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_uint32_t ... +#define ev_int32_t ... +#else +#error "No way to define ev_uint32_t" +#endif + +#ifdef EVENT__HAVE_UINT16_T +#define ev_uint16_t uint16_t +#define ev_int16_t int16_t +#elif defined(_WIN32) +#define ev_uint16_t unsigned short +#define ev_int16_t signed short +#elif EVENT__SIZEOF_INT == 2 +#define ev_uint16_t unsigned int +#define ev_int16_t signed int +#elif EVENT__SIZEOF_SHORT == 2 +#define ev_uint16_t unsigned short +#define ev_int16_t signed short +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_uint16_t ... +#define ev_int16_t ... +#else +#error "No way to define ev_uint16_t" +#endif + +#ifdef EVENT__HAVE_UINT8_T +#define ev_uint8_t uint8_t +#define ev_int8_t int8_t +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_uint8_t ... +#define ev_int8_t ... +#else +#define ev_uint8_t unsigned char +#define ev_int8_t signed char +#endif + +#ifdef EVENT__HAVE_UINTPTR_T +#define ev_uintptr_t uintptr_t +#define ev_intptr_t intptr_t +#elif EVENT__SIZEOF_VOID_P <= 4 +#define ev_uintptr_t ev_uint32_t +#define ev_intptr_t ev_int32_t +#elif EVENT__SIZEOF_VOID_P <= 8 +#define ev_uintptr_t ev_uint64_t +#define ev_intptr_t ev_int64_t +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_uintptr_t ... +#define ev_intptr_t ... +#else +#error "No way to define ev_uintptr_t" +#endif + +#ifdef EVENT__ssize_t +#define ev_ssize_t EVENT__ssize_t +#else +#define ev_ssize_t ssize_t +#endif + +/* Note that we define ev_off_t based on the compile-time size of off_t that + * we used to build Libevent, and not based on the current size of off_t. + * (For example, we don't define ev_off_t to off_t.). We do this because + * some systems let you build your software with different off_t sizes + * at runtime, and so putting in any dependency on off_t would risk API + * mismatch. + */ +#ifdef _WIN32 +#define ev_off_t ev_int64_t +#elif EVENT__SIZEOF_OFF_T == 8 +#define ev_off_t ev_int64_t +#elif EVENT__SIZEOF_OFF_T == 4 +#define ev_off_t ev_int32_t +#elif defined(EVENT_IN_DOXYGEN_) +#define ev_off_t ... +#else +#define ev_off_t off_t +#endif +/**@}*/ + +/* Limits for integer types. + + We're making two assumptions here: + - The compiler does constant folding properly. + - The platform does signed arithmetic in two's complement. +*/ + +/** + @name Limits for integer types + + These macros hold the largest or smallest values possible for the + ev_[u]int*_t types. 
+ + @{ +*/ +#ifndef EVENT__HAVE_STDINT_H +#define EV_UINT64_MAX ((((ev_uint64_t)0xffffffffUL) << 32) | 0xffffffffUL) +#define EV_INT64_MAX ((((ev_int64_t) 0x7fffffffL) << 32) | 0xffffffffL) +#define EV_INT64_MIN ((-EV_INT64_MAX) - 1) +#define EV_UINT32_MAX ((ev_uint32_t)0xffffffffUL) +#define EV_INT32_MAX ((ev_int32_t) 0x7fffffffL) +#define EV_INT32_MIN ((-EV_INT32_MAX) - 1) +#define EV_UINT16_MAX ((ev_uint16_t)0xffffUL) +#define EV_INT16_MAX ((ev_int16_t) 0x7fffL) +#define EV_INT16_MIN ((-EV_INT16_MAX) - 1) +#define EV_UINT8_MAX 255 +#define EV_INT8_MAX 127 +#define EV_INT8_MIN ((-EV_INT8_MAX) - 1) +#else +#define EV_UINT64_MAX UINT64_MAX +#define EV_INT64_MAX INT64_MAX +#define EV_INT64_MIN INT64_MIN +#define EV_UINT32_MAX UINT32_MAX +#define EV_INT32_MAX INT32_MAX +#define EV_INT32_MIN INT32_MIN +#define EV_UINT16_MAX UINT16_MAX +#define EV_INT16_MIN INT16_MIN +#define EV_INT16_MAX INT16_MAX +#define EV_UINT8_MAX UINT8_MAX +#define EV_INT8_MAX INT8_MAX +#define EV_INT8_MIN INT8_MIN +/** @} */ +#endif + + +/** + @name Limits for SIZE_T and SSIZE_T + + @{ +*/ +#if EVENT__SIZEOF_SIZE_T == 8 +#define EV_SIZE_MAX EV_UINT64_MAX +#define EV_SSIZE_MAX EV_INT64_MAX +#elif EVENT__SIZEOF_SIZE_T == 4 +#define EV_SIZE_MAX EV_UINT32_MAX +#define EV_SSIZE_MAX EV_INT32_MAX +#elif defined(EVENT_IN_DOXYGEN_) +#define EV_SIZE_MAX ... +#define EV_SSIZE_MAX ... +#else +#error "No way to define SIZE_MAX" +#endif + +#define EV_SSIZE_MIN ((-EV_SSIZE_MAX) - 1) +/**@}*/ + +#ifdef _WIN32 +#define ev_socklen_t int +#elif defined(EVENT__socklen_t) +#define ev_socklen_t EVENT__socklen_t +#else +#define ev_socklen_t socklen_t +#endif + +#ifdef EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY +#if !defined(EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY) \ + && !defined(ss_family) +#define ss_family __ss_family +#endif +#endif + +/** + * A type wide enough to hold the output of "socket()" or "accept()". On + * Windows, this is an intptr_t; elsewhere, it is an int. */ +#ifdef _WIN32 +#define evutil_socket_t intptr_t +#else +#define evutil_socket_t int +#endif + +/** + * Structure to hold information about a monotonic timer + * + * Use this with evutil_configure_monotonic_time() and + * evutil_gettime_monotonic(). + * + * This is an opaque structure; you can allocate one using + * evutil_monotonic_timer_new(). + * + * @see evutil_monotonic_timer_new(), evutil_monotonic_timer_free(), + * evutil_configure_monotonic_time(), evutil_gettime_monotonic() + */ +struct evutil_monotonic_timer +#ifdef EVENT_IN_DOXYGEN_ +{/*Empty body so that doxygen will generate documentation here.*/} +#endif +; + +#define EV_MONOT_PRECISE 1 +#define EV_MONOT_FALLBACK 2 + +/** Format a date string using RFC 1123 format (used in HTTP). + * If `tm` is NULL, current system's time will be used. + * The number of characters written will be returned. + * One should check if the return value is smaller than `datelen` to check if + * the result is truncated or not. + */ +EVENT2_EXPORT_SYMBOL int +evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm); + +/** Allocate a new struct evutil_monotonic_timer for use with the + * evutil_configure_monotonic_time() and evutil_gettime_monotonic() + * functions. You must configure the timer with + * evutil_configure_monotonic_time() before using it. + */ +EVENT2_EXPORT_SYMBOL +struct evutil_monotonic_timer * evutil_monotonic_timer_new(void); + +/** Free a struct evutil_monotonic_timer that was allocated using + * evutil_monotonic_timer_new(). 
+ */ +EVENT2_EXPORT_SYMBOL +void evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer); + +/** Set up a struct evutil_monotonic_timer; flags can include + * EV_MONOT_PRECISE and EV_MONOT_FALLBACK. + */ +EVENT2_EXPORT_SYMBOL +int evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer, + int flags); + +/** Query the current monotonic time from a struct evutil_monotonic_timer + * previously configured with evutil_configure_monotonic_time(). Monotonic + * time is guaranteed never to run in reverse, but is not necessarily epoch- + * based, or relative to any other definite point. Use it to make reliable + * measurements of elapsed time between events even when the system time + * may be changed. + * + * It is not safe to use this funtion on the same timer from multiple + * threads. + */ +EVENT2_EXPORT_SYMBOL +int evutil_gettime_monotonic(struct evutil_monotonic_timer *timer, + struct timeval *tp); + +/** Create two new sockets that are connected to each other. + + On Unix, this simply calls socketpair(). On Windows, it uses the + loopback network interface on 127.0.0.1, and only + AF_INET,SOCK_STREAM are supported. + + (This may fail on some Windows hosts where firewall software has cleverly + decided to keep 127.0.0.1 from talking to itself.) + + Parameters and return values are as for socketpair() +*/ +EVENT2_EXPORT_SYMBOL +int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]); +/** Do platform-specific operations as needed to make a socket nonblocking. + + @param sock The socket to make nonblocking + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_make_socket_nonblocking(evutil_socket_t sock); + +/** Do platform-specific operations to make a listener socket reusable. + + Specifically, we want to make sure that another program will be able + to bind this address right after we've closed the listener. + + This differs from Windows's interpretation of "reusable", which + allows multiple listeners to bind the same address at the same time. + + @param sock The socket to make reusable + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_make_listen_socket_reuseable(evutil_socket_t sock); + +/** Do platform-specific operations to make a listener port reusable. + + Specifically, we want to make sure that multiple programs which also + set the same socket option will be able to bind, listen at the same time. + + This is a feature available only to Linux 3.9+ + + @param sock The socket to make reusable + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_make_listen_socket_reuseable_port(evutil_socket_t sock); + +/** Set ipv6 only bind socket option to make listener work only in ipv6 sockets. + + According to RFC3493 and most Linux distributions, default value for the + sockets is to work in IPv4-mapped mode. In IPv4-mapped mode, it is not possible + to bind same port from different IPv4 and IPv6 handlers. + + @param sock The socket to make in ipv6only working mode + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_make_listen_socket_ipv6only(evutil_socket_t sock); + +/** Do platform-specific operations as needed to close a socket upon a + successful execution of one of the exec*() functions. + + @param sock The socket to be closed + @return 0 on success, -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_make_socket_closeonexec(evutil_socket_t sock); + +/** Do the platform-specific call needed to close a socket returned from + socket() or accept(). 
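A hedged usage sketch of the monotonic-timer API documented above; the work being timed is elided.

#include <event2/util.h>

static void
time_some_work(void)
{
	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
	struct timeval start, end, elapsed;

	if (timer == NULL)
		return;

	if (evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE) == 0) {
		evutil_gettime_monotonic(timer, &start);
		/* ... the work being measured ... */
		evutil_gettime_monotonic(timer, &end);
		evutil_timersub(&end, &start, &elapsed);	/* monotonic, never negative */
	}
	evutil_monotonic_timer_free(timer);
}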
+ + @param sock The socket to be closed + @return 0 on success (whether the operation is supported or not), + -1 on failure + */ +EVENT2_EXPORT_SYMBOL +int evutil_closesocket(evutil_socket_t sock); +#define EVUTIL_CLOSESOCKET(s) evutil_closesocket(s) + +/** Do platform-specific operations, if possible, to make a tcp listener + * socket defer accept()s until there is data to read. + * + * Not all platforms support this. You don't want to do this for every + * listener socket: only the ones that implement a protocol where the + * client transmits before the server needs to respond. + * + * @param sock The listening socket to to make deferred + * @return 0 on success (whether the operation is supported or not), + * -1 on failure +*/ +EVENT2_EXPORT_SYMBOL +int evutil_make_tcp_listen_socket_deferred(evutil_socket_t sock); + +#ifdef _WIN32 +/** Return the most recent socket error. Not idempotent on all platforms. */ +#define EVUTIL_SOCKET_ERROR() WSAGetLastError() +/** Replace the most recent socket error with errcode */ +#define EVUTIL_SET_SOCKET_ERROR(errcode) \ + do { WSASetLastError(errcode); } while (0) +/** Return the most recent socket error to occur on sock. */ +EVENT2_EXPORT_SYMBOL +int evutil_socket_geterror(evutil_socket_t sock); +/** Convert a socket error to a string. */ +EVENT2_EXPORT_SYMBOL +const char *evutil_socket_error_to_string(int errcode); +#define EVUTIL_INVALID_SOCKET INVALID_SOCKET +#elif defined(EVENT_IN_DOXYGEN_) +/** + @name Socket error functions + + These functions are needed for making programs compatible between + Windows and Unix-like platforms. + + You see, Winsock handles socket errors differently from the rest of + the world. Elsewhere, a socket error is like any other error and is + stored in errno. But winsock functions require you to retrieve the + error with a special function, and don't let you use strerror for + the error codes. And handling EWOULDBLOCK is ... different. + + @{ +*/ +/** Return the most recent socket error. Not idempotent on all platforms. */ +#define EVUTIL_SOCKET_ERROR() ... +/** Replace the most recent socket error with errcode */ +#define EVUTIL_SET_SOCKET_ERROR(errcode) ... +/** Return the most recent socket error to occur on sock. */ +#define evutil_socket_geterror(sock) ... +/** Convert a socket error to a string. */ +#define evutil_socket_error_to_string(errcode) ... +#define EVUTIL_INVALID_SOCKET -1 +/**@}*/ +#else /** !EVENT_IN_DOXYGEN_ && !_WIN32 */ +#define EVUTIL_SOCKET_ERROR() (errno) +#define EVUTIL_SET_SOCKET_ERROR(errcode) \ + do { errno = (errcode); } while (0) +#define evutil_socket_geterror(sock) (errno) +#define evutil_socket_error_to_string(errcode) (strerror(errcode)) +#define EVUTIL_INVALID_SOCKET -1 +#endif /** !_WIN32 */ + + +/** + * @name Manipulation macros for struct timeval. + * + * We define replacements + * for timeradd, timersub, timerclear, timercmp, and timerisset. 
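Combining the socket helpers above, a hedged sketch of preparing a listener socket; AF_INET/SOCK_STREAM and the error handling are illustrative.

#include <event2/util.h>
#ifndef _WIN32
#include <sys/socket.h>
#endif

static evutil_socket_t
make_listener_fd(void)
{
	evutil_socket_t fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd == EVUTIL_INVALID_SOCKET)
		return EVUTIL_INVALID_SOCKET;

	if (evutil_make_socket_nonblocking(fd) < 0 ||
	    evutil_make_listen_socket_reuseable(fd) < 0 ||
	    evutil_make_socket_closeonexec(fd) < 0) {
		evutil_closesocket(fd);
		return EVUTIL_INVALID_SOCKET;
	}
	return fd;	/* ready for bind()/listen() */
}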
+ * + * @{ + */ +#ifdef EVENT__HAVE_TIMERADD +#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp)) +#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp)) +#else +#define evutil_timeradd(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ + if ((vvp)->tv_usec >= 1000000) { \ + (vvp)->tv_sec++; \ + (vvp)->tv_usec -= 1000000; \ + } \ + } while (0) +#define evutil_timersub(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ + if ((vvp)->tv_usec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_usec += 1000000; \ + } \ + } while (0) +#endif /* !EVENT__HAVE_TIMERADD */ + +#ifdef EVENT__HAVE_TIMERCLEAR +#define evutil_timerclear(tvp) timerclear(tvp) +#else +#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 +#endif +/**@}*/ + +/** Return true iff the tvp is related to uvp according to the relational + * operator cmp. Recognized values for cmp are ==, <=, <, >=, and >. */ +#define evutil_timercmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? \ + ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ + ((tvp)->tv_sec cmp (uvp)->tv_sec)) + +#ifdef EVENT__HAVE_TIMERISSET +#define evutil_timerisset(tvp) timerisset(tvp) +#else +#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) +#endif + +/** Replacement for offsetof on platforms that don't define it. */ +#ifdef offsetof +#define evutil_offsetof(type, field) offsetof(type, field) +#else +#define evutil_offsetof(type, field) ((off_t)(&((type *)0)->field)) +#endif + +/* big-int related functions */ +/** Parse a 64-bit value from a string. Arguments are as for strtol. */ +EVENT2_EXPORT_SYMBOL +ev_int64_t evutil_strtoll(const char *s, char **endptr, int base); + +/** Replacement for gettimeofday on platforms that lack it. */ +#ifdef EVENT__HAVE_GETTIMEOFDAY +#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz)) +#else +struct timezone; +EVENT2_EXPORT_SYMBOL +int evutil_gettimeofday(struct timeval *tv, struct timezone *tz); +#endif + +/** Replacement for snprintf to get consistent behavior on platforms for + which the return value of snprintf does not conform to C99. + */ +EVENT2_EXPORT_SYMBOL +int evutil_snprintf(char *buf, size_t buflen, const char *format, ...) +#ifdef __GNUC__ + __attribute__((format(printf, 3, 4))) +#endif +; +/** Replacement for vsnprintf to get consistent behavior on platforms for + which the return value of snprintf does not conform to C99. + */ +EVENT2_EXPORT_SYMBOL +int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap) +#ifdef __GNUC__ + __attribute__((format(printf, 3, 0))) +#endif +; + +/** Replacement for inet_ntop for platforms which lack it. */ +EVENT2_EXPORT_SYMBOL +const char *evutil_inet_ntop(int af, const void *src, char *dst, size_t len); +/** Replacement for inet_pton for platforms which lack it. */ +EVENT2_EXPORT_SYMBOL +int evutil_inet_pton(int af, const char *src, void *dst); +struct sockaddr; + +/** Parse an IPv4 or IPv6 address, with optional port, from a string. + + Recognized formats are: + - [IPv6Address]:port + - [IPv6Address] + - IPv6Address + - IPv4Address:port + - IPv4Address + + If no port is specified, the port in the output is set to 0. + + @param str The string to parse. + @param out A struct sockaddr to hold the result. This should probably be + a struct sockaddr_storage. + @param outlen A pointer to the number of bytes that that 'out' can safely + hold. 
Set to the number of bytes used in 'out' on success. + @return -1 if the address is not well-formed, if the port is out of range, + or if out is not large enough to hold the result. Otherwise returns + 0 on success. +*/ +EVENT2_EXPORT_SYMBOL +int evutil_parse_sockaddr_port(const char *str, struct sockaddr *out, int *outlen); + +/** Compare two sockaddrs; return 0 if they are equal, or less than 0 if sa1 + * preceeds sa2, or greater than 0 if sa1 follows sa2. If include_port is + * true, consider the port as well as the address. Only implemented for + * AF_INET and AF_INET6 addresses. The ordering is not guaranteed to remain + * the same between Libevent versions. */ +EVENT2_EXPORT_SYMBOL +int evutil_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2, + int include_port); + +/** As strcasecmp, but always compares the characters in locale-independent + ASCII. That's useful if you're handling data in ASCII-based protocols. + */ +EVENT2_EXPORT_SYMBOL +int evutil_ascii_strcasecmp(const char *str1, const char *str2); +/** As strncasecmp, but always compares the characters in locale-independent + ASCII. That's useful if you're handling data in ASCII-based protocols. + */ +EVENT2_EXPORT_SYMBOL +int evutil_ascii_strncasecmp(const char *str1, const char *str2, size_t n); + +/* Here we define evutil_addrinfo to the native addrinfo type, or redefine it + * if this system has no getaddrinfo(). */ +#ifdef EVENT__HAVE_STRUCT_ADDRINFO +#define evutil_addrinfo addrinfo +#else +/** A definition of struct addrinfo for systems that lack it. + + (This is just an alias for struct addrinfo if the system defines + struct addrinfo.) +*/ +struct evutil_addrinfo { + int ai_flags; /* AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST */ + int ai_family; /* PF_xxx */ + int ai_socktype; /* SOCK_xxx */ + int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */ + size_t ai_addrlen; /* length of ai_addr */ + char *ai_canonname; /* canonical name for nodename */ + struct sockaddr *ai_addr; /* binary address */ + struct evutil_addrinfo *ai_next; /* next structure in linked list */ +}; +#endif +/** @name evutil_getaddrinfo() error codes + + These values are possible error codes for evutil_getaddrinfo() and + related functions. + + @{ +*/ +#if defined(EAI_ADDRFAMILY) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_ADDRFAMILY EAI_ADDRFAMILY +#else +#define EVUTIL_EAI_ADDRFAMILY -901 +#endif +#if defined(EAI_AGAIN) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_AGAIN EAI_AGAIN +#else +#define EVUTIL_EAI_AGAIN -902 +#endif +#if defined(EAI_BADFLAGS) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_BADFLAGS EAI_BADFLAGS +#else +#define EVUTIL_EAI_BADFLAGS -903 +#endif +#if defined(EAI_FAIL) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_FAIL EAI_FAIL +#else +#define EVUTIL_EAI_FAIL -904 +#endif +#if defined(EAI_FAMILY) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_FAMILY EAI_FAMILY +#else +#define EVUTIL_EAI_FAMILY -905 +#endif +#if defined(EAI_MEMORY) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_MEMORY EAI_MEMORY +#else +#define EVUTIL_EAI_MEMORY -906 +#endif +/* This test is a bit complicated, since some MS SDKs decide to + * remove NODATA or redefine it to be the same as NONAME, in a + * fun interpretation of RFC 2553 and RFC 3493. 
*/ +#if defined(EAI_NODATA) && defined(EVENT__HAVE_GETADDRINFO) && (!defined(EAI_NONAME) || EAI_NODATA != EAI_NONAME) +#define EVUTIL_EAI_NODATA EAI_NODATA +#else +#define EVUTIL_EAI_NODATA -907 +#endif +#if defined(EAI_NONAME) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_NONAME EAI_NONAME +#else +#define EVUTIL_EAI_NONAME -908 +#endif +#if defined(EAI_SERVICE) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_SERVICE EAI_SERVICE +#else +#define EVUTIL_EAI_SERVICE -909 +#endif +#if defined(EAI_SOCKTYPE) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_SOCKTYPE EAI_SOCKTYPE +#else +#define EVUTIL_EAI_SOCKTYPE -910 +#endif +#if defined(EAI_SYSTEM) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_EAI_SYSTEM EAI_SYSTEM +#else +#define EVUTIL_EAI_SYSTEM -911 +#endif + +#define EVUTIL_EAI_CANCEL -90001 + +#if defined(AI_PASSIVE) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_PASSIVE AI_PASSIVE +#else +#define EVUTIL_AI_PASSIVE 0x1000 +#endif +#if defined(AI_CANONNAME) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_CANONNAME AI_CANONNAME +#else +#define EVUTIL_AI_CANONNAME 0x2000 +#endif +#if defined(AI_NUMERICHOST) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_NUMERICHOST AI_NUMERICHOST +#else +#define EVUTIL_AI_NUMERICHOST 0x4000 +#endif +#if defined(AI_NUMERICSERV) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_NUMERICSERV AI_NUMERICSERV +#else +#define EVUTIL_AI_NUMERICSERV 0x8000 +#endif +#if defined(AI_V4MAPPED) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_V4MAPPED AI_V4MAPPED +#else +#define EVUTIL_AI_V4MAPPED 0x10000 +#endif +#if defined(AI_ALL) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_ALL AI_ALL +#else +#define EVUTIL_AI_ALL 0x20000 +#endif +#if defined(AI_ADDRCONFIG) && defined(EVENT__HAVE_GETADDRINFO) +#define EVUTIL_AI_ADDRCONFIG AI_ADDRCONFIG +#else +#define EVUTIL_AI_ADDRCONFIG 0x40000 +#endif +/**@}*/ + +struct evutil_addrinfo; +/** + * This function clones getaddrinfo for systems that don't have it. For full + * details, see RFC 3493, section 6.1. + * + * Limitations: + * - When the system has no getaddrinfo, we fall back to gethostbyname_r or + * gethostbyname, with their attendant issues. + * - The AI_V4MAPPED and AI_ALL flags are not currently implemented. + * + * For a nonblocking variant, see evdns_getaddrinfo. + */ +EVENT2_EXPORT_SYMBOL +int evutil_getaddrinfo(const char *nodename, const char *servname, + const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res); + +/** Release storage allocated by evutil_getaddrinfo or evdns_getaddrinfo. */ +EVENT2_EXPORT_SYMBOL +void evutil_freeaddrinfo(struct evutil_addrinfo *ai); + +EVENT2_EXPORT_SYMBOL +const char *evutil_gai_strerror(int err); + +/** Generate n bytes of secure pseudorandom data, and store them in buf. + * + * Current versions of Libevent use an ARC4-based random number generator, + * seeded using the platform's entropy source (/dev/urandom on Unix-like + * systems; CryptGenRandom on Windows). This is not actually as secure as it + * should be: ARC4 is a pretty lousy cipher, and the current implementation + * provides only rudimentary prediction- and backtracking-resistance. Don't + * use this for serious cryptographic applications. + */ +EVENT2_EXPORT_SYMBOL +void evutil_secure_rng_get_bytes(void *buf, size_t n); + +/** + * Seed the secure random number generator if needed, and return 0 on + * success or -1 on failure. 
+ * + * It is okay to call this function more than once; it will still return + * 0 if the RNG has been successfully seeded and -1 if it can't be + * seeded. + * + * Ordinarily you don't need to call this function from your own code; + * Libevent will seed the RNG itself the first time it needs good random + * numbers. You only need to call it if (a) you want to double-check + * that one of the seeding methods did succeed, or (b) you plan to drop + * the capability to seed (by chrooting, or dropping capabilities, or + * whatever), and you want to make sure that seeding happens before your + * program loses the ability to do it. + */ +EVENT2_EXPORT_SYMBOL +int evutil_secure_rng_init(void); + +/** + * Set a filename to use in place of /dev/urandom for seeding the secure + * PRNG. Return 0 on success, -1 on failure. + * + * Call this function BEFORE calling any other initialization or RNG + * functions. + * + * (This string will _NOT_ be copied internally. Do not free it while any + * user of the secure RNG might be running. Don't pass anything other than a + * real /dev/...random device file here, or you might lose security.) + * + * This API is unstable, and might change in a future libevent version. + */ +EVENT2_EXPORT_SYMBOL +int evutil_secure_rng_set_urandom_device_file(char *fname); + +#if !defined(EVENT__HAVE_ARC4RANDOM) || defined(EVENT__HAVE_ARC4RANDOM_ADDRANDOM) +/** Seed the random number generator with extra random bytes. + + You should almost never need to call this function; it should be + sufficient to invoke evutil_secure_rng_init(), or let Libevent take + care of calling evutil_secure_rng_init() on its own. + + If you call this function as a _replacement_ for the regular + entropy sources, then you need to be sure that your input + contains a fairly large amount of strong entropy. Doing so is + notoriously hard: most people who try get it wrong. Watch out! + + @param dat a buffer full of a strong source of random numbers + @param datlen the number of bytes to read from datlen + */ +EVENT2_EXPORT_SYMBOL +void evutil_secure_rng_add_bytes(const char *dat, size_t datlen); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT1_EVUTIL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/event2/visibility.h b/probe-busybox/libevent-2.1.11-stable/include/event2/visibility.h new file mode 100644 index 00000000..006bbf06 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/event2/visibility.h @@ -0,0 +1,67 @@ +/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT2_VISIBILITY_H_INCLUDED_ +#define EVENT2_VISIBILITY_H_INCLUDED_ + +#include + +#if defined(event_shared_EXPORTS) || \ + defined(event_extra_shared_EXPORTS) || \ + defined(event_core_shared_EXPORTS) || \ + defined(event_pthreads_shared_EXPORTS) || \ + defined(event_openssl_shared_EXPORTS) + +# if defined (__SUNPRO_C) && (__SUNPRO_C >= 0x550) +# define EVENT2_EXPORT_SYMBOL __global +# elif defined __GNUC__ +# define EVENT2_EXPORT_SYMBOL __attribute__ ((visibility("default"))) +# elif defined(_MSC_VER) +# define EVENT2_EXPORT_SYMBOL __declspec(dllexport) +# else +# define EVENT2_EXPORT_SYMBOL /* unknown compiler */ +# endif + +#else /* event_*_EXPORTS */ + +# define EVENT2_EXPORT_SYMBOL + +#endif /* event_*_EXPORTS */ + +/** We need to dllimport event_debug_logging_mask_ into event_extra */ +#if defined(_MSC_VER) +# if defined(event_core_shared_EXPORTS) /** from core export */ +# define EVENT2_CORE_EXPORT_SYMBOL __declspec(dllexport) +# elif defined(event_extra_shared_EXPORTS) || /** from extra import */ \ + defined(EVENT_VISIBILITY_WANT_DLLIMPORT) +# define EVENT2_CORE_EXPORT_SYMBOL __declspec(dllimport) +# endif +#endif /* _MSC_VER */ +#if !defined(EVENT2_CORE_EXPORT_SYMBOL) +# define EVENT2_CORE_EXPORT_SYMBOL EVENT2_EXPORT_SYMBOL +#endif + +#endif /* EVENT2_VISIBILITY_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/evhttp.h b/probe-busybox/libevent-2.1.11-stable/include/evhttp.h new file mode 100644 index 00000000..549bc9b1 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/evhttp.h @@ -0,0 +1,45 @@ +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT1_EVHTTP_H_INCLUDED_ +#define EVENT1_EVHTTP_H_INCLUDED_ + +/** @file evhttp.h + + An http implementation subsystem for Libevent. + + The header is deprecated in Libevent 2.0 and later; please + use instead. Depending on what functionality you + need, you may also want to include more of the other + headers. + */ + +#include +#include +#include +#include + +#endif /* EVENT1_EVHTTP_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/evrpc.h b/probe-busybox/libevent-2.1.11-stable/include/evrpc.h new file mode 100644 index 00000000..7e986f7d --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/evrpc.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT1_EVRPC_H_INCLUDED_ +#define EVENT1_EVRPC_H_INCLUDED_ + +/** @file evrpc.h + + An RPC system for Libevent. + + The header is deprecated in Libevent 2.0 and later; please + use instead. Depending on what functionality you + need, you may also want to include more of the other + headers. 
+ */ + +#include +#include +#include +#include + +#endif /* EVENT1_EVRPC_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/evutil.h b/probe-busybox/libevent-2.1.11-stable/include/evutil.h new file mode 100644 index 00000000..12c137d7 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/evutil.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT1_EVUTIL_H_INCLUDED_ +#define EVENT1_EVUTIL_H_INCLUDED_ + +/** @file evutil.h + + Utility and compatibility functions for Libevent. + + The header is deprecated in Libevent 2.0 and later; please + use instead. +*/ + +#include + +#endif /* EVENT1_EVUTIL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/include/include.am b/probe-busybox/libevent-2.1.11-stable/include/include.am new file mode 100644 index 00000000..aaa2042a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/include/include.am @@ -0,0 +1,49 @@ +# include/include.am for libevent +# Copyright 2000-2007 Niels Provos +# Copyright 2007-2012 Niels Provos and Nick Mathewson +# +# See LICENSE for copying information. 
+ +include_event2dir = $(includedir)/event2 + +EVENT2_EXPORT = \ + include/event2/buffer.h \ + include/event2/buffer_compat.h \ + include/event2/bufferevent.h \ + include/event2/bufferevent_compat.h \ + include/event2/bufferevent_struct.h \ + include/event2/dns.h \ + include/event2/dns_compat.h \ + include/event2/dns_struct.h \ + include/event2/event.h \ + include/event2/event_compat.h \ + include/event2/event_struct.h \ + include/event2/http.h \ + include/event2/http_compat.h \ + include/event2/http_struct.h \ + include/event2/keyvalq_struct.h \ + include/event2/listener.h \ + include/event2/rpc.h \ + include/event2/rpc_compat.h \ + include/event2/rpc_struct.h \ + include/event2/tag.h \ + include/event2/tag_compat.h \ + include/event2/thread.h \ + include/event2/util.h \ + include/event2/visibility.h + +if OPENSSL +EVENT2_EXPORT += include/event2/bufferevent_ssl.h +endif + +## Without the nobase_ prefixing, Automake would strip "include/event2/" from +## the source header filename to derive the installed header filename. +## With nobase_ the installed path is $(includedir)/include/event2/ev*.h. + +if INSTALL_LIBEVENT +include_event2_HEADERS = $(EVENT2_EXPORT) +nodist_include_event2_HEADERS = include/event2/event-config.h +else +noinst_HEADERS += $(EVENT2_EXPORT) +nodist_noinst_HEADERS = include/event2/event-config.h +endif diff --git a/probe-busybox/libevent-2.1.11-stable/iocp-internal.h b/probe-busybox/libevent-2.1.11-stable/iocp-internal.h new file mode 100644 index 00000000..21e0e0af --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/iocp-internal.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef IOCP_INTERNAL_H_INCLUDED_ +#define IOCP_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +struct event_overlapped; +struct event_iocp_port; +struct evbuffer; +typedef void (*iocp_callback)(struct event_overlapped *, ev_uintptr_t, ev_ssize_t, int success); + +/* This whole file is actually win32 only. We wrap the structures in a win32 + * ifdef so that we can test-compile code that uses these interfaces on + * non-win32 platforms. 
*/ +#ifdef _WIN32 + +/** + Internal use only. Wraps an OVERLAPPED that we're using for libevent + functionality. Whenever an event_iocp_port gets an event for a given + OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the + iocp_callback function with the event_overlapped, the iocp key, and the + number of bytes transferred as arguments. + */ +struct event_overlapped { + OVERLAPPED overlapped; + iocp_callback cb; +}; + +/* Mingw's headers don't define LPFN_ACCEPTEX. */ + +typedef BOOL (WINAPI *AcceptExPtr)(SOCKET, SOCKET, PVOID, DWORD, DWORD, DWORD, LPDWORD, LPOVERLAPPED); +typedef BOOL (WINAPI *ConnectExPtr)(SOCKET, const struct sockaddr *, int, PVOID, DWORD, LPDWORD, LPOVERLAPPED); +typedef void (WINAPI *GetAcceptExSockaddrsPtr)(PVOID, DWORD, DWORD, DWORD, LPSOCKADDR *, LPINT, LPSOCKADDR *, LPINT); + +/** Internal use only. Holds pointers to functions that only some versions of + Windows provide. + */ +struct win32_extension_fns { + AcceptExPtr AcceptEx; + ConnectExPtr ConnectEx; + GetAcceptExSockaddrsPtr GetAcceptExSockaddrs; +}; + +/** + Internal use only. Stores a Windows IO Completion port, along with + related data. + */ +struct event_iocp_port { + /** The port itself */ + HANDLE port; + /* A lock to cover internal structures. */ + CRITICAL_SECTION lock; + /** Number of threads ever open on the port. */ + short n_threads; + /** True iff we're shutting down all the threads on this port */ + short shutdown; + /** How often the threads on this port check for shutdown and other + * conditions */ + long ms; + /* The threads that are waiting for events. */ + HANDLE *threads; + /** Number of threads currently open on this port. */ + short n_live_threads; + /** A semaphore to signal when we are done shutting down. */ + HANDLE *shutdownSemaphore; +}; + +EVENT2_EXPORT_SYMBOL +const struct win32_extension_fns *event_get_win32_extension_fns_(void); +#else +/* Dummy definition so we can test-compile more things on unix. */ +struct event_overlapped { + iocp_callback cb; +}; +#endif + +/** Initialize the fields in an event_overlapped. + + @param overlapped The struct event_overlapped to initialize + @param cb The callback that should be invoked once the IO operation has + finished. + */ +EVENT2_EXPORT_SYMBOL +void event_overlapped_init_(struct event_overlapped *, iocp_callback cb); + +/** Allocate and return a new evbuffer that supports overlapped IO on a given + socket. The socket must be associated with an IO completion port using + event_iocp_port_associate_. +*/ +EVENT2_EXPORT_SYMBOL +struct evbuffer *evbuffer_overlapped_new_(evutil_socket_t fd); + +/** XXXX Document (nickm) */ +evutil_socket_t evbuffer_overlapped_get_fd_(struct evbuffer *buf); + +void evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd); + +/** Start reading data onto the end of an overlapped evbuffer. + + An evbuffer can only have one read pending at a time. While the read + is in progress, no other data may be added to the end of the buffer. + The buffer must be created with event_overlapped_init_(). + evbuffer_commit_read_() must be called in the completion callback. + + @param buf The buffer to read onto + @param n The number of bytes to try to read. + @param ol Overlapped object with associated completion callback. + @return 0 on success, -1 on error. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_launch_read_(struct evbuffer *buf, size_t n, struct event_overlapped *ol); + +/** Start writing data from the start of an evbuffer. + + An evbuffer can only have one write pending at a time. 
While the write is + in progress, no other data may be removed from the front of the buffer. + The buffer must be created with event_overlapped_init_(). + evbuffer_commit_write_() must be called in the completion callback. + + @param buf The buffer to read onto + @param n The number of bytes to try to read. + @param ol Overlapped object with associated completion callback. + @return 0 on success, -1 on error. + */ +EVENT2_EXPORT_SYMBOL +int evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t n, struct event_overlapped *ol); + +/** XXX document */ +EVENT2_EXPORT_SYMBOL +void evbuffer_commit_read_(struct evbuffer *, ev_ssize_t); +EVENT2_EXPORT_SYMBOL +void evbuffer_commit_write_(struct evbuffer *, ev_ssize_t); + +/** Create an IOCP, and launch its worker threads. Internal use only. + + This interface is unstable, and will change. + */ +EVENT2_EXPORT_SYMBOL +struct event_iocp_port *event_iocp_port_launch_(int n_cpus); + +/** Associate a file descriptor with an iocp, such that overlapped IO on the + fd will happen on one of the iocp's worker threads. +*/ +EVENT2_EXPORT_SYMBOL +int event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd, + ev_uintptr_t key); + +/** Tell all threads serving an iocp to stop. Wait for up to waitMsec for all + the threads to finish whatever they're doing. If waitMsec is -1, wait + as long as required. If all the threads are done, free the port and return + 0. Otherwise, return -1. If you get a -1 return value, it is safe to call + this function again. +*/ +EVENT2_EXPORT_SYMBOL +int event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec); + +/* FIXME document. */ +EVENT2_EXPORT_SYMBOL +int event_iocp_activate_overlapped_(struct event_iocp_port *port, + struct event_overlapped *o, + ev_uintptr_t key, ev_uint32_t n_bytes); + +struct event_base; +/* FIXME document. */ +EVENT2_EXPORT_SYMBOL +struct event_iocp_port *event_base_get_iocp_(struct event_base *base); + +/* FIXME document. */ +EVENT2_EXPORT_SYMBOL +int event_base_start_iocp_(struct event_base *base, int n_cpus); +void event_base_stop_iocp_(struct event_base *base); + +/* FIXME document. */ +EVENT2_EXPORT_SYMBOL +struct bufferevent *bufferevent_async_new_(struct event_base *base, + evutil_socket_t fd, int options); + +/* FIXME document. */ +void bufferevent_async_set_connected_(struct bufferevent *bev); +int bufferevent_async_can_connect_(struct bufferevent *bev); +int bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd, + const struct sockaddr *sa, int socklen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/ipv6-internal.h b/probe-busybox/libevent-2.1.11-stable/ipv6-internal.h new file mode 100644 index 00000000..0c207377 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/ipv6-internal.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Internal use only: Fake IPv6 structures and values on platforms that + * do not have them */ + +#ifndef IPV6_INTERNAL_H_INCLUDED_ +#define IPV6_INTERNAL_H_INCLUDED_ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#include "event2/util.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @file ipv6-internal.h + * + * Replacement types and functions for platforms that don't support ipv6 + * properly. + */ + +#ifndef EVENT__HAVE_STRUCT_IN6_ADDR +struct in6_addr { + ev_uint8_t s6_addr[16]; +}; +#endif + +#ifndef EVENT__HAVE_SA_FAMILY_T +typedef int sa_family_t; +#endif + +#ifndef EVENT__HAVE_STRUCT_SOCKADDR_IN6 +struct sockaddr_in6 { + /* This will fail if we find a struct sockaddr that doesn't have + * sa_family as the first element. */ + sa_family_t sin6_family; + ev_uint16_t sin6_port; + struct in6_addr sin6_addr; +}; +#endif + +#ifndef AF_INET6 +#define AF_INET6 3333 +#endif +#ifndef PF_INET6 +#define PF_INET6 AF_INET6 +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/kqueue-internal.h b/probe-busybox/libevent-2.1.11-stable/kqueue-internal.h new file mode 100644 index 00000000..02c5a360 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/kqueue-internal.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef KQUEUE_INTERNAL_H_INCLUDED_ +#define KQUEUE_INTERNAL_H_INCLUDED_ + +/** Notification function, used to tell an event base to wake up from another + * thread. Only works when event_kq_add_notify_event_() has previously been + * called successfully on that base. */ +int event_kq_notify_base_(struct event_base *base); + +/** Prepare a kqueue-using event base to receive notifications via an internal + * EVFILT_USER event. Return 0 on sucess, -1 on failure. + */ +int event_kq_add_notify_event_(struct event_base *base); + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/kqueue.c b/probe-busybox/libevent-2.1.11-stable/kqueue.c new file mode 100644 index 00000000..d08f512c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/kqueue.c @@ -0,0 +1,577 @@ +/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */ + +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_KQUEUE + +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef EVENT__HAVE_INTTYPES_H +#include +#endif + +/* Some platforms apparently define the udata field of struct kevent as + * intptr_t, whereas others define it as void*. There doesn't seem to be an + * easy way to tell them apart via autoconf, so we need to use OS macros. 
*/ +#if defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__) +#define PTR_TO_UDATA(x) ((intptr_t)(x)) +#define INT_TO_UDATA(x) ((intptr_t)(x)) +#else +#define PTR_TO_UDATA(x) (x) +#define INT_TO_UDATA(x) ((void*)(x)) +#endif + +#include "event-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "event2/thread.h" +#include "event2/util.h" +#include "evthread-internal.h" +#include "changelist-internal.h" + +#include "kqueue-internal.h" + +#define NEVENT 64 + +struct kqop { + struct kevent *changes; + int changes_size; + + struct kevent *events; + int events_size; + int kq; + int notify_event_added; + pid_t pid; +}; + +static void kqop_free(struct kqop *kqop); + +static void *kq_init(struct event_base *); +static int kq_sig_add(struct event_base *, int, short, short, void *); +static int kq_sig_del(struct event_base *, int, short, short, void *); +static int kq_dispatch(struct event_base *, struct timeval *); +static void kq_dealloc(struct event_base *); + +const struct eventop kqops = { + "kqueue", + kq_init, + event_changelist_add_, + event_changelist_del_, + kq_dispatch, + kq_dealloc, + 1 /* need reinit */, + EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS, + EVENT_CHANGELIST_FDINFO_SIZE +}; + +static const struct eventop kqsigops = { + "kqueue_signal", + NULL, + kq_sig_add, + kq_sig_del, + NULL, + NULL, + 1 /* need reinit */, + 0, + 0 +}; + +static void * +kq_init(struct event_base *base) +{ + int kq = -1; + struct kqop *kqueueop = NULL; + + if (!(kqueueop = mm_calloc(1, sizeof(struct kqop)))) + return (NULL); + +/* Initialize the kernel queue */ + + if ((kq = kqueue()) == -1) { + event_warn("kqueue"); + goto err; + } + + kqueueop->kq = kq; + + kqueueop->pid = getpid(); + + /* Initialize fields */ + kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent)); + if (kqueueop->changes == NULL) + goto err; + kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent)); + if (kqueueop->events == NULL) + goto err; + kqueueop->events_size = kqueueop->changes_size = NEVENT; + + /* Check for Mac OS X kqueue bug. */ + memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]); + kqueueop->changes[0].ident = -1; + kqueueop->changes[0].filter = EVFILT_READ; + kqueueop->changes[0].flags = EV_ADD; + /* + * If kqueue works, then kevent will succeed, and it will + * stick an error in events[0]. If kqueue is broken, then + * kevent will fail. + */ + if (kevent(kq, + kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 || + (int)kqueueop->events[0].ident != -1 || + !(kqueueop->events[0].flags & EV_ERROR)) { + event_warn("%s: detected broken kqueue; not using.", __func__); + goto err; + } + + base->evsigsel = &kqsigops; + + return (kqueueop); +err: + if (kqueueop) + kqop_free(kqueueop); + + return (NULL); +} + +#define ADD_UDATA 0x30303 + +static void +kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change) +{ + memset(out, 0, sizeof(struct kevent)); + out->ident = fd; + out->filter = filter; + + if (change & EV_CHANGE_ADD) { + out->flags = EV_ADD; + /* We set a magic number here so that we can tell 'add' + * errors from 'del' errors. 
*/ + out->udata = INT_TO_UDATA(ADD_UDATA); + if (change & EV_ET) + out->flags |= EV_CLEAR; +#ifdef NOTE_EOF + /* Make it behave like select() and poll() */ + if (filter == EVFILT_READ) + out->fflags = NOTE_EOF; +#endif + } else { + EVUTIL_ASSERT(change & EV_CHANGE_DEL); + out->flags = EV_DELETE; + } +} + +static int +kq_build_changes_list(const struct event_changelist *changelist, + struct kqop *kqop) +{ + int i; + int n_changes = 0; + + for (i = 0; i < changelist->n_changes; ++i) { + struct event_change *in_ch = &changelist->changes[i]; + struct kevent *out_ch; + if (n_changes >= kqop->changes_size - 1) { + int newsize; + struct kevent *newchanges; + + if (kqop->changes_size > INT_MAX / 2 || + (size_t)kqop->changes_size * 2 > EV_SIZE_MAX / + sizeof(struct kevent)) { + event_warnx("%s: int overflow", __func__); + return (-1); + } + + newsize = kqop->changes_size * 2; + newchanges = mm_realloc(kqop->changes, + newsize * sizeof(struct kevent)); + if (newchanges == NULL) { + event_warn("%s: realloc", __func__); + return (-1); + } + kqop->changes = newchanges; + kqop->changes_size = newsize; + } + if (in_ch->read_change) { + out_ch = &kqop->changes[n_changes++]; + kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ, + in_ch->read_change); + } + if (in_ch->write_change) { + out_ch = &kqop->changes[n_changes++]; + kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE, + in_ch->write_change); + } + } + return n_changes; +} + +static int +kq_grow_events(struct kqop *kqop, size_t new_size) +{ + struct kevent *newresult; + + newresult = mm_realloc(kqop->events, + new_size * sizeof(struct kevent)); + + if (newresult) { + kqop->events = newresult; + kqop->events_size = new_size; + return 0; + } else { + return -1; + } +} + +static int +kq_dispatch(struct event_base *base, struct timeval *tv) +{ + struct kqop *kqop = base->evbase; + struct kevent *events = kqop->events; + struct kevent *changes; + struct timespec ts, *ts_p = NULL; + int i, n_changes, res; + + if (tv != NULL) { + ts.tv_sec = tv->tv_sec; + ts.tv_nsec = tv->tv_usec * 1000; + ts_p = &ts; + } + + /* Build "changes" from "base->changes" */ + EVUTIL_ASSERT(kqop->changes); + n_changes = kq_build_changes_list(&base->changelist, kqop); + if (n_changes < 0) + return -1; + + event_changelist_remove_all_(&base->changelist, base); + + /* steal the changes array in case some broken code tries to call + * dispatch twice at once. */ + changes = kqop->changes; + kqop->changes = NULL; + + /* Make sure that 'events' is at least as long as the list of changes: + * otherwise errors in the changes can get reported as a -1 return + * value from kevent() rather than as EV_ERROR events in the events + * array. + * + * (We could instead handle -1 return values from kevent() by + * retrying with a smaller changes array or a larger events array, + * but this approach seems less risky for now.) 
+ */ + if (kqop->events_size < n_changes) { + int new_size = kqop->events_size; + do { + new_size *= 2; + } while (new_size < n_changes); + + kq_grow_events(kqop, new_size); + events = kqop->events; + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = kevent(kqop->kq, changes, n_changes, + events, kqop->events_size, ts_p); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + EVUTIL_ASSERT(kqop->changes == NULL); + kqop->changes = changes; + + if (res == -1) { + if (errno != EINTR) { + event_warn("kevent"); + return (-1); + } + + return (0); + } + + event_debug(("%s: kevent reports %d", __func__, res)); + + for (i = 0; i < res; i++) { + int which = 0; + + if (events[i].flags & EV_ERROR) { + switch (events[i].data) { + + /* Can occur on delete if we are not currently + * watching any events on this fd. That can + * happen when the fd was closed and another + * file was opened with that fd. */ + case ENOENT: + /* Can occur for reasons not fully understood + * on FreeBSD. */ + case EINVAL: + continue; +#if defined(__FreeBSD__) + /* + * This currently occurs if an FD is closed + * before the EV_DELETE makes it out via kevent(). + * The FreeBSD capabilities code sees the blank + * capability set and rejects the request to + * modify an event. + * + * To be strictly correct - when an FD is closed, + * all the registered events are also removed. + * Queuing EV_DELETE to a closed FD is wrong. + * The event(s) should just be deleted from + * the pending changelist. + */ + case ENOTCAPABLE: + continue; +#endif + + /* Can occur on a delete if the fd is closed. */ + case EBADF: + /* XXXX On NetBSD, we can also get EBADF if we + * try to add the write side of a pipe, but + * the read side has already been closed. + * Other BSDs call this situation 'EPIPE'. It + * would be good if we had a way to report + * this situation. */ + continue; + /* These two can occur on an add if the fd was one side + * of a pipe, and the other side was closed. */ + case EPERM: + case EPIPE: + /* Report read events, if we're listening for + * them, so that the user can learn about any + * add errors. (If the operation was a + * delete, then udata should be cleared.) */ + if (events[i].udata) { + /* The operation was an add: + * report the error as a read. */ + which |= EV_READ; + break; + } else { + /* The operation was a del: + * report nothing. */ + continue; + } + + /* Other errors shouldn't occur. */ + default: + errno = events[i].data; + return (-1); + } + } else if (events[i].filter == EVFILT_READ) { + which |= EV_READ; + } else if (events[i].filter == EVFILT_WRITE) { + which |= EV_WRITE; + } else if (events[i].filter == EVFILT_SIGNAL) { + which |= EV_SIGNAL; +#ifdef EVFILT_USER + } else if (events[i].filter == EVFILT_USER) { + base->is_notify_pending = 0; +#endif + } + + if (!which) + continue; + + if (events[i].filter == EVFILT_SIGNAL) { + evmap_signal_active_(base, events[i].ident, 1); + } else { + evmap_io_active_(base, events[i].ident, which | EV_ET); + } + } + + if (res == kqop->events_size) { + /* We used all the events space that we have. Maybe we should + make it bigger. 
*/ + kq_grow_events(kqop, kqop->events_size * 2); + } + + return (0); +} + +static void +kqop_free(struct kqop *kqop) +{ + if (kqop->changes) + mm_free(kqop->changes); + if (kqop->events) + mm_free(kqop->events); + if (kqop->kq >= 0 && kqop->pid == getpid()) + close(kqop->kq); + memset(kqop, 0, sizeof(struct kqop)); + mm_free(kqop); +} + +static void +kq_dealloc(struct event_base *base) +{ + struct kqop *kqop = base->evbase; + evsig_dealloc_(base); + kqop_free(kqop); +} + +/* signal handling */ +static int +kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p) +{ + struct kqop *kqop = base->evbase; + struct kevent kev; + struct timespec timeout = { 0, 0 }; + (void)p; + + EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG); + + memset(&kev, 0, sizeof(kev)); + kev.ident = nsignal; + kev.filter = EVFILT_SIGNAL; + kev.flags = EV_ADD; + + /* Be ready for the signal if it is sent any + * time between now and the next call to + * kq_dispatch. */ + if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) + return (-1); + + /* We can set the handler for most signals to SIG_IGN and + * still have them reported to us in the queue. However, + * if the handler for SIGCHLD is SIG_IGN, the system reaps + * zombie processes for us, and we don't get any notification. + * This appears to be the only signal with this quirk. */ + if (evsig_set_handler_(base, nsignal, + nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1) + return (-1); + + return (0); +} + +static int +kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p) +{ + struct kqop *kqop = base->evbase; + struct kevent kev; + + struct timespec timeout = { 0, 0 }; + (void)p; + + EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG); + + memset(&kev, 0, sizeof(kev)); + kev.ident = nsignal; + kev.filter = EVFILT_SIGNAL; + kev.flags = EV_DELETE; + + /* Because we insert signal events + * immediately, we need to delete them + * immediately, too */ + if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) + return (-1); + + if (evsig_restore_handler_(base, nsignal) == -1) + return (-1); + + return (0); +} + + +/* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use + * to wake up the event loop from another thread. */ + +/* Magic number we use for our filter ID. */ +#define NOTIFY_IDENT 42 + +int +event_kq_add_notify_event_(struct event_base *base) +{ + struct kqop *kqop = base->evbase; +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) + struct kevent kev; + struct timespec timeout = { 0, 0 }; +#endif + + if (kqop->notify_event_added) + return 0; + +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) + memset(&kev, 0, sizeof(kev)); + kev.ident = NOTIFY_IDENT; + kev.filter = EVFILT_USER; + kev.flags = EV_ADD | EV_CLEAR; + + if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) { + event_warn("kevent: adding EVFILT_USER event"); + return -1; + } + + kqop->notify_event_added = 1; + + return 0; +#else + return -1; +#endif +} + +int +event_kq_notify_base_(struct event_base *base) +{ + struct kqop *kqop = base->evbase; +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) + struct kevent kev; + struct timespec timeout = { 0, 0 }; +#endif + if (! 
kqop->notify_event_added) + return -1; + +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) + memset(&kev, 0, sizeof(kev)); + kev.ident = NOTIFY_IDENT; + kev.filter = EVFILT_USER; + kev.fflags = NOTE_TRIGGER; + + if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) { + event_warn("kevent: triggering EVFILT_USER event"); + return -1; + } + + return 0; +#else + return -1; +#endif +} + +#endif /* EVENT__HAVE_KQUEUE */ diff --git a/probe-busybox/libevent-2.1.11-stable/libevent.pc.in b/probe-busybox/libevent-2.1.11-stable/libevent.pc.in new file mode 100644 index 00000000..7030884e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/libevent.pc.in @@ -0,0 +1,16 @@ +#libevent pkg-config source file + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libevent +Description: libevent is an asynchronous notification event loop library +Version: @VERSION@ +Requires: +Conflicts: +Libs: -L${libdir} -levent +Libs.private: @LIBS@ +Cflags: -I${includedir} + diff --git a/probe-busybox/libevent-2.1.11-stable/libevent_core.pc.in b/probe-busybox/libevent-2.1.11-stable/libevent_core.pc.in new file mode 100644 index 00000000..98ab1efe --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/libevent_core.pc.in @@ -0,0 +1,16 @@ +#libevent pkg-config source file + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libevent_core +Description: libevent_core +Version: @VERSION@ +Requires: +Conflicts: +Libs: -L${libdir} -levent_core +Libs.private: @LIBS@ +Cflags: -I${includedir} + diff --git a/probe-busybox/libevent-2.1.11-stable/libevent_extra.pc.in b/probe-busybox/libevent-2.1.11-stable/libevent_extra.pc.in new file mode 100644 index 00000000..b9633998 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/libevent_extra.pc.in @@ -0,0 +1,16 @@ +#libevent pkg-config source file + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libevent_extra +Description: libevent_extra +Version: @VERSION@ +Requires: +Conflicts: +Libs: -L${libdir} -levent_extra +Libs.private: @LIBS@ +Cflags: -I${includedir} + diff --git a/probe-busybox/libevent-2.1.11-stable/libevent_openssl.pc.in b/probe-busybox/libevent-2.1.11-stable/libevent_openssl.pc.in new file mode 100644 index 00000000..a65d1e06 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/libevent_openssl.pc.in @@ -0,0 +1,16 @@ +#libevent pkg-config source file + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libevent_openssl +Description: libevent_openssl adds openssl-based TLS support to libevent +Version: @VERSION@ +Requires: libevent +Conflicts: +Libs: -L${libdir} -levent_openssl +Libs.private: @LIBS@ @OPENSSL_LIBS@ +Cflags: -I${includedir} @OPENSSL_INCS@ + diff --git a/probe-busybox/libevent-2.1.11-stable/libevent_pthreads.pc.in b/probe-busybox/libevent-2.1.11-stable/libevent_pthreads.pc.in new file mode 100644 index 00000000..9bc2392b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/libevent_pthreads.pc.in @@ -0,0 +1,16 @@ +#libevent pkg-config source file + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libevent_pthreads +Description: libevent_pthreads adds pthreads-based threading support to libevent +Version: @VERSION@ +Requires: libevent +Conflicts: +Libs: -L${libdir} -levent_pthreads +Libs.private: @LIBS@ @PTHREAD_LIBS@ +Cflags: -I${includedir} @PTHREAD_CFLAGS@ + diff --git a/probe-busybox/libevent-2.1.11-stable/listener.c 
b/probe-busybox/libevent-2.1.11-stable/listener.c new file mode 100644 index 00000000..f5c00c9c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/listener.c @@ -0,0 +1,899 @@ +/* + * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifdef _WIN32 +#ifndef _WIN32_WINNT +/* Minimum required for InitializeCriticalSectionAndSpinCount */ +#define _WIN32_WINNT 0x0403 +#endif +#include +#include +#include +#include +#endif +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif + +#include "event2/listener.h" +#include "event2/util.h" +#include "event2/event.h" +#include "event2/event_struct.h" +#include "mm-internal.h" +#include "util-internal.h" +#include "log-internal.h" +#include "evthread-internal.h" +#ifdef _WIN32 +#include "iocp-internal.h" +#include "defer-internal.h" +#include "event-internal.h" +#endif + +struct evconnlistener_ops { + int (*enable)(struct evconnlistener *); + int (*disable)(struct evconnlistener *); + void (*destroy)(struct evconnlistener *); + void (*shutdown)(struct evconnlistener *); + evutil_socket_t (*getfd)(struct evconnlistener *); + struct event_base *(*getbase)(struct evconnlistener *); +}; + +struct evconnlistener { + const struct evconnlistener_ops *ops; + void *lock; + evconnlistener_cb cb; + evconnlistener_errorcb errorcb; + void *user_data; + unsigned flags; + short refcnt; + int accept4_flags; + unsigned enabled : 1; +}; + +struct evconnlistener_event { + struct evconnlistener base; + struct event listener; +}; + +#ifdef _WIN32 +struct evconnlistener_iocp { + struct evconnlistener base; + evutil_socket_t fd; + struct event_base *event_base; + struct event_iocp_port *port; + short n_accepting; + unsigned shutting_down : 1; + unsigned event_added : 1; + struct accepting_socket **accepting; +}; +#endif + +#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0) +#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0) + +struct evconnlistener * +evconnlistener_new_async(struct 
event_base *base, + evconnlistener_cb cb, void *ptr, unsigned flags, int backlog, + evutil_socket_t fd); /* XXXX export this? */ + +static int event_listener_enable(struct evconnlistener *); +static int event_listener_disable(struct evconnlistener *); +static void event_listener_destroy(struct evconnlistener *); +static evutil_socket_t event_listener_getfd(struct evconnlistener *); +static struct event_base *event_listener_getbase(struct evconnlistener *); + +#if 0 +static void +listener_incref_and_lock(struct evconnlistener *listener) +{ + LOCK(listener); + ++listener->refcnt; +} +#endif + +static int +listener_decref_and_unlock(struct evconnlistener *listener) +{ + int refcnt = --listener->refcnt; + if (refcnt == 0) { + listener->ops->destroy(listener); + UNLOCK(listener); + EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE); + mm_free(listener); + return 1; + } else { + UNLOCK(listener); + return 0; + } +} + +static const struct evconnlistener_ops evconnlistener_event_ops = { + event_listener_enable, + event_listener_disable, + event_listener_destroy, + NULL, /* shutdown */ + event_listener_getfd, + event_listener_getbase +}; + +static void listener_read_cb(evutil_socket_t, short, void *); + +struct evconnlistener * +evconnlistener_new(struct event_base *base, + evconnlistener_cb cb, void *ptr, unsigned flags, int backlog, + evutil_socket_t fd) +{ + struct evconnlistener_event *lev; + +#ifdef _WIN32 + if (base && event_base_get_iocp_(base)) { + const struct win32_extension_fns *ext = + event_get_win32_extension_fns_(); + if (ext->AcceptEx && ext->GetAcceptExSockaddrs) + return evconnlistener_new_async(base, cb, ptr, flags, + backlog, fd); + } +#endif + + if (backlog > 0) { + if (listen(fd, backlog) < 0) + return NULL; + } else if (backlog < 0) { + if (listen(fd, 128) < 0) + return NULL; + } + + lev = mm_calloc(1, sizeof(struct evconnlistener_event)); + if (!lev) + return NULL; + + lev->base.ops = &evconnlistener_event_ops; + lev->base.cb = cb; + lev->base.user_data = ptr; + lev->base.flags = flags; + lev->base.refcnt = 1; + + lev->base.accept4_flags = 0; + if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING)) + lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK; + if (flags & LEV_OPT_CLOSE_ON_EXEC) + lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC; + + if (flags & LEV_OPT_THREADSAFE) { + EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE); + } + + event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST, + listener_read_cb, lev); + + if (!(flags & LEV_OPT_DISABLED)) + evconnlistener_enable(&lev->base); + + return &lev->base; +} + +struct evconnlistener * +evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb, + void *ptr, unsigned flags, int backlog, const struct sockaddr *sa, + int socklen) +{ + struct evconnlistener *listener; + evutil_socket_t fd; + int on = 1; + int family = sa ? 
sa->sa_family : AF_UNSPEC; + int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK; + + if (backlog == 0) + return NULL; + + if (flags & LEV_OPT_CLOSE_ON_EXEC) + socktype |= EVUTIL_SOCK_CLOEXEC; + + fd = evutil_socket_(family, socktype, 0); + if (fd == -1) + return NULL; + + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0) + goto err; + + if (flags & LEV_OPT_REUSEABLE) { + if (evutil_make_listen_socket_reuseable(fd) < 0) + goto err; + } + + if (flags & LEV_OPT_REUSEABLE_PORT) { + if (evutil_make_listen_socket_reuseable_port(fd) < 0) + goto err; + } + + if (flags & LEV_OPT_DEFERRED_ACCEPT) { + if (evutil_make_tcp_listen_socket_deferred(fd) < 0) + goto err; + } + + if (flags & LEV_OPT_BIND_IPV6ONLY) { + if (evutil_make_listen_socket_ipv6only(fd) < 0) + goto err; + } + + if (sa) { + if (bind(fd, sa, socklen)<0) + goto err; + } + + listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd); + if (!listener) + goto err; + + return listener; +err: + evutil_closesocket(fd); + return NULL; +} + +void +evconnlistener_free(struct evconnlistener *lev) +{ + LOCK(lev); + lev->cb = NULL; + lev->errorcb = NULL; + if (lev->ops->shutdown) + lev->ops->shutdown(lev); + listener_decref_and_unlock(lev); +} + +static void +event_listener_destroy(struct evconnlistener *lev) +{ + struct evconnlistener_event *lev_e = + EVUTIL_UPCAST(lev, struct evconnlistener_event, base); + + event_del(&lev_e->listener); + if (lev->flags & LEV_OPT_CLOSE_ON_FREE) + evutil_closesocket(event_get_fd(&lev_e->listener)); + event_debug_unassign(&lev_e->listener); +} + +int +evconnlistener_enable(struct evconnlistener *lev) +{ + int r; + LOCK(lev); + lev->enabled = 1; + if (lev->cb) + r = lev->ops->enable(lev); + else + r = 0; + UNLOCK(lev); + return r; +} + +int +evconnlistener_disable(struct evconnlistener *lev) +{ + int r; + LOCK(lev); + lev->enabled = 0; + r = lev->ops->disable(lev); + UNLOCK(lev); + return r; +} + +static int +event_listener_enable(struct evconnlistener *lev) +{ + struct evconnlistener_event *lev_e = + EVUTIL_UPCAST(lev, struct evconnlistener_event, base); + return event_add(&lev_e->listener, NULL); +} + +static int +event_listener_disable(struct evconnlistener *lev) +{ + struct evconnlistener_event *lev_e = + EVUTIL_UPCAST(lev, struct evconnlistener_event, base); + return event_del(&lev_e->listener); +} + +evutil_socket_t +evconnlistener_get_fd(struct evconnlistener *lev) +{ + evutil_socket_t fd; + LOCK(lev); + fd = lev->ops->getfd(lev); + UNLOCK(lev); + return fd; +} + +static evutil_socket_t +event_listener_getfd(struct evconnlistener *lev) +{ + struct evconnlistener_event *lev_e = + EVUTIL_UPCAST(lev, struct evconnlistener_event, base); + return event_get_fd(&lev_e->listener); +} + +struct event_base * +evconnlistener_get_base(struct evconnlistener *lev) +{ + struct event_base *base; + LOCK(lev); + base = lev->ops->getbase(lev); + UNLOCK(lev); + return base; +} + +static struct event_base * +event_listener_getbase(struct evconnlistener *lev) +{ + struct evconnlistener_event *lev_e = + EVUTIL_UPCAST(lev, struct evconnlistener_event, base); + return event_get_base(&lev_e->listener); +} + +void +evconnlistener_set_cb(struct evconnlistener *lev, + evconnlistener_cb cb, void *arg) +{ + int enable = 0; + LOCK(lev); + if (lev->enabled && !lev->cb) + enable = 1; + lev->cb = cb; + lev->user_data = arg; + if (enable) + evconnlistener_enable(lev); + UNLOCK(lev); +} + +void +evconnlistener_set_error_cb(struct evconnlistener *lev, + evconnlistener_errorcb errorcb) +{ + LOCK(lev); + lev->errorcb = 
errorcb; + UNLOCK(lev); +} + +static void +listener_read_cb(evutil_socket_t fd, short what, void *p) +{ + struct evconnlistener *lev = p; + int err; + evconnlistener_cb cb; + evconnlistener_errorcb errorcb; + void *user_data; + LOCK(lev); + while (1) { + struct sockaddr_storage ss; + ev_socklen_t socklen = sizeof(ss); + evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags); + if (new_fd < 0) + break; + if (socklen == 0) { + /* This can happen with some older linux kernels in + * response to nmap. */ + evutil_closesocket(new_fd); + continue; + } + + if (lev->cb == NULL) { + evutil_closesocket(new_fd); + UNLOCK(lev); + return; + } + ++lev->refcnt; + cb = lev->cb; + user_data = lev->user_data; + UNLOCK(lev); + cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen, + user_data); + LOCK(lev); + if (lev->refcnt == 1) { + int freed = listener_decref_and_unlock(lev); + EVUTIL_ASSERT(freed); + return; + } + --lev->refcnt; + if (!lev->enabled) { + /* the callback could have disabled the listener */ + UNLOCK(lev); + return; + } + } + err = evutil_socket_geterror(fd); + if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) { + UNLOCK(lev); + return; + } + if (lev->errorcb != NULL) { + ++lev->refcnt; + errorcb = lev->errorcb; + user_data = lev->user_data; + UNLOCK(lev); + errorcb(lev, user_data); + LOCK(lev); + listener_decref_and_unlock(lev); + } else { + event_sock_warn(fd, "Error from accept() call"); + UNLOCK(lev); + } +} + +#ifdef _WIN32 +struct accepting_socket { + CRITICAL_SECTION lock; + struct event_overlapped overlapped; + SOCKET s; + int error; + struct event_callback deferred; + struct evconnlistener_iocp *lev; + ev_uint8_t buflen; + ev_uint8_t family; + unsigned free_on_cb:1; + char addrbuf[1]; +}; + +static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, + ev_ssize_t n, int ok); +static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg); + +static void +iocp_listener_event_add(struct evconnlistener_iocp *lev) +{ + if (lev->event_added) + return; + + lev->event_added = 1; + event_base_add_virtual_(lev->event_base); +} + +static void +iocp_listener_event_del(struct evconnlistener_iocp *lev) +{ + if (!lev->event_added) + return; + + lev->event_added = 0; + event_base_del_virtual_(lev->event_base); +} + +static struct accepting_socket * +new_accepting_socket(struct evconnlistener_iocp *lev, int family) +{ + struct accepting_socket *res; + int addrlen; + int buflen; + + if (family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if (family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return NULL; + buflen = (addrlen+16)*2; + + res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen); + if (!res) + return NULL; + + event_overlapped_init_(&res->overlapped, accepted_socket_cb); + res->s = EVUTIL_INVALID_SOCKET; + res->lev = lev; + res->buflen = buflen; + res->family = family; + + event_deferred_cb_init_(&res->deferred, + event_base_get_npriorities(lev->event_base) / 2, + accepted_socket_invoke_user_cb, res); + + InitializeCriticalSectionAndSpinCount(&res->lock, 1000); + + return res; +} + +static void +free_and_unlock_accepting_socket(struct accepting_socket *as) +{ + /* requires lock. 
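For reference, the portable listener API implemented above (evconnlistener_new_bind() feeding accepted sockets to the callback driven by listener_read_cb()) is normally used from application code roughly as follows. This is a minimal sketch: the callback name, helper name, and port number are invented for illustration, and error handling is abbreviated.

    #include <event2/event.h>
    #include <event2/listener.h>
    #include <event2/util.h>
    #include <string.h>
    #include <netinet/in.h>

    /* Invoked once per accepted connection (see listener_read_cb above). */
    static void sketch_accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
        struct sockaddr *addr, int socklen, void *ctx)
    {
        /* A real application would wrap fd in a bufferevent; the sketch just closes it. */
        evutil_closesocket(fd);
    }

    int sketch_run_listener(void)
    {
        struct event_base *base = event_base_new();
        struct evconnlistener *listener;
        struct sockaddr_in sin;

        if (base == NULL)
            return -1;
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(9995); /* hypothetical port, any local address */

        listener = evconnlistener_new_bind(base, sketch_accept_cb, NULL,
            LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1 /* default backlog */,
            (struct sockaddr *)&sin, sizeof(sin));
        if (listener == NULL) {
            event_base_free(base);
            return -1;
        }

        event_base_dispatch(base);
        evconnlistener_free(listener);
        event_base_free(base);
        return 0;
    }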
*/ + if (as->s != EVUTIL_INVALID_SOCKET) + closesocket(as->s); + + LeaveCriticalSection(&as->lock); + DeleteCriticalSection(&as->lock); + mm_free(as); +} + +static int +start_accepting(struct accepting_socket *as) +{ + /* requires lock */ + const struct win32_extension_fns *ext = event_get_win32_extension_fns_(); + DWORD pending = 0; + SOCKET s = socket(as->family, SOCK_STREAM, 0); + int error = 0; + + if (!as->lev->base.enabled) + return 0; + + if (s == EVUTIL_INVALID_SOCKET) { + error = WSAGetLastError(); + goto report_err; + } + + /* XXXX It turns out we need to do this again later. Does this call + * have any effect? */ + setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, + (char *)&as->lev->fd, sizeof(&as->lev->fd)); + + if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING)) + evutil_make_socket_nonblocking(s); + + if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) { + closesocket(s); + return -1; + } + + as->s = s; + + if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0, + as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped)) + { + /* Immediate success! */ + accepted_socket_cb(&as->overlapped, 1, 0, 1); + } else { + error = WSAGetLastError(); + if (error != ERROR_IO_PENDING) { + goto report_err; + } + } + + return 0; + +report_err: + as->error = error; + event_deferred_cb_schedule_( + as->lev->event_base, + &as->deferred); + return 0; +} + +static void +stop_accepting(struct accepting_socket *as) +{ + /* requires lock. */ + SOCKET s = as->s; + as->s = EVUTIL_INVALID_SOCKET; + closesocket(s); +} + +static void +accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg) +{ + struct accepting_socket *as = arg; + + struct sockaddr *sa_local=NULL, *sa_remote=NULL; + int socklen_local=0, socklen_remote=0; + const struct win32_extension_fns *ext = event_get_win32_extension_fns_(); + struct evconnlistener *lev = &as->lev->base; + evutil_socket_t sock=-1; + void *data; + evconnlistener_cb cb=NULL; + evconnlistener_errorcb errorcb=NULL; + int error; + + EVUTIL_ASSERT(ext->GetAcceptExSockaddrs); + + LOCK(lev); + EnterCriticalSection(&as->lock); + if (as->free_on_cb) { + free_and_unlock_accepting_socket(as); + listener_decref_and_unlock(lev); + return; + } + + ++lev->refcnt; + + error = as->error; + if (error) { + as->error = 0; + errorcb = lev->errorcb; + } else { + ext->GetAcceptExSockaddrs( + as->addrbuf, 0, as->buflen/2, as->buflen/2, + &sa_local, &socklen_local, &sa_remote, + &socklen_remote); + sock = as->s; + cb = lev->cb; + as->s = EVUTIL_INVALID_SOCKET; + + /* We need to call this so getsockname, getpeername, and + * shutdown work correctly on the accepted socket. */ + /* XXXX handle error? */ + setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, + (char *)&as->lev->fd, sizeof(&as->lev->fd)); + } + data = lev->user_data; + + LeaveCriticalSection(&as->lock); + UNLOCK(lev); + + if (errorcb) { + WSASetLastError(error); + errorcb(lev, data); + } else if (cb) { + cb(lev, sock, sa_remote, socklen_remote, data); + } + + LOCK(lev); + if (listener_decref_and_unlock(lev)) + return; + + EnterCriticalSection(&as->lock); + start_accepting(as); + LeaveCriticalSection(&as->lock); +} + +static void +accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok) +{ + struct accepting_socket *as = + EVUTIL_UPCAST(o, struct accepting_socket, overlapped); + + LOCK(&as->lev->base); + EnterCriticalSection(&as->lock); + if (ok) { + /* XXXX Don't do this if some EV_MT flag is set. 
*/ + event_deferred_cb_schedule_( + as->lev->event_base, + &as->deferred); + LeaveCriticalSection(&as->lock); + } else if (as->free_on_cb) { + struct evconnlistener *lev = &as->lev->base; + free_and_unlock_accepting_socket(as); + listener_decref_and_unlock(lev); + return; + } else if (as->s == EVUTIL_INVALID_SOCKET) { + /* This is okay; we were disabled by iocp_listener_disable. */ + LeaveCriticalSection(&as->lock); + } else { + /* Some error on accept that we couldn't actually handle. */ + BOOL ok; + DWORD transfer = 0, flags=0; + event_sock_warn(as->s, "Unexpected error on AcceptEx"); + ok = WSAGetOverlappedResult(as->s, &o->overlapped, + &transfer, FALSE, &flags); + if (ok) { + /* well, that was confusing! */ + as->error = 1; + } else { + as->error = WSAGetLastError(); + } + event_deferred_cb_schedule_( + as->lev->event_base, + &as->deferred); + LeaveCriticalSection(&as->lock); + } + UNLOCK(&as->lev->base); +} + +static int +iocp_listener_enable(struct evconnlistener *lev) +{ + int i; + struct evconnlistener_iocp *lev_iocp = + EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base); + + LOCK(lev); + iocp_listener_event_add(lev_iocp); + for (i = 0; i < lev_iocp->n_accepting; ++i) { + struct accepting_socket *as = lev_iocp->accepting[i]; + if (!as) + continue; + EnterCriticalSection(&as->lock); + if (!as->free_on_cb && as->s == EVUTIL_INVALID_SOCKET) + start_accepting(as); + LeaveCriticalSection(&as->lock); + } + UNLOCK(lev); + return 0; +} + +static int +iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown) +{ + int i; + struct evconnlistener_iocp *lev_iocp = + EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base); + + LOCK(lev); + iocp_listener_event_del(lev_iocp); + for (i = 0; i < lev_iocp->n_accepting; ++i) { + struct accepting_socket *as = lev_iocp->accepting[i]; + if (!as) + continue; + EnterCriticalSection(&as->lock); + if (!as->free_on_cb && as->s != EVUTIL_INVALID_SOCKET) { + if (shutdown) + as->free_on_cb = 1; + stop_accepting(as); + } + LeaveCriticalSection(&as->lock); + } + + if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE) + evutil_closesocket(lev_iocp->fd); + + UNLOCK(lev); + return 0; +} + +static int +iocp_listener_disable(struct evconnlistener *lev) +{ + return iocp_listener_disable_impl(lev,0); +} + +static void +iocp_listener_destroy(struct evconnlistener *lev) +{ + struct evconnlistener_iocp *lev_iocp = + EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base); + + if (! lev_iocp->shutting_down) { + lev_iocp->shutting_down = 1; + iocp_listener_disable_impl(lev,1); + } + +} + +static evutil_socket_t +iocp_listener_getfd(struct evconnlistener *lev) +{ + struct evconnlistener_iocp *lev_iocp = + EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base); + return lev_iocp->fd; +} +static struct event_base * +iocp_listener_getbase(struct evconnlistener *lev) +{ + struct evconnlistener_iocp *lev_iocp = + EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base); + return lev_iocp->event_base; +} + +static const struct evconnlistener_ops evconnlistener_iocp_ops = { + iocp_listener_enable, + iocp_listener_disable, + iocp_listener_destroy, + iocp_listener_destroy, /* shutdown */ + iocp_listener_getfd, + iocp_listener_getbase +}; + +/* XXX define some way to override this. 
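On Windows, the same evconnlistener_new() entry point switches transparently to the AcceptEx-based machinery above when the event_base was created with IOCP enabled. A sketch of building such a base through the public configuration API follows; the helper name is invented, and this only applies to Windows builds with thread support.

    #include <event2/event.h>
    #include <event2/thread.h>

    struct event_base *sketch_make_iocp_base(void)
    {
        struct event_config *cfg;
        struct event_base *base;

        /* IOCP dispatching needs the Windows threading backend. */
        evthread_use_windows_threads();

        cfg = event_config_new();
        if (cfg == NULL)
            return NULL;
        event_config_set_flags(cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
        return base;
    }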
*/ +#define N_SOCKETS_PER_LISTENER 4 + +struct evconnlistener * +evconnlistener_new_async(struct event_base *base, + evconnlistener_cb cb, void *ptr, unsigned flags, int backlog, + evutil_socket_t fd) +{ + struct sockaddr_storage ss; + int socklen = sizeof(ss); + struct evconnlistener_iocp *lev; + int i; + + flags |= LEV_OPT_THREADSAFE; + + if (!base || !event_base_get_iocp_(base)) + goto err; + + /* XXXX duplicate code */ + if (backlog > 0) { + if (listen(fd, backlog) < 0) + goto err; + } else if (backlog < 0) { + if (listen(fd, 128) < 0) + goto err; + } + if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) { + event_sock_warn(fd, "getsockname"); + goto err; + } + lev = mm_calloc(1, sizeof(struct evconnlistener_iocp)); + if (!lev) { + event_warn("calloc"); + goto err; + } + lev->base.ops = &evconnlistener_iocp_ops; + lev->base.cb = cb; + lev->base.user_data = ptr; + lev->base.flags = flags; + lev->base.refcnt = 1; + lev->base.enabled = 1; + + lev->port = event_base_get_iocp_(base); + lev->fd = fd; + lev->event_base = base; + + + if (event_iocp_port_associate_(lev->port, fd, 1) < 0) + goto err_free_lev; + + EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE); + + lev->n_accepting = N_SOCKETS_PER_LISTENER; + lev->accepting = mm_calloc(lev->n_accepting, + sizeof(struct accepting_socket *)); + if (!lev->accepting) { + event_warn("calloc"); + goto err_delete_lock; + } + for (i = 0; i < lev->n_accepting; ++i) { + lev->accepting[i] = new_accepting_socket(lev, ss.ss_family); + if (!lev->accepting[i]) { + event_warnx("Couldn't create accepting socket"); + goto err_free_accepting; + } + if (cb && start_accepting(lev->accepting[i]) < 0) { + event_warnx("Couldn't start accepting on socket"); + EnterCriticalSection(&lev->accepting[i]->lock); + free_and_unlock_accepting_socket(lev->accepting[i]); + goto err_free_accepting; + } + ++lev->base.refcnt; + } + + iocp_listener_event_add(lev); + + return &lev->base; + +err_free_accepting: + mm_free(lev->accepting); + /* XXXX free the other elements. */ +err_delete_lock: + EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE); +err_free_lev: + mm_free(lev); +err: + /* Don't close the fd, it is caller's responsibility. */ + return NULL; +} + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/log-internal.h b/probe-busybox/libevent-2.1.11-stable/log-internal.h new file mode 100644 index 00000000..2c31608b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/log-internal.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef LOG_INTERNAL_H_INCLUDED_ +#define LOG_INTERNAL_H_INCLUDED_ + +#include "event2/util.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __GNUC__ +#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b))) +#define EV_NORETURN __attribute__((noreturn)) +#else +#define EV_CHECK_FMT(a,b) +#define EV_NORETURN +#endif + +#define EVENT_ERR_ABORT_ ((int)0xdeaddead) + +#if !defined(EVENT__DISABLE_DEBUG_MODE) || defined(USE_DEBUG) +#define EVENT_DEBUG_LOGGING_ENABLED +#endif + +#ifdef EVENT_DEBUG_LOGGING_ENABLED +EVENT2_CORE_EXPORT_SYMBOL extern ev_uint32_t event_debug_logging_mask_; +#define event_debug_get_logging_mask_() (event_debug_logging_mask_) +#else +#define event_debug_get_logging_mask_() (0) +#endif + +EVENT2_EXPORT_SYMBOL +void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN; +EVENT2_EXPORT_SYMBOL +void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2); +EVENT2_EXPORT_SYMBOL +void event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(3,4) EV_NORETURN; +EVENT2_EXPORT_SYMBOL +void event_sock_warn(evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(2,3); +EVENT2_EXPORT_SYMBOL +void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN; +EVENT2_EXPORT_SYMBOL +void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2); +EVENT2_EXPORT_SYMBOL +void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2); +EVENT2_EXPORT_SYMBOL +void event_debugx_(const char *fmt, ...) EV_CHECK_FMT(1,2); + +EVENT2_EXPORT_SYMBOL +void event_logv_(int severity, const char *errstr, const char *fmt, va_list ap) + EV_CHECK_FMT(3,0); + +#ifdef EVENT_DEBUG_LOGGING_ENABLED +#define event_debug(x) do { \ + if (event_debug_get_logging_mask_()) { \ + event_debugx_ x; \ + } \ + } while (0) +#else +#define event_debug(x) ((void)0) +#endif + +#undef EV_CHECK_FMT + +#ifdef __cplusplus +} +#endif + +#endif /* LOG_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/log.c b/probe-busybox/libevent-2.1.11-stable/log.c new file mode 100644 index 00000000..a9debb86 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/log.c @@ -0,0 +1,244 @@ +/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */ + +/* + * log.c + * + * Based on err.c, which was adapted from OpenBSD libc *err* *warn* code. + * + * Copyright (c) 2005-2012 Niels Provos and Nick Mathewson + * + * Copyright (c) 2000 Dug Song + * + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#include +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#endif +#include +#include +#include +#include +#include +#include +#include "event2/event.h" +#include "event2/util.h" + +#include "log-internal.h" + +static void event_log(int severity, const char *msg); +static void event_exit(int errcode) EV_NORETURN; + +static event_fatal_cb fatal_fn = NULL; + +#ifdef EVENT_DEBUG_LOGGING_ENABLED +#ifdef USE_DEBUG +#define DEFAULT_MASK EVENT_DBG_ALL +#else +#define DEFAULT_MASK 0 +#endif + +EVENT2_EXPORT_SYMBOL ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK; +#endif /* EVENT_DEBUG_LOGGING_ENABLED */ + +void +event_enable_debug_logging(ev_uint32_t which) +{ +#ifdef EVENT_DEBUG_LOGGING_ENABLED + event_debug_logging_mask_ = which; +#endif +} + +void +event_set_fatal_callback(event_fatal_cb cb) +{ + fatal_fn = cb; +} + +static void +event_exit(int errcode) +{ + if (fatal_fn) { + fatal_fn(errcode); + exit(errcode); /* should never be reached */ + } else if (errcode == EVENT_ERR_ABORT_) + abort(); + else + exit(errcode); +} + +void +event_err(int eval, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_ERR, strerror(errno), fmt, ap); + va_end(ap); + event_exit(eval); +} + +void +event_warn(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_WARN, strerror(errno), fmt, ap); + va_end(ap); +} + +void +event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...) +{ + va_list ap; + int err = evutil_socket_geterror(sock); + + va_start(ap, fmt); + event_logv_(EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap); + va_end(ap); + event_exit(eval); +} + +void +event_sock_warn(evutil_socket_t sock, const char *fmt, ...) +{ + va_list ap; + int err = evutil_socket_geterror(sock); + + va_start(ap, fmt); + event_logv_(EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap); + va_end(ap); +} + +void +event_errx(int eval, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_ERR, NULL, fmt, ap); + va_end(ap); + event_exit(eval); +} + +void +event_warnx(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_WARN, NULL, fmt, ap); + va_end(ap); +} + +void +event_msgx(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_MSG, NULL, fmt, ap); + va_end(ap); +} + +void +event_debugx_(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + event_logv_(EVENT_LOG_DEBUG, NULL, fmt, ap); + va_end(ap); +} + +void +event_logv_(int severity, const char *errstr, const char *fmt, va_list ap) +{ + char buf[1024]; + size_t len; + + if (severity == EVENT_LOG_DEBUG && !event_debug_get_logging_mask_()) + return; + + if (fmt != NULL) + evutil_vsnprintf(buf, sizeof(buf), fmt, ap); + else + buf[0] = '\0'; + + if (errstr) { + len = strlen(buf); + if (len < sizeof(buf) - 3) { + evutil_snprintf(buf + len, sizeof(buf) - len, ": %s", errstr); + } + } + + event_log(severity, buf); +} + +static event_log_cb log_fn = NULL; + +void +event_set_log_callback(event_log_cb cb) +{ + log_fn = cb; +} + +static void +event_log(int severity, const char *msg) +{ + if (log_fn) + log_fn(severity, msg); + else { + const char *severity_str; + switch (severity) { + case EVENT_LOG_DEBUG: + severity_str = "debug"; + break; + case EVENT_LOG_MSG: + severity_str = "msg"; + break; + case EVENT_LOG_WARN: + severity_str = "warn"; + break; + case EVENT_LOG_ERR: + severity_str = "err"; + break; + default: + severity_str = "???"; + break; + } + (void)fprintf(stderr, "[%s] %s\n", severity_str, msg); + } +} diff --git a/probe-busybox/libevent-2.1.11-stable/m4/ac_backport_259_ssizet.m4 b/probe-busybox/libevent-2.1.11-stable/m4/ac_backport_259_ssizet.m4 new file mode 100644 index 00000000..75fde386 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/m4/ac_backport_259_ssizet.m4 @@ -0,0 +1,3 @@ +AN_IDENTIFIER([ssize_t], [AC_TYPE_SSIZE_T]) +AC_DEFUN([AC_TYPE_SSIZE_T], [AC_CHECK_TYPE(ssize_t, int)]) + diff --git a/probe-busybox/libevent-2.1.11-stable/m4/acx_pthread.m4 b/probe-busybox/libevent-2.1.11-stable/m4/acx_pthread.m4 new file mode 100644 index 00000000..d2b11694 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/m4/acx_pthread.m4 @@ -0,0 +1,279 @@ +##### http://autoconf-archive.cryp.to/acx_pthread.html +# +# SYNOPSIS +# +# ACX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) +# +# DESCRIPTION +# +# This macro figures out how to build C programs using POSIX threads. +# It sets the PTHREAD_LIBS output variable to the threads library and +# linker flags, and the PTHREAD_CFLAGS output variable to any special +# C compiler flags that are needed. (The user can also force certain +# compiler flags/libs to be tested by setting these environment +# variables.) +# +# Also sets PTHREAD_CC to any special C compiler that is needed for +# multi-threaded programs (defaults to the value of CC otherwise). +# (This is necessary on AIX to use the special cc_r compiler alias.) +# +# NOTE: You are assumed to not only compile your program with these +# flags, but also link it with them as well. e.g. you should link +# with $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS +# $LIBS +# +# If you are only building threads programs, you may wish to use +# these variables in your default LIBS, CFLAGS, and CC: +# +# LIBS="$PTHREAD_LIBS $LIBS" +# CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +# CC="$PTHREAD_CC" +# +# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute +# constant has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to +# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX). +# +# ACTION-IF-FOUND is a list of shell commands to run if a threads +# library is found, and ACTION-IF-NOT-FOUND is a list of commands to +# run it if it is not found. If ACTION-IF-FOUND is not specified, the +# default action will define HAVE_PTHREAD. 
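The logging entry points declared in log-internal.h and implemented in log.c above back the public hooks exposed in event2/event.h. Redirecting libevent's messages into an application logger looks roughly like the sketch below; the callback name and message prefix are illustrative only.

    #include <event2/event.h>
    #include <stdio.h>

    /* Receives every message that event_log() would otherwise print to stderr. */
    static void sketch_log_cb(int severity, const char *msg)
    {
        const char *tag = (severity == EVENT_LOG_ERR) ? "err" : "info";
        fprintf(stderr, "libevent[%s]: %s\n", tag, msg);
    }

    void sketch_install_logging(void)
    {
        event_set_log_callback(sketch_log_cb);
        /* Only effective when debug logging was compiled in
         * (EVENT_DEBUG_LOGGING_ENABLED above). */
        event_enable_debug_logging(EVENT_DBG_ALL);
    }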
+# +# Please let the authors know if this macro fails on any platform, or +# if you have any other suggestions or comments. This macro was based +# on work by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) +# (with help from M. Frigo), as well as ac_pthread and hb_pthread +# macros posted by Alejandro Forero Cuervo to the autoconf macro +# repository. We are also grateful for the helpful feedback of +# numerous users. +# +# LAST MODIFICATION +# +# 2007-07-29 +# +# COPYLEFT +# +# Copyright (c) 2007 Steven G. Johnson +# +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see +# . +# +# As a special exception, the respective Autoconf Macro's copyright +# owner gives unlimited permission to copy, distribute and modify the +# configure scripts that are the output of Autoconf when processing +# the Macro. You need not follow the terms of the GNU General Public +# License when using or distributing such scripts, even though +# portions of the text of the Macro appear in them. The GNU General +# Public License (GPL) does govern all other use of the material that +# constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the +# Autoconf Macro released by the Autoconf Macro Archive. When you +# make and distribute a modified version of the Autoconf Macro, you +# may extend this special exception to the GPL to apply to your +# modified version as well. + +AC_DEFUN([ACX_PTHREAD], [ +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_LANG_SAVE +AC_LANG_C +acx_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. + +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) + AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes) + AC_MSG_RESULT($acx_pthread_ok) + if test x"$acx_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). + +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. 
+ +acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" + +# The ordering *is* (sometimes) important. Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case "${host_cpu}-${host_os}" in + *solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags" + ;; +esac + +if test x"$acx_pthread_ok" = xno; then +for flag in $acx_pthread_flags; do + + case $flag in + none) + AC_MSG_CHECKING([whether pthreads work without any flags]) + ;; + + -*) + AC_MSG_CHECKING([whether pthreads work with $flag]) + PTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no) + if test x"$acx_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + AC_MSG_CHECKING([for the pthreads library -l$flag]) + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. 
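The AC_TRY_LINK probe that follows builds a tiny program against each candidate flag. Expressed as plain C it corresponds roughly to the sketch below: it references several different pthread functions so that the stubbed, non-functional routines shipped in some libc versions are not mistaken for a working threads library. The function names here are invented, and the header is pthread.h as the comment above states.

    #include <pthread.h>
    #include <stddef.h>

    static void sketch_cleanup(void *arg) { (void)arg; }
    static void *sketch_worker(void *arg) { return arg; }

    int sketch_pthread_link_test(void)
    {
        pthread_t th;
        pthread_attr_t attr;

        pthread_attr_init(&attr);
        pthread_cleanup_push(sketch_cleanup, NULL);
        pthread_create(&th, &attr, sketch_worker, NULL);
        pthread_cleanup_pop(0);
        pthread_join(th, NULL);
        return 0;
    }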
+ AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [acx_pthread_ok=yes]) + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + AC_MSG_RESULT($acx_pthread_ok) + if test "x$acx_pthread_ok" = xyes; then + break; + fi + + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi + +# Various other checks: +if test "x$acx_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. + AC_MSG_CHECKING([for joinable pthread attribute]) + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + AC_TRY_LINK([#include ], [int attr=$attr; return attr;], + [attr_name=$attr; break]) + done + AC_MSG_RESULT($attr_name) + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then + AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name, + [Define to necessary symbol if this constant + uses a non-standard name on your system.]) + fi + + AC_MSG_CHECKING([if more special flags are required for pthreads]) + flag=no + case "${host_cpu}-${host_os}" in + *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; + *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; + esac + AC_MSG_RESULT(${flag}) + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" + fi + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + # More AIX lossage: must compile with xlc_r or cc_r + if test x"$GCC" != xyes; then + AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC}) + else + PTHREAD_CC=$CC + fi +else + PTHREAD_CC="$CC" +fi + +AC_SUBST(PTHREAD_LIBS) +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_CC) + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$acx_pthread_ok" = xyes; then + ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1]) + : +else + acx_pthread_ok=no + $2 +fi +AC_LANG_RESTORE +])dnl ACX_PTHREAD diff --git a/probe-busybox/libevent-2.1.11-stable/m4/libevent_openssl.m4 b/probe-busybox/libevent-2.1.11-stable/m4/libevent_openssl.m4 new file mode 100644 index 00000000..19811981 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/m4/libevent_openssl.m4 @@ -0,0 +1,56 @@ +dnl ###################################################################### +dnl OpenSSL support +AC_DEFUN([LIBEVENT_OPENSSL], [ +AC_REQUIRE([NTP_PKG_CONFIG])dnl + +case "$enable_openssl" in + yes) + have_openssl=no + case "$PKG_CONFIG" in + '') + ;; + *) + OPENSSL_LIBS=`$PKG_CONFIG --libs openssl 2>/dev/null` + case "$OPENSSL_LIBS" in + '') ;; + *) OPENSSL_LIBS="$OPENSSL_LIBS $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD" + have_openssl=yes + ;; + esac + OPENSSL_INCS=`$PKG_CONFIG --cflags openssl 2>/dev/null` + ;; + esac + case "$have_openssl" in + yes) ;; + *) + save_LIBS="$LIBS" + LIBS="" + OPENSSL_LIBS="" + for lib in crypto eay32; do + # clear cache + unset ac_cv_search_SSL_new + AC_SEARCH_LIBS([SSL_new], [ssl ssl32], + [have_openssl=yes + OPENSSL_LIBS="$LIBS -l$lib $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD"], + [have_openssl=no], + [-l$lib $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD]) + LIBS="$save_LIBS" + test "$have_openssl" = "yes" && break + done + ;; + esac + CPPFLAGS_SAVE=$CPPFLAGS + CPPFLAGS="$CPPFLAGS $OPENSSL_INCS" + AC_CHECK_HEADERS([openssl/ssl.h], [], [have_openssl=no]) + CPPFLAGS=$CPPFLAGS_SAVE + AC_SUBST(OPENSSL_INCS) + AC_SUBST(OPENSSL_LIBS) + case "$have_openssl" in + yes) AC_DEFINE(HAVE_OPENSSL, 1, [Define 
if the system has openssl]) ;; + esac + ;; +esac + +# check if we have and should use openssl +AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"]) +]) diff --git a/probe-busybox/libevent-2.1.11-stable/m4/ntp_pkg_config.m4 b/probe-busybox/libevent-2.1.11-stable/m4/ntp_pkg_config.m4 new file mode 100644 index 00000000..1bce8a6e --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/m4/ntp_pkg_config.m4 @@ -0,0 +1,27 @@ +dnl NTP_PKG_CONFIG -*- Autoconf -*- +dnl +dnl Look for pkg-config, which must be at least +dnl $ntp_pkgconfig_min_version. +dnl +AC_DEFUN([NTP_PKG_CONFIG], [ + +dnl lower the minimum version if you find an earlier one works +ntp_pkgconfig_min_version='0.15.0' +AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) +AS_UNSET([ac_cv_path_PKG_CONFIG]) +AS_UNSET([ac_cv_path_ac_pt_PKG_CONFIG]) + +case "$PKG_CONFIG" in + /*) + AC_MSG_CHECKING([if pkg-config is at least version $ntp_pkgconfig_min_version]) + if $PKG_CONFIG --atleast-pkgconfig-version $ntp_pkgconfig_min_version; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + PKG_CONFIG="" + fi + ;; +esac + +]) dnl NTP_PKG_CONFIG + diff --git a/probe-busybox/libevent-2.1.11-stable/make-event-config.sed b/probe-busybox/libevent-2.1.11-stable/make-event-config.sed new file mode 100644 index 00000000..3dc404b5 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/make-event-config.sed @@ -0,0 +1,27 @@ +# Sed script to postprocess config.h into event-config.h. + +1i\ +/* event2/event-config.h\ + *\ + * This file was generated by autoconf when libevent was built, and post-\ + * processed by Libevent so that its macros would have a uniform prefix.\ + *\ + * DO NOT EDIT THIS FILE.\ + *\ + * Do not rely on macros in this file existing in later versions.\ + */\ +\ +#ifndef EVENT2_EVENT_CONFIG_H_INCLUDED_\ +#define EVENT2_EVENT_CONFIG_H_INCLUDED_\ + +$a\ +\ +#endif /* event2/event-config.h */ + +/#\( *\)undef STDC_HEADERS\>/b +/#\( *\)define STDC_HEADERS\>/b + +# Only rewrite symbols starting with capitals +s/#\( *\)define \([A-Z]\)/#\1define EVENT__\2/ +s/#\( *\)undef \([A-Z]\)/#\1undef EVENT__\2/ +s/#\( *\)if\(n*\)def \([A-Z]\)/#\1if\2def EVENT__\2/ diff --git a/probe-busybox/libevent-2.1.11-stable/make_epoll_table.py b/probe-busybox/libevent-2.1.11-stable/make_epoll_table.py new file mode 100755 index 00000000..1b15a91a --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/make_epoll_table.py @@ -0,0 +1,63 @@ +#!/usr/bin/python2 + +def get(old,wc,rc,cc): + if ('xxx' in (rc, wc, cc)): + return "0",255 + + if ('add' in (rc, wc, cc)): + events = [] + if rc == 'add' or (rc != 'del' and 'r' in old): + events.append("EPOLLIN") + if wc == 'add' or (wc != 'del' and 'w' in old): + events.append("EPOLLOUT") + if cc == 'add' or (cc != 'del' and 'c' in old): + events.append("EPOLLRDHUP") + + if old == "0": + op = "EPOLL_CTL_ADD" + else: + op = "EPOLL_CTL_MOD" + return "|".join(events), op + + if ('del' in (rc, wc, cc)): + delevents = [] + modevents = [] + op = "EPOLL_CTL_DEL" + + if 'r' in old: + modevents.append("EPOLLIN") + if 'w' in old: + modevents.append("EPOLLOUT") + if 'c' in old: + modevents.append("EPOLLRDHUP") + + for item, event in [(rc,"EPOLLIN"), + (wc,"EPOLLOUT"), + (cc,"EPOLLRDHUP")]: + if item == 'del': + delevents.append(event) + if event in modevents: + modevents.remove(event) + + if modevents: + return "|".join(modevents), "EPOLL_CTL_MOD" + else: + return "|".join(delevents), "EPOLL_CTL_DEL" + + return 0, 0 + + +def fmt(op, ev, old, wc, rc, cc): + entry = "{ %s, %s },"%(op, ev) + print "\t/* 
old=%3s, write:%3s, read:%3s, close:%3s */\n\t%s" % ( + old, wc, rc, cc, entry) + return len(entry) + +for old in ('0','r','w','rw','c','cr','cw','crw'): + for wc in ('0', 'add', 'del', 'xxx'): + for rc in ('0', 'add', 'del', 'xxx'): + for cc in ('0', 'add', 'del', 'xxx'): + + op,ev = get(old,wc,rc,cc) + + fmt(op, ev, old, wc, rc, cc) diff --git a/probe-busybox/libevent-2.1.11-stable/minheap-internal.h b/probe-busybox/libevent-2.1.11-stable/minheap-internal.h new file mode 100644 index 00000000..b3a0eb1f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/minheap-internal.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Copyright (c) 2006 Maxim Yegorushkin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef MINHEAP_INTERNAL_H_INCLUDED_ +#define MINHEAP_INTERNAL_H_INCLUDED_ + +#include "event2/event-config.h" +#include "evconfig-private.h" +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/util.h" +#include "util-internal.h" +#include "mm-internal.h" + +typedef struct min_heap +{ + struct event** p; + unsigned n, a; +} min_heap_t; + +static inline void min_heap_ctor_(min_heap_t* s); +static inline void min_heap_dtor_(min_heap_t* s); +static inline void min_heap_elem_init_(struct event* e); +static inline int min_heap_elt_is_top_(const struct event *e); +static inline int min_heap_empty_(min_heap_t* s); +static inline unsigned min_heap_size_(min_heap_t* s); +static inline struct event* min_heap_top_(min_heap_t* s); +static inline int min_heap_reserve_(min_heap_t* s, unsigned n); +static inline int min_heap_push_(min_heap_t* s, struct event* e); +static inline struct event* min_heap_pop_(min_heap_t* s); +static inline int min_heap_adjust_(min_heap_t *s, struct event* e); +static inline int min_heap_erase_(min_heap_t* s, struct event* e); +static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e); +static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e); +static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e); + +#define min_heap_elem_greater(a, b) \ + (evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >)) + +void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; } +void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); } +void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; } +int min_heap_empty_(min_heap_t* s) { return 0u == s->n; } +unsigned min_heap_size_(min_heap_t* s) { return s->n; } +struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; } + +int min_heap_push_(min_heap_t* s, struct event* e) +{ + if (s->n == UINT32_MAX || min_heap_reserve_(s, s->n + 1)) + return -1; + min_heap_shift_up_(s, s->n++, e); + return 0; +} + +struct event* min_heap_pop_(min_heap_t* s) +{ + if (s->n) + { + struct event* e = *s->p; + min_heap_shift_down_(s, 0u, s->p[--s->n]); + e->ev_timeout_pos.min_heap_idx = -1; + return e; + } + return 0; +} + +int min_heap_elt_is_top_(const struct event *e) +{ + return e->ev_timeout_pos.min_heap_idx == 0; +} + +int min_heap_erase_(min_heap_t* s, struct event* e) +{ + if (-1 != e->ev_timeout_pos.min_heap_idx) + { + struct event *last = s->p[--s->n]; + unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2; + /* we replace e with the last element in the heap. We might need to + shift it upward if it is less than its parent, or downward if it is + greater than one or both its children. Since the children are known + to be less than the parent, it can't need to shift both up and + down. */ + if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last)) + min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last); + else + min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last); + e->ev_timeout_pos.min_heap_idx = -1; + return 0; + } + return -1; +} + +int min_heap_adjust_(min_heap_t *s, struct event *e) +{ + if (-1 == e->ev_timeout_pos.min_heap_idx) { + return min_heap_push_(s, e); + } else { + unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2; + /* The position of e has changed; we shift it up or down + * as needed. We can't need to do both. 
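A concrete example of the index arithmetic used throughout this heap (parent(i) = (i-1)/2, children at 2i+1 and 2i+2), with timeout values invented for illustration:

    /* Timeouts pushed in the order 1s, 3s, 2s, 7s, 5s end up laid out as:
     *   index:   0   1   2   3   4
     *   value:   1s  3s  2s  7s  5s
     *   parent:  -   0   0   1   1
     * Every element is >= its parent, so the earliest timeout sits at index 0;
     * that is what min_heap_top_() hands to the event loop when it computes
     * how long the backend may sleep. */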
*/ + if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e)) + min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e); + else + min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e); + return 0; + } +} + +int min_heap_reserve_(min_heap_t* s, unsigned n) +{ + if (s->a < n) + { + struct event** p; + unsigned a = s->a ? s->a * 2 : 8; + if (a < n) + a = n; +#if (SIZE_MAX == UINT32_MAX) + if (a > SIZE_MAX / sizeof *p) + return -1; +#endif + if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p))) + return -1; + s->p = p; + s->a = a; + } + return 0; +} + +void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e) +{ + unsigned parent = (hole_index - 1) / 2; + do + { + (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index; + hole_index = parent; + parent = (hole_index - 1) / 2; + } while (hole_index && min_heap_elem_greater(s->p[parent], e)); + (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index; +} + +void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e) +{ + unsigned parent = (hole_index - 1) / 2; + while (hole_index && min_heap_elem_greater(s->p[parent], e)) + { + (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index; + hole_index = parent; + parent = (hole_index - 1) / 2; + } + (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index; +} + +void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e) +{ + unsigned min_child = 2 * (hole_index + 1); + while (min_child <= s->n) + { + min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]); + if (!(min_heap_elem_greater(e, s->p[min_child]))) + break; + (s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index; + hole_index = min_child; + min_child = 2 * (hole_index + 1); + } + (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index; +} + +#endif /* MINHEAP_INTERNAL_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/mm-internal.h b/probe-busybox/libevent-2.1.11-stable/mm-internal.h new file mode 100644 index 00000000..7a95c995 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/mm-internal.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef MM_INTERNAL_H_INCLUDED_ +#define MM_INTERNAL_H_INCLUDED_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef EVENT__DISABLE_MM_REPLACEMENT +/* Internal use only: Memory allocation functions. We give them nice short + * mm_names for our own use, but make sure that the symbols have longer names + * so they don't conflict with other libraries (like, say, libmm). */ + +/** Allocate uninitialized memory. + * + * @return On success, return a pointer to sz newly allocated bytes. + * On failure, set errno to ENOMEM and return NULL. + * If the argument sz is 0, simply return NULL. + */ +EVENT2_EXPORT_SYMBOL +void *event_mm_malloc_(size_t sz); + +/** Allocate memory initialized to zero. + * + * @return On success, return a pointer to (count * size) newly allocated + * bytes, initialized to zero. + * On failure, or if the product would result in an integer overflow, + * set errno to ENOMEM and return NULL. + * If either arguments are 0, simply return NULL. + */ +EVENT2_EXPORT_SYMBOL +void *event_mm_calloc_(size_t count, size_t size); + +/** Duplicate a string. + * + * @return On success, return a pointer to a newly allocated duplicate + * of a string. + * Set errno to ENOMEM and return NULL if a memory allocation error + * occurs (or would occur) in the process. + * If the argument str is NULL, set errno to EINVAL and return NULL. 
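These mm_* wrappers are the indirection point behind the public event_set_mem_functions() hook, which lets an application substitute its own allocator (unless the library was built with EVENT__DISABLE_MM_REPLACEMENT). A minimal sketch; the counting wrappers and their names are invented for illustration.

    #include <event2/event.h>
    #include <stdlib.h>

    static size_t sketch_live_allocations;

    static void *sketch_malloc(size_t sz)
    {
        sketch_live_allocations++;
        return malloc(sz);
    }

    static void *sketch_realloc(void *p, size_t sz)
    {
        if (p == NULL)
            sketch_live_allocations++;
        return realloc(p, sz);
    }

    static void sketch_free(void *p)
    {
        if (p != NULL)
            sketch_live_allocations--;
        free(p);
    }

    /* Must run before any other libevent call allocates memory. */
    void sketch_install_allocator(void)
    {
        event_set_mem_functions(sketch_malloc, sketch_realloc, sketch_free);
    }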
+ */ +EVENT2_EXPORT_SYMBOL +char *event_mm_strdup_(const char *str); + +EVENT2_EXPORT_SYMBOL +void *event_mm_realloc_(void *p, size_t sz); +EVENT2_EXPORT_SYMBOL +void event_mm_free_(void *p); +#define mm_malloc(sz) event_mm_malloc_(sz) +#define mm_calloc(count, size) event_mm_calloc_((count), (size)) +#define mm_strdup(s) event_mm_strdup_(s) +#define mm_realloc(p, sz) event_mm_realloc_((p), (sz)) +#define mm_free(p) event_mm_free_(p) +#else +#define mm_malloc(sz) malloc(sz) +#define mm_calloc(n, sz) calloc((n), (sz)) +#define mm_strdup(s) strdup(s) +#define mm_realloc(p, sz) realloc((p), (sz)) +#define mm_free(p) free(p) +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/openssl-compat.h b/probe-busybox/libevent-2.1.11-stable/openssl-compat.h new file mode 100644 index 00000000..5d91ac64 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/openssl-compat.h @@ -0,0 +1,44 @@ +#ifndef OPENSSL_COMPAT_H +#define OPENSSL_COMPAT_H + +#include +#include "util-internal.h" + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) + +static inline BIO_METHOD *BIO_meth_new(int type, const char *name) +{ + BIO_METHOD *biom = calloc(1, sizeof(BIO_METHOD)); + + if (biom != NULL) { + biom->type = type; + biom->name = name; + } + return biom; +} + +#define BIO_meth_set_write(b, f) (b)->bwrite = (f) +#define BIO_meth_set_read(b, f) (b)->bread = (f) +#define BIO_meth_set_puts(b, f) (b)->bputs = (f) +#define BIO_meth_set_ctrl(b, f) (b)->ctrl = (f) +#define BIO_meth_set_create(b, f) (b)->create = (f) +#define BIO_meth_set_destroy(b, f) (b)->destroy = (f) + +#define BIO_set_init(b, val) (b)->init = (val) +#define BIO_set_data(b, val) (b)->ptr = (val) +#define BIO_set_shutdown(b, val) (b)->shutdown = (val) +#define BIO_get_init(b) (b)->init +#define BIO_get_data(b) (b)->ptr +#define BIO_get_shutdown(b) (b)->shutdown + +#define TLS_method SSLv23_method + +#endif /* (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) */ + +#if defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x20700000L +#define BIO_get_init(b) (b)->init +#endif + +#endif /* OPENSSL_COMPAT_H */ diff --git a/probe-busybox/libevent-2.1.11-stable/poll.c b/probe-busybox/libevent-2.1.11-stable/poll.c new file mode 100644 index 00000000..fe440711 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/poll.c @@ -0,0 +1,341 @@ +/* $OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */ + +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_POLL + +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "event-internal.h" +#include "evsignal-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "event2/thread.h" +#include "evthread-internal.h" +#include "time-internal.h" + +struct pollidx { + int idxplus1; +}; + +struct pollop { + int event_count; /* Highest number alloc */ + int nfds; /* Highest number used */ + int realloc_copy; /* True iff we must realloc + * event_set_copy */ + struct pollfd *event_set; + struct pollfd *event_set_copy; +}; + +static void *poll_init(struct event_base *); +static int poll_add(struct event_base *, int, short old, short events, void *idx); +static int poll_del(struct event_base *, int, short old, short events, void *idx); +static int poll_dispatch(struct event_base *, struct timeval *); +static void poll_dealloc(struct event_base *); + +const struct eventop pollops = { + "poll", + poll_init, + poll_add, + poll_del, + poll_dispatch, + poll_dealloc, + 0, /* doesn't need_reinit */ + EV_FEATURE_FDS, + sizeof(struct pollidx), +}; + +static void * +poll_init(struct event_base *base) +{ + struct pollop *pollop; + + if (!(pollop = mm_calloc(1, sizeof(struct pollop)))) + return (NULL); + + evsig_init_(base); + + evutil_weakrand_seed_(&base->weakrand_seed, 0); + + return (pollop); +} + +#ifdef CHECK_INVARIANTS +static void +poll_check_ok(struct pollop *pop) +{ + int i, idx; + struct event *ev; + + for (i = 0; i < pop->fd_count; ++i) { + idx = pop->idxplus1_by_fd[i]-1; + if (idx < 0) + continue; + EVUTIL_ASSERT(pop->event_set[idx].fd == i); + } + for (i = 0; i < pop->nfds; ++i) { + struct pollfd *pfd = &pop->event_set[i]; + EVUTIL_ASSERT(pop->idxplus1_by_fd[pfd->fd] == i+1); + } +} +#else +#define poll_check_ok(pop) +#endif + +static int +poll_dispatch(struct event_base *base, struct timeval *tv) +{ + int res, i, j, nfds; + long msec = -1; + struct pollop *pop = base->evbase; + struct pollfd *event_set; + + poll_check_ok(pop); + + nfds = pop->nfds; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (base->th_base_lock) { + /* If we're using this backend in a multithreaded setting, + * then we need to work on a copy of event_set, so that we can + * let other threads modify the main event_set while we're + * polling. If we're not multithreaded, then we'll skip the + * copy step here to save memory and time. 
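The pollops table above registers this file as the "poll" backend. When several backends are compiled in, libevent normally prefers epoll or kqueue; selecting poll explicitly (for instance while debugging this code) goes through the public configuration API. The helper name below is invented, and the sketch assumes the backend was compiled in (EVENT__HAVE_POLL).

    #include <event2/event.h>

    struct event_base *sketch_make_poll_base(void)
    {
        struct event_config *cfg = event_config_new();
        struct event_base *base;

        if (cfg == NULL)
            return NULL;
        /* Skip the normally preferred Linux backend so "poll" gets picked. */
        event_config_avoid_method(cfg, "epoll");
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
        return base;
    }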
*/ + if (pop->realloc_copy) { + struct pollfd *tmp = mm_realloc(pop->event_set_copy, + pop->event_count * sizeof(struct pollfd)); + if (tmp == NULL) { + event_warn("realloc"); + return -1; + } + pop->event_set_copy = tmp; + pop->realloc_copy = 0; + } + memcpy(pop->event_set_copy, pop->event_set, + sizeof(struct pollfd)*nfds); + event_set = pop->event_set_copy; + } else { + event_set = pop->event_set; + } +#else + event_set = pop->event_set; +#endif + + if (tv != NULL) { + msec = evutil_tv_to_msec_(tv); + if (msec < 0 || msec > INT_MAX) + msec = INT_MAX; + } + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = poll(event_set, nfds, msec); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + if (res == -1) { + if (errno != EINTR) { + event_warn("poll"); + return (-1); + } + + return (0); + } + + event_debug(("%s: poll reports %d", __func__, res)); + + if (res == 0 || nfds == 0) + return (0); + + i = evutil_weakrand_range_(&base->weakrand_seed, nfds); + for (j = 0; j < nfds; j++) { + int what; + if (++i == nfds) + i = 0; + what = event_set[i].revents; + if (!what) + continue; + + res = 0; + + /* If the file gets closed notify */ + if (what & (POLLHUP|POLLERR|POLLNVAL)) + what |= POLLIN|POLLOUT; + if (what & POLLIN) + res |= EV_READ; + if (what & POLLOUT) + res |= EV_WRITE; + if (res == 0) + continue; + + evmap_io_active_(base, event_set[i].fd, res); + } + + return (0); +} + +static int +poll_add(struct event_base *base, int fd, short old, short events, void *idx_) +{ + struct pollop *pop = base->evbase; + struct pollfd *pfd = NULL; + struct pollidx *idx = idx_; + int i; + + EVUTIL_ASSERT((events & EV_SIGNAL) == 0); + if (!(events & (EV_READ|EV_WRITE))) + return (0); + + poll_check_ok(pop); + if (pop->nfds + 1 >= pop->event_count) { + struct pollfd *tmp_event_set; + int tmp_event_count; + + if (pop->event_count < 32) + tmp_event_count = 32; + else + tmp_event_count = pop->event_count * 2; + + /* We need more file descriptors */ + tmp_event_set = mm_realloc(pop->event_set, + tmp_event_count * sizeof(struct pollfd)); + if (tmp_event_set == NULL) { + event_warn("realloc"); + return (-1); + } + pop->event_set = tmp_event_set; + + pop->event_count = tmp_event_count; + pop->realloc_copy = 1; + } + + i = idx->idxplus1 - 1; + + if (i >= 0) { + pfd = &pop->event_set[i]; + } else { + i = pop->nfds++; + pfd = &pop->event_set[i]; + pfd->events = 0; + pfd->fd = fd; + idx->idxplus1 = i + 1; + } + + pfd->revents = 0; + if (events & EV_WRITE) + pfd->events |= POLLOUT; + if (events & EV_READ) + pfd->events |= POLLIN; + poll_check_ok(pop); + + return (0); +} + +/* + * Nothing to be done here. + */ + +static int +poll_del(struct event_base *base, int fd, short old, short events, void *idx_) +{ + struct pollop *pop = base->evbase; + struct pollfd *pfd = NULL; + struct pollidx *idx = idx_; + int i; + + EVUTIL_ASSERT((events & EV_SIGNAL) == 0); + if (!(events & (EV_READ|EV_WRITE))) + return (0); + + poll_check_ok(pop); + i = idx->idxplus1 - 1; + if (i < 0) + return (-1); + + /* Do we still want to read or write? */ + pfd = &pop->event_set[i]; + if (events & EV_READ) + pfd->events &= ~POLLIN; + if (events & EV_WRITE) + pfd->events &= ~POLLOUT; + poll_check_ok(pop); + if (pfd->events) + /* Another event cares about that fd. */ + return (0); + + /* Okay, so we aren't interested in that fd anymore. */ + idx->idxplus1 = 0; + + --pop->nfds; + if (i != pop->nfds) { + /* + * Shift the last pollfd down into the now-unoccupied + * position. 
+ */ + memcpy(&pop->event_set[i], &pop->event_set[pop->nfds], + sizeof(struct pollfd)); + idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd); + EVUTIL_ASSERT(idx); + EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1); + idx->idxplus1 = i + 1; + } + + poll_check_ok(pop); + return (0); +} + +static void +poll_dealloc(struct event_base *base) +{ + struct pollop *pop = base->evbase; + + evsig_dealloc_(base); + if (pop->event_set) + mm_free(pop->event_set); + if (pop->event_set_copy) + mm_free(pop->event_set_copy); + + memset(pop, 0, sizeof(struct pollop)); + mm_free(pop); +} + +#endif /* EVENT__HAVE_POLL */ diff --git a/probe-busybox/libevent-2.1.11-stable/ratelim-internal.h b/probe-busybox/libevent-2.1.11-stable/ratelim-internal.h new file mode 100644 index 00000000..6cc1cdde --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/ratelim-internal.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef RATELIM_INTERNAL_H_INCLUDED_ +#define RATELIM_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/util.h" + +/** A token bucket is an internal structure that tracks how many bytes we are + * currently willing to read or write on a given bufferevent or group of + * bufferevents */ +struct ev_token_bucket { + /** How many bytes are we willing to read or write right now? These + * values are signed so that we can do "defecit spending" */ + ev_ssize_t read_limit, write_limit; + /** When was this bucket last updated? Measured in abstract 'ticks' + * relative to the token bucket configuration. */ + ev_uint32_t last_updated; +}; + +/** Configuration info for a token bucket or set of token buckets. */ +struct ev_token_bucket_cfg { + /** How many bytes are we willing to read on average per tick? */ + size_t read_rate; + /** How many bytes are we willing to read at most in any one tick? */ + size_t read_maximum; + /** How many bytes are we willing to write on average per tick? */ + size_t write_rate; + /** How many bytes are we willing to write at most in any one tick? */ + size_t write_maximum; + + /* How long is a tick? 
Note that fractions of a millisecond are + * ignored. */ + struct timeval tick_timeout; + + /* How long is a tick, in milliseconds? Derived from tick_timeout. */ + unsigned msec_per_tick; +}; + +/** The current tick is 'current_tick': add bytes to 'bucket' as specified in + * 'cfg'. */ +int ev_token_bucket_update_(struct ev_token_bucket *bucket, + const struct ev_token_bucket_cfg *cfg, + ev_uint32_t current_tick); + +/** In which tick does 'tv' fall according to 'cfg'? Note that ticks can + * overflow easily; your code needs to handle this. */ +ev_uint32_t ev_token_bucket_get_tick_(const struct timeval *tv, + const struct ev_token_bucket_cfg *cfg); + +/** Adjust 'bucket' to respect 'cfg', and note that it was last updated in + * 'current_tick'. If 'reinitialize' is true, we are changing the + * configuration of 'bucket'; otherwise, we are setting it up for the first + * time. + */ +int ev_token_bucket_init_(struct ev_token_bucket *bucket, + const struct ev_token_bucket_cfg *cfg, + ev_uint32_t current_tick, + int reinitialize); + +int bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev, + int unsuspend); + +/** Decrease the read limit of 'b' by 'n' bytes */ +#define ev_token_bucket_decrement_read(b,n) \ + do { \ + (b)->read_limit -= (n); \ + } while (0) +/** Decrease the write limit of 'b' by 'n' bytes */ +#define ev_token_bucket_decrement_write(b,n) \ + do { \ + (b)->write_limit -= (n); \ + } while (0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/sample/dns-example.c b/probe-busybox/libevent-2.1.11-stable/sample/dns-example.c new file mode 100644 index 00000000..21a75de8 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/dns-example.c @@ -0,0 +1,264 @@ +/* + This example code shows how to use the high-level, low-level, and + server-level interfaces of evdns. + + XXX It's pretty ugly and should probably be cleaned up. 
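As a rough illustration (the addresses here are only examples): "./dns-example -s 8.8.8.8 www.example.org" does a forward A lookup against the given nameserver, "./dns-example -x 192.0.2.1" does a reverse (PTR) lookup, and "./dns-example -T" runs the toy DNS server below on UDP port 10053.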
+ */ + +#include + +/* Compatibility for possible missing IPv6 declarations */ +#include "../ipv6-internal.h" + +#include + +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif + +#ifdef _WIN32 +#include +#include +#include +#else +#include +#include +#include +#endif + +#include +#include +#include +#include + +#ifdef EVENT__HAVE_NETINET_IN6_H +#include +#endif + +#include +#include +#include + +#define u32 ev_uint32_t +#define u8 ev_uint8_t + +static const char * +debug_ntoa(u32 address) +{ + static char buf[32]; + u32 a = ntohl(address); + evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d", + (int)(u8)((a>>24)&0xff), + (int)(u8)((a>>16)&0xff), + (int)(u8)((a>>8 )&0xff), + (int)(u8)((a )&0xff)); + return buf; +} + +static void +main_callback(int result, char type, int count, int ttl, + void *addrs, void *orig) { + char *n = (char*)orig; + int i; + for (i = 0; i < count; ++i) { + if (type == DNS_IPv4_A) { + printf("%s: %s\n", n, debug_ntoa(((u32*)addrs)[i])); + } else if (type == DNS_PTR) { + printf("%s: %s\n", n, ((char**)addrs)[i]); + } + } + if (!count) { + printf("%s: No answer (%d)\n", n, result); + } + fflush(stdout); +} + +static void +gai_callback(int err, struct evutil_addrinfo *ai, void *arg) +{ + const char *name = arg; + int i; + struct evutil_addrinfo *first_ai = ai; + + if (err) { + printf("%s: %s\n", name, evutil_gai_strerror(err)); + } + if (ai && ai->ai_canonname) + printf(" %s ==> %s\n", name, ai->ai_canonname); + for (i=0; ai; ai = ai->ai_next, ++i) { + char buf[128]; + if (ai->ai_family == PF_INET) { + struct sockaddr_in *sin = + (struct sockaddr_in*)ai->ai_addr; + evutil_inet_ntop(AF_INET, &sin->sin_addr, buf, + sizeof(buf)); + printf("[%d] %s: %s\n",i,name,buf); + } else { + struct sockaddr_in6 *sin6 = + (struct sockaddr_in6*)ai->ai_addr; + evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, buf, + sizeof(buf)); + printf("[%d] %s: %s\n",i,name,buf); + } + } + + if (first_ai) + evutil_freeaddrinfo(first_ai); +} + +static void +evdns_server_callback(struct evdns_server_request *req, void *data) +{ + int i, r; + (void)data; + /* dummy; give 192.168.11.11 as an answer for all A questions, + * give foo.bar.example.com as an answer for all PTR questions. 
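(192.168.11.11 is the 0xc0a80b0b constant used below: 0xc0 = 192, 0xa8 = 168, 0x0b = 11.)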
*/ + for (i = 0; i < req->nquestions; ++i) { + u32 ans = htonl(0xc0a80b0bUL); + if (req->questions[i]->type == EVDNS_TYPE_A && + req->questions[i]->dns_question_class == EVDNS_CLASS_INET) { + printf(" -- replying for %s (A)\n", req->questions[i]->name); + r = evdns_server_request_add_a_reply(req, req->questions[i]->name, + 1, &ans, 10); + if (r<0) + printf("eeep, didn't work.\n"); + } else if (req->questions[i]->type == EVDNS_TYPE_PTR && + req->questions[i]->dns_question_class == EVDNS_CLASS_INET) { + printf(" -- replying for %s (PTR)\n", req->questions[i]->name); + r = evdns_server_request_add_ptr_reply(req, NULL, req->questions[i]->name, + "foo.bar.example.com", 10); + if (r<0) + printf("ugh, no luck"); + } else { + printf(" -- skipping %s [%d %d]\n", req->questions[i]->name, + req->questions[i]->type, req->questions[i]->dns_question_class); + } + } + + r = evdns_server_request_respond(req, 0); + if (r<0) + printf("eeek, couldn't send reply.\n"); +} + +static int verbose = 0; + +static void +logfn(int is_warn, const char *msg) { + if (!is_warn && !verbose) + return; + fprintf(stderr, "%s: %s\n", is_warn?"WARN":"INFO", msg); +} + +int +main(int c, char **v) { + struct options { + int reverse; + int use_getaddrinfo; + int servertest; + const char *resolv_conf; + const char *ns; + }; + struct options o; + int opt; + struct event_base *event_base = NULL; + struct evdns_base *evdns_base = NULL; + + memset(&o, 0, sizeof(o)); + + if (c < 2) { + fprintf(stderr, "syntax: %s [-x] [-v] [-c resolv.conf] [-s ns] hostname\n", v[0]); + fprintf(stderr, "syntax: %s [-T]\n", v[0]); + return 1; + } + + while ((opt = getopt(c, v, "xvc:Ts:g")) != -1) { + switch (opt) { + case 'x': o.reverse = 1; break; + case 'v': ++verbose; break; + case 'g': o.use_getaddrinfo = 1; break; + case 'T': o.servertest = 1; break; + case 'c': o.resolv_conf = optarg; break; + case 's': o.ns = optarg; break; + default : fprintf(stderr, "Unknown option %c\n", opt); break; + } + } + +#ifdef _WIN32 + { + WSADATA WSAData; + WSAStartup(0x101, &WSAData); + } +#endif + + event_base = event_base_new(); + evdns_base = evdns_base_new(event_base, EVDNS_BASE_DISABLE_WHEN_INACTIVE); + evdns_set_log_fn(logfn); + + if (o.servertest) { + evutil_socket_t sock; + struct sockaddr_in my_addr; + sock = socket(PF_INET, SOCK_DGRAM, 0); + if (sock == -1) { + perror("socket"); + exit(1); + } + evutil_make_socket_nonblocking(sock); + my_addr.sin_family = AF_INET; + my_addr.sin_port = htons(10053); + my_addr.sin_addr.s_addr = INADDR_ANY; + if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr))<0) { + perror("bind"); + exit(1); + } + evdns_add_server_port_with_base(event_base, sock, 0, evdns_server_callback, NULL); + } + if (optind < c) { + int res; +#ifdef _WIN32 + if (o.resolv_conf == NULL && !o.ns) + res = evdns_base_config_windows_nameservers(evdns_base); + else +#endif + if (o.ns) + res = evdns_base_nameserver_ip_add(evdns_base, o.ns); + else + res = evdns_base_resolv_conf_parse(evdns_base, + DNS_OPTION_NAMESERVERS, o.resolv_conf); + + if (res < 0) { + fprintf(stderr, "Couldn't configure nameservers"); + return 1; + } + } + + printf("EVUTIL_AI_CANONNAME in example = %d\n", EVUTIL_AI_CANONNAME); + for (; optind < c; ++optind) { + if (o.reverse) { + struct in_addr addr; + if (evutil_inet_pton(AF_INET, v[optind], &addr)!=1) { + fprintf(stderr, "Skipping non-IP %s\n", v[optind]); + continue; + } + fprintf(stderr, "resolving %s...\n",v[optind]); + evdns_base_resolve_reverse(evdns_base, &addr, 0, main_callback, v[optind]); + } else if (o.use_getaddrinfo) 
{ + struct evutil_addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_flags = EVUTIL_AI_CANONNAME; + fprintf(stderr, "resolving (fwd) %s...\n",v[optind]); + evdns_getaddrinfo(evdns_base, v[optind], NULL, &hints, + gai_callback, v[optind]); + } else { + fprintf(stderr, "resolving (fwd) %s...\n",v[optind]); + evdns_base_resolve_ipv4(evdns_base, v[optind], 0, main_callback, v[optind]); + } + } + fflush(stdout); + event_base_dispatch(event_base); + evdns_base_free(evdns_base, 1); + event_base_free(event_base); + return 0; +} + diff --git a/probe-busybox/libevent-2.1.11-stable/sample/event-read-fifo.c b/probe-busybox/libevent-2.1.11-stable/sample/event-read-fifo.c new file mode 100644 index 00000000..27b0b530 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/event-read-fifo.c @@ -0,0 +1,162 @@ +/* + * This sample code shows how to use Libevent to read from a named pipe. + * XXX This code could make better use of the Libevent interfaces. + * + * XXX This does not work on Windows; ignore everything inside the _WIN32 block. + * + * On UNIX, compile with: + * cc -I/usr/local/include -o event-read-fifo event-read-fifo.c \ + * -L/usr/local/lib -levent + */ + +#include + +#include +#include +#ifndef _WIN32 +#include +#include +#include +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include + +#include + +static void +fifo_read(evutil_socket_t fd, short event, void *arg) +{ + char buf[255]; + int len; + struct event *ev = arg; +#ifdef _WIN32 + DWORD dwBytesRead; +#endif + + fprintf(stderr, "fifo_read called with fd: %d, event: %d, arg: %p\n", + (int)fd, event, arg); +#ifdef _WIN32 + len = ReadFile((HANDLE)fd, buf, sizeof(buf) - 1, &dwBytesRead, NULL); + + /* Check for end of file. */ + if (len && dwBytesRead == 0) { + fprintf(stderr, "End Of File"); + event_del(ev); + return; + } + + buf[dwBytesRead] = '\0'; +#else + len = read(fd, buf, sizeof(buf) - 1); + + if (len <= 0) { + if (len == -1) + perror("read"); + else if (len == 0) + fprintf(stderr, "Connection closed\n"); + event_del(ev); + event_base_loopbreak(event_get_base(ev)); + return; + } + + buf[len] = '\0'; +#endif + fprintf(stdout, "Read: %s\n", buf); +} + +/* On Unix, cleanup event.fifo if SIGINT is received. */ +#ifndef _WIN32 +static void +signal_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event_base *base = arg; + event_base_loopbreak(base); +} +#endif + +int +main(int argc, char **argv) +{ + struct event *evfifo; + struct event_base* base; +#ifdef _WIN32 + HANDLE socket; + /* Open a file. */ + socket = CreateFileA("test.txt", /* open File */ + GENERIC_READ, /* open for reading */ + 0, /* do not share */ + NULL, /* no security */ + OPEN_EXISTING, /* existing file only */ + FILE_ATTRIBUTE_NORMAL, /* normal file */ + NULL); /* no attr. 
template */ + + if (socket == INVALID_HANDLE_VALUE) + return 1; + +#else + struct event *signal_int; + struct stat st; + const char *fifo = "event.fifo"; + int socket; + + if (lstat(fifo, &st) == 0) { + if ((st.st_mode & S_IFMT) == S_IFREG) { + errno = EEXIST; + perror("lstat"); + exit(1); + } + } + + unlink(fifo); + if (mkfifo(fifo, 0600) == -1) { + perror("mkfifo"); + exit(1); + } + + socket = open(fifo, O_RDONLY | O_NONBLOCK, 0); + + if (socket == -1) { + perror("open"); + exit(1); + } + + fprintf(stderr, "Write data to %s\n", fifo); +#endif + /* Initalize the event library */ + base = event_base_new(); + + /* Initalize one event */ +#ifdef _WIN32 + evfifo = event_new(base, (evutil_socket_t)socket, EV_READ|EV_PERSIST, fifo_read, + event_self_cbarg()); +#else + /* catch SIGINT so that event.fifo can be cleaned up */ + signal_int = evsignal_new(base, SIGINT, signal_cb, base); + event_add(signal_int, NULL); + + evfifo = event_new(base, socket, EV_READ|EV_PERSIST, fifo_read, + event_self_cbarg()); +#endif + + /* Add it to the active events, without a timeout */ + event_add(evfifo, NULL); + + event_base_dispatch(base); + event_base_free(base); +#ifdef _WIN32 + CloseHandle(socket); +#else + close(socket); + unlink(fifo); +#endif + libevent_global_shutdown(); + return (0); +} + diff --git a/probe-busybox/libevent-2.1.11-stable/sample/hello-world.c b/probe-busybox/libevent-2.1.11-stable/sample/hello-world.c new file mode 100644 index 00000000..2023cd6c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/hello-world.c @@ -0,0 +1,141 @@ +/* + This example program provides a trivial server program that listens for TCP + connections on port 9995. When they arrive, it writes a short message to + each client connection, and closes each connection once it is flushed. + + Where possible, it exits cleanly in response to a SIGINT (ctrl-c). 
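One quick way to try it, assuming a Unix-like host with nc installed: start the program, then run "nc 127.0.0.1 9995"; the server writes "Hello, World!" to that connection and closes it once the message has been flushed.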
+*/ + + +#include +#include +#include +#include +#ifndef _WIN32 +#include +# ifdef _XOPEN_SOURCE_EXTENDED +# include +# endif +#include +#endif + +#include +#include +#include +#include +#include + +static const char MESSAGE[] = "Hello, World!\n"; + +static const int PORT = 9995; + +static void listener_cb(struct evconnlistener *, evutil_socket_t, + struct sockaddr *, int socklen, void *); +static void conn_writecb(struct bufferevent *, void *); +static void conn_eventcb(struct bufferevent *, short, void *); +static void signal_cb(evutil_socket_t, short, void *); + +int +main(int argc, char **argv) +{ + struct event_base *base; + struct evconnlistener *listener; + struct event *signal_event; + + struct sockaddr_in sin; +#ifdef _WIN32 + WSADATA wsa_data; + WSAStartup(0x0201, &wsa_data); +#endif + + base = event_base_new(); + if (!base) { + fprintf(stderr, "Could not initialize libevent!\n"); + return 1; + } + + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = htons(PORT); + + listener = evconnlistener_new_bind(base, listener_cb, (void *)base, + LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_FREE, -1, + (struct sockaddr*)&sin, + sizeof(sin)); + + if (!listener) { + fprintf(stderr, "Could not create a listener!\n"); + return 1; + } + + signal_event = evsignal_new(base, SIGINT, signal_cb, (void *)base); + + if (!signal_event || event_add(signal_event, NULL)<0) { + fprintf(stderr, "Could not create/add a signal event!\n"); + return 1; + } + + event_base_dispatch(base); + + evconnlistener_free(listener); + event_free(signal_event); + event_base_free(base); + + printf("done\n"); + return 0; +} + +static void +listener_cb(struct evconnlistener *listener, evutil_socket_t fd, + struct sockaddr *sa, int socklen, void *user_data) +{ + struct event_base *base = user_data; + struct bufferevent *bev; + + bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE); + if (!bev) { + fprintf(stderr, "Error constructing bufferevent!"); + event_base_loopbreak(base); + return; + } + bufferevent_setcb(bev, NULL, conn_writecb, conn_eventcb, NULL); + bufferevent_enable(bev, EV_WRITE); + bufferevent_disable(bev, EV_READ); + + bufferevent_write(bev, MESSAGE, strlen(MESSAGE)); +} + +static void +conn_writecb(struct bufferevent *bev, void *user_data) +{ + struct evbuffer *output = bufferevent_get_output(bev); + if (evbuffer_get_length(output) == 0) { + printf("flushed answer\n"); + bufferevent_free(bev); + } +} + +static void +conn_eventcb(struct bufferevent *bev, short events, void *user_data) +{ + if (events & BEV_EVENT_EOF) { + printf("Connection closed.\n"); + } else if (events & BEV_EVENT_ERROR) { + printf("Got an error on the connection: %s\n", + strerror(errno));/*XXX win32*/ + } + /* None of the other events can happen here, since we haven't enabled + * timeouts */ + bufferevent_free(bev); +} + +static void +signal_cb(evutil_socket_t sig, short events, void *user_data) +{ + struct event_base *base = user_data; + struct timeval delay = { 2, 0 }; + + printf("Caught an interrupt signal; exiting cleanly in two seconds.\n"); + + event_base_loopexit(base, &delay); +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.c b/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.c new file mode 100644 index 00000000..50709369 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.c @@ -0,0 +1,217 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ 
+ * \___|\___/|_| \_\_____| + * + * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at http://curl.haxx.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + ***************************************************************************/ + +/* This file is an amalgamation of hostcheck.c and most of rawstr.c + from cURL. The contents of the COPYING file mentioned above are: + +COPYRIGHT AND PERMISSION NOTICE + +Copyright (c) 1996 - 2013, Daniel Stenberg, . + +All rights reserved. + +Permission to use, copy, modify, and distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright +notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of a copyright holder shall not +be used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization of the copyright holder. +*/ + +#include "hostcheck.h" +#include + +/* Portable, consistent toupper (remember EBCDIC). Do not use toupper() because + its behavior is altered by the current locale. */ +static char Curl_raw_toupper(char in) +{ + switch (in) { + case 'a': + return 'A'; + case 'b': + return 'B'; + case 'c': + return 'C'; + case 'd': + return 'D'; + case 'e': + return 'E'; + case 'f': + return 'F'; + case 'g': + return 'G'; + case 'h': + return 'H'; + case 'i': + return 'I'; + case 'j': + return 'J'; + case 'k': + return 'K'; + case 'l': + return 'L'; + case 'm': + return 'M'; + case 'n': + return 'N'; + case 'o': + return 'O'; + case 'p': + return 'P'; + case 'q': + return 'Q'; + case 'r': + return 'R'; + case 's': + return 'S'; + case 't': + return 'T'; + case 'u': + return 'U'; + case 'v': + return 'V'; + case 'w': + return 'W'; + case 'x': + return 'X'; + case 'y': + return 'Y'; + case 'z': + return 'Z'; + } + return in; +} + +/* + * Curl_raw_equal() is for doing "raw" case insensitive strings. This is meant + * to be locale independent and only compare strings we know are safe for + * this. See http://daniel.haxx.se/blog/2008/10/15/strcasecmp-in-turkish/ for + * some further explanation to why this function is necessary. + * + * The function is capable of comparing a-z case insensitively even for + * non-ascii. 
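For example, in a Turkish locale 'i' and 'I' are not a case pair (dotted vs. dotless I), so a locale-aware toupper()/strcasecmp() can report "FILE" and "file" as different strings; Curl_raw_toupper() above only maps the ASCII letters 'a'..'z' and leaves every other byte untouched, so such strings still compare equal and non-ASCII bytes are simply compared verbatim.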
+ */ + +static int Curl_raw_equal(const char *first, const char *second) +{ + while(*first && *second) { + if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second)) + /* get out of the loop as soon as they don't match */ + break; + first++; + second++; + } + /* we do the comparison here (possibly again), just to make sure that if the + loop above is skipped because one of the strings reached zero, we must not + return this as a successful match */ + return (Curl_raw_toupper(*first) == Curl_raw_toupper(*second)); +} + +static int Curl_raw_nequal(const char *first, const char *second, size_t max) +{ + while(*first && *second && max) { + if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second)) { + break; + } + max--; + first++; + second++; + } + if(0 == max) + return 1; /* they are equal this far */ + + return Curl_raw_toupper(*first) == Curl_raw_toupper(*second); +} + +/* + * Match a hostname against a wildcard pattern. + * E.g. + * "foo.host.com" matches "*.host.com". + * + * We use the matching rule described in RFC6125, section 6.4.3. + * http://tools.ietf.org/html/rfc6125#section-6.4.3 + */ + +static int hostmatch(const char *hostname, const char *pattern) +{ + const char *pattern_label_end, *pattern_wildcard, *hostname_label_end; + int wildcard_enabled; + size_t prefixlen, suffixlen; + pattern_wildcard = strchr(pattern, '*'); + if(pattern_wildcard == NULL) + return Curl_raw_equal(pattern, hostname) ? + CURL_HOST_MATCH : CURL_HOST_NOMATCH; + + /* We require at least 2 dots in pattern to avoid too wide wildcard + match. */ + wildcard_enabled = 1; + pattern_label_end = strchr(pattern, '.'); + if(pattern_label_end == NULL || strchr(pattern_label_end+1, '.') == NULL || + pattern_wildcard > pattern_label_end || + Curl_raw_nequal(pattern, "xn--", 4)) { + wildcard_enabled = 0; + } + if(!wildcard_enabled) + return Curl_raw_equal(pattern, hostname) ? + CURL_HOST_MATCH : CURL_HOST_NOMATCH; + + hostname_label_end = strchr(hostname, '.'); + if(hostname_label_end == NULL || + !Curl_raw_equal(pattern_label_end, hostname_label_end)) + return CURL_HOST_NOMATCH; + + /* The wildcard must match at least one character, so the left-most + label of the hostname is at least as large as the left-most label + of the pattern. */ + if(hostname_label_end - hostname < pattern_label_end - pattern) + return CURL_HOST_NOMATCH; + + prefixlen = pattern_wildcard - pattern; + suffixlen = pattern_label_end - (pattern_wildcard+1); + return Curl_raw_nequal(pattern, hostname, prefixlen) && + Curl_raw_nequal(pattern_wildcard+1, hostname_label_end - suffixlen, + suffixlen) ? 
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH; +} + +int Curl_cert_hostcheck(const char *match_pattern, const char *hostname) +{ + if(!match_pattern || !*match_pattern || + !hostname || !*hostname) /* sanity check */ + return 0; + + if(Curl_raw_equal(hostname, match_pattern)) /* trivial case */ + return 1; + + if(hostmatch(hostname,match_pattern) == CURL_HOST_MATCH) + return 1; + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.h b/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.h new file mode 100644 index 00000000..f40bc434 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/hostcheck.h @@ -0,0 +1,30 @@ +#ifndef HEADER_CURL_HOSTCHECK_H +#define HEADER_CURL_HOSTCHECK_H +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at http://curl.haxx.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + ***************************************************************************/ + +#define CURL_HOST_NOMATCH 0 +#define CURL_HOST_MATCH 1 +int Curl_cert_hostcheck(const char *match_pattern, const char *hostname); + +#endif /* HEADER_CURL_HOSTCHECK_H */ + diff --git a/probe-busybox/libevent-2.1.11-stable/sample/http-connect.c b/probe-busybox/libevent-2.1.11-stable/sample/http-connect.c new file mode 100644 index 00000000..af2c86a8 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/http-connect.c @@ -0,0 +1,121 @@ +#include "event2/event-config.h" + +#include +#include +#include +#include +#include +#include +#include + +#define VERIFY(cond) do { \ + if (!(cond)) { \ + fprintf(stderr, "[error] %s\n", #cond); \ + exit(EXIT_FAILURE); \ + } \ +} while (0); \ + +#define URL_MAX 4096 + +struct connect_base +{ + struct evhttp_connection *evcon; + struct evhttp_uri *location; +}; + +static void get_cb(struct evhttp_request *req, void *arg) +{ + ev_ssize_t len; + struct evbuffer *evbuf; + struct evhttp_connection *evcon; + + VERIFY(req); + evcon = evhttp_request_get_connection(req); + VERIFY(evcon); + + evbuf = evhttp_request_get_input_buffer(req); + len = evbuffer_get_length(evbuf); + fwrite(evbuffer_pullup(evbuf, len), len, 1, stdout); + evbuffer_drain(evbuf, len); +} + +static void connect_cb(struct evhttp_request *proxy_req, void *arg) +{ + char buffer[URL_MAX]; + + struct connect_base *base = arg; + struct evhttp_connection *evcon = base->evcon; + struct evhttp_uri *location = base->location; + + VERIFY(proxy_req); + if (evcon) { + struct evhttp_request *req = evhttp_request_new(get_cb, NULL); + evhttp_add_header(req->output_headers, "Connection", "close"); + VERIFY(!evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + evhttp_uri_join(location, buffer, URL_MAX))); + } +} + +int main(int argc, const char **argv) +{ + char buffer[URL_MAX]; + + struct evhttp_uri *host_port; + struct evhttp_uri *location; + struct evhttp_uri *proxy; + + struct event_base *base; + struct evhttp_connection 
*evcon; + struct evhttp_request *req; + + struct connect_base connect_base; + + if (argc != 3) { + printf("Usage: %s proxy url\n", argv[0]); + return 1; + } + + { + VERIFY(proxy = evhttp_uri_parse(argv[1])); + VERIFY(evhttp_uri_get_host(proxy)); + VERIFY(evhttp_uri_get_port(proxy) > 0); + } + { + host_port = evhttp_uri_parse(argv[2]); + evhttp_uri_set_scheme(host_port, NULL); + evhttp_uri_set_userinfo(host_port, NULL); + evhttp_uri_set_path(host_port, NULL); + evhttp_uri_set_query(host_port, NULL); + evhttp_uri_set_fragment(host_port, NULL); + VERIFY(evhttp_uri_get_host(host_port)); + VERIFY(evhttp_uri_get_port(host_port) > 0); + } + { + location = evhttp_uri_parse(argv[2]); + evhttp_uri_set_scheme(location, NULL); + evhttp_uri_set_userinfo(location, 0); + evhttp_uri_set_host(location, NULL); + evhttp_uri_set_port(location, -1); + } + + VERIFY(base = event_base_new()); + VERIFY(evcon = evhttp_connection_base_new(base, NULL, + evhttp_uri_get_host(proxy), evhttp_uri_get_port(proxy))); + connect_base.evcon = evcon; + connect_base.location = location; + VERIFY(req = evhttp_request_new(connect_cb, &connect_base)); + + evhttp_add_header(req->output_headers, "Connection", "keep-alive"); + evhttp_add_header(req->output_headers, "Proxy-Connection", "keep-alive"); + evutil_snprintf(buffer, URL_MAX, "%s:%d", + evhttp_uri_get_host(host_port), evhttp_uri_get_port(host_port)); + evhttp_make_request(evcon, req, EVHTTP_REQ_CONNECT, buffer); + + event_base_dispatch(base); + evhttp_connection_free(evcon); + event_base_free(base); + evhttp_uri_free(proxy); + evhttp_uri_free(host_port); + evhttp_uri_free(location); + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/http-server.c b/probe-busybox/libevent-2.1.11-stable/sample/http-server.c new file mode 100644 index 00000000..cedb2af8 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/http-server.c @@ -0,0 +1,580 @@ +/* + A trivial static http webserver using Libevent's evhttp. + + This is not the best code in the world, and it does some fairly stupid stuff + that you would never want to do in a production webserver. Caveat hackor! 
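For a quick local test (the docroot here is just an example), running "./http-server ." serves the current directory on an ephemeral port, which the program prints at startup, and the /dump URI dumps any request to the server's stdout and answers with a plain 200.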
+ + */ + +/* Compatibility for possible missing IPv6 declarations */ +#include "../util-internal.h" + +#include +#include +#include + +#include +#include + +#ifdef _WIN32 +#include +#include +#include +#include +#include +#include +#ifndef S_ISDIR +#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR) +#endif +#else /* !_WIN32 */ +#include +#include +#include +#include +#include +#endif /* _WIN32 */ +#include + +#ifdef EVENT__HAVE_SYS_UN_H +#include +#endif +#ifdef EVENT__HAVE_AFUNIX_H +#include +#endif + +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#endif /* _WIN32 */ + +#ifdef EVENT__HAVE_NETINET_IN_H +#include +# ifdef _XOPEN_SOURCE_EXTENDED +# include +# endif +#endif + +#ifdef _WIN32 +#ifndef stat +#define stat _stat +#endif +#ifndef fstat +#define fstat _fstat +#endif +#ifndef open +#define open _open +#endif +#ifndef close +#define close _close +#endif +#ifndef O_RDONLY +#define O_RDONLY _O_RDONLY +#endif +#endif /* _WIN32 */ + +char uri_root[512]; + +static const struct table_entry { + const char *extension; + const char *content_type; +} content_type_table[] = { + { "txt", "text/plain" }, + { "c", "text/plain" }, + { "h", "text/plain" }, + { "html", "text/html" }, + { "htm", "text/htm" }, + { "css", "text/css" }, + { "gif", "image/gif" }, + { "jpg", "image/jpeg" }, + { "jpeg", "image/jpeg" }, + { "png", "image/png" }, + { "pdf", "application/pdf" }, + { "ps", "application/postscript" }, + { NULL, NULL }, +}; + +struct options +{ + int port; + int iocp; + int verbose; + + int unlink; + const char *unixsock; +}; + +/* Try to guess a good content-type for 'path' */ +static const char * +guess_content_type(const char *path) +{ + const char *last_period, *extension; + const struct table_entry *ent; + last_period = strrchr(path, '.'); + if (!last_period || strchr(last_period, '/')) + goto not_found; /* no exension */ + extension = last_period + 1; + for (ent = &content_type_table[0]; ent->extension; ++ent) { + if (!evutil_ascii_strcasecmp(ent->extension, extension)) + return ent->content_type; + } + +not_found: + return "application/misc"; +} + +/* Callback used for the /dump URI, and for every non-GET request: + * dumps all information to stdout and gives back a trivial 200 ok */ +static void +dump_request_cb(struct evhttp_request *req, void *arg) +{ + const char *cmdtype; + struct evkeyvalq *headers; + struct evkeyval *header; + struct evbuffer *buf; + + switch (evhttp_request_get_command(req)) { + case EVHTTP_REQ_GET: cmdtype = "GET"; break; + case EVHTTP_REQ_POST: cmdtype = "POST"; break; + case EVHTTP_REQ_HEAD: cmdtype = "HEAD"; break; + case EVHTTP_REQ_PUT: cmdtype = "PUT"; break; + case EVHTTP_REQ_DELETE: cmdtype = "DELETE"; break; + case EVHTTP_REQ_OPTIONS: cmdtype = "OPTIONS"; break; + case EVHTTP_REQ_TRACE: cmdtype = "TRACE"; break; + case EVHTTP_REQ_CONNECT: cmdtype = "CONNECT"; break; + case EVHTTP_REQ_PATCH: cmdtype = "PATCH"; break; + default: cmdtype = "unknown"; break; + } + + printf("Received a %s request for %s\nHeaders:\n", + cmdtype, evhttp_request_get_uri(req)); + + headers = evhttp_request_get_input_headers(req); + for (header = headers->tqh_first; header; + header = header->next.tqe_next) { + printf(" %s: %s\n", header->key, header->value); + } + + buf = evhttp_request_get_input_buffer(req); + puts("Input data: <<<"); + while (evbuffer_get_length(buf)) { + int n; + char cbuf[128]; + n = evbuffer_remove(buf, cbuf, sizeof(cbuf)); + if (n > 0) + (void) fwrite(cbuf, 1, n, stdout); + } + puts(">>>"); + + evhttp_send_reply(req, 200, 
"OK", NULL); +} + +/* This callback gets invoked when we get any http request that doesn't match + * any other callback. Like any evhttp server callback, it has a simple job: + * it must eventually call evhttp_send_error() or evhttp_send_reply(). + */ +static void +send_document_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = NULL; + const char *docroot = arg; + const char *uri = evhttp_request_get_uri(req); + struct evhttp_uri *decoded = NULL; + const char *path; + char *decoded_path; + char *whole_path = NULL; + size_t len; + int fd = -1; + struct stat st; + + if (evhttp_request_get_command(req) != EVHTTP_REQ_GET) { + dump_request_cb(req, arg); + return; + } + + printf("Got a GET request for <%s>\n", uri); + + /* Decode the URI */ + decoded = evhttp_uri_parse(uri); + if (!decoded) { + printf("It's not a good URI. Sending BADREQUEST\n"); + evhttp_send_error(req, HTTP_BADREQUEST, 0); + return; + } + + /* Let's see what path the user asked for. */ + path = evhttp_uri_get_path(decoded); + if (!path) path = "/"; + + /* We need to decode it, to see what path the user really wanted. */ + decoded_path = evhttp_uridecode(path, 0, NULL); + if (decoded_path == NULL) + goto err; + /* Don't allow any ".."s in the path, to avoid exposing stuff outside + * of the docroot. This test is both overzealous and underzealous: + * it forbids aceptable paths like "/this/one..here", but it doesn't + * do anything to prevent symlink following." */ + if (strstr(decoded_path, "..")) + goto err; + + len = strlen(decoded_path)+strlen(docroot)+2; + if (!(whole_path = malloc(len))) { + perror("malloc"); + goto err; + } + evutil_snprintf(whole_path, len, "%s/%s", docroot, decoded_path); + + if (stat(whole_path, &st)<0) { + goto err; + } + + /* This holds the content we're sending. */ + evb = evbuffer_new(); + + if (S_ISDIR(st.st_mode)) { + /* If it's a directory, read the comments and make a little + * index page */ +#ifdef _WIN32 + HANDLE d; + WIN32_FIND_DATAA ent; + char *pattern; + size_t dirlen; +#else + DIR *d; + struct dirent *ent; +#endif + const char *trailing_slash = ""; + + if (!strlen(path) || path[strlen(path)-1] != '/') + trailing_slash = "/"; + +#ifdef _WIN32 + dirlen = strlen(whole_path); + pattern = malloc(dirlen+3); + memcpy(pattern, whole_path, dirlen); + pattern[dirlen] = '\\'; + pattern[dirlen+1] = '*'; + pattern[dirlen+2] = '\0'; + d = FindFirstFileA(pattern, &ent); + free(pattern); + if (d == INVALID_HANDLE_VALUE) + goto err; +#else + if (!(d = opendir(whole_path))) + goto err; +#endif + + evbuffer_add_printf(evb, + "\n" + "\n \n" + " \n" + " %s\n" + " \n" + " \n" + " \n" + "

  <h1>%s</h1>\n" + "  <ul>\n", + decoded_path, /* XXX html-escape this. */ + path, /* XXX html-escape this? */ + trailing_slash, + decoded_path /* XXX html-escape this */); +#ifdef _WIN32 + do { + const char *name = ent.cFileName; +#else + while ((ent = readdir(d))) { + const char *name = ent->d_name; +#endif + evbuffer_add_printf(evb, + "    <li><a href=\"%s\">%s</a>\n", + name, name);/* XXX escape this */ +#ifdef _WIN32 + } while (FindNextFileA(d, &ent)); +#else + } +#endif + evbuffer_add_printf(evb, "</ul></body></html>
\n"); +#ifdef _WIN32 + FindClose(d); +#else + closedir(d); +#endif + evhttp_add_header(evhttp_request_get_output_headers(req), + "Content-Type", "text/html"); + } else { + /* Otherwise it's a file; add it to the buffer to get + * sent via sendfile */ + const char *type = guess_content_type(decoded_path); + if ((fd = open(whole_path, O_RDONLY)) < 0) { + perror("open"); + goto err; + } + + if (fstat(fd, &st)<0) { + /* Make sure the length still matches, now that we + * opened the file :/ */ + perror("fstat"); + goto err; + } + evhttp_add_header(evhttp_request_get_output_headers(req), + "Content-Type", type); + evbuffer_add_file(evb, fd, 0, st.st_size); + } + + evhttp_send_reply(req, 200, "OK", evb); + goto done; +err: + evhttp_send_error(req, 404, "Document was not found"); + if (fd>=0) + close(fd); +done: + if (decoded) + evhttp_uri_free(decoded); + if (decoded_path) + free(decoded_path); + if (whole_path) + free(whole_path); + if (evb) + evbuffer_free(evb); +} + +static void +print_usage(FILE *out, const char *prog, int exit_code) +{ + fprintf(out, "Syntax: [ OPTS ] %s \n", prog); + fprintf(out, " -p - port\n"); + fprintf(out, " -U - bind to unix socket\n"); + fprintf(out, " -u - unlink unix socket before bind\n"); + fprintf(out, " -I - IOCP\n"); + fprintf(out, " -v - verbosity, enables libevent debug logging too\n"); + exit(exit_code); +} +static struct options +parse_opts(int argc, char **argv) +{ + struct options o; + int opt; + + memset(&o, 0, sizeof(o)); + + while ((opt = getopt(argc, argv, "hp:U:uIv")) != -1) { + switch (opt) { + case 'p': o.port = atoi(optarg); break; + case 'U': o.unixsock = optarg; break; + case 'u': o.unlink = 1; break; + case 'I': o.iocp = 1; break; + case 'v': ++o.verbose; break; + case 'h': print_usage(stdout, argv[0], 0); break; + default : fprintf(stderr, "Unknown option %c\n", opt); break; + } + } + + if (optind >= argc || (argc-optind) > 1) { + print_usage(stdout, argv[0], 1); + } + + return o; +} + +static void +do_term(int sig, short events, void *arg) +{ + struct event_base *base = arg; + event_base_loopbreak(base); + fprintf(stderr, "Got %i, Terminating\n", sig); +} + +static int +display_listen_sock(struct evhttp_bound_socket *handle) +{ + struct sockaddr_storage ss; + evutil_socket_t fd; + ev_socklen_t socklen = sizeof(ss); + char addrbuf[128]; + void *inaddr; + const char *addr; + int got_port = -1; + + fd = evhttp_bound_socket_get_fd(handle); + memset(&ss, 0, sizeof(ss)); + if (getsockname(fd, (struct sockaddr *)&ss, &socklen)) { + perror("getsockname() failed"); + return 1; + } + + if (ss.ss_family == AF_INET) { + got_port = ntohs(((struct sockaddr_in*)&ss)->sin_port); + inaddr = &((struct sockaddr_in*)&ss)->sin_addr; + } else if (ss.ss_family == AF_INET6) { + got_port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); + inaddr = &((struct sockaddr_in6*)&ss)->sin6_addr; + } +#ifdef EVENT__HAVE_STRUCT_SOCKADDR_UN + else if (ss.ss_family == AF_UNIX) { + printf("Listening on <%s>\n", ((struct sockaddr_un*)&ss)->sun_path); + return 0; + } +#endif + else { + fprintf(stderr, "Weird address family %d\n", + ss.ss_family); + return 1; + } + + addr = evutil_inet_ntop(ss.ss_family, inaddr, addrbuf, + sizeof(addrbuf)); + if (addr) { + printf("Listening on %s:%d\n", addr, got_port); + evutil_snprintf(uri_root, sizeof(uri_root), + "http://%s:%d",addr,got_port); + } else { + fprintf(stderr, "evutil_inet_ntop failed\n"); + return 1; + } + + return 0; +} + +int +main(int argc, char **argv) +{ + struct event_config *cfg = NULL; + struct event_base *base = NULL; + 
struct evhttp *http = NULL; + struct evhttp_bound_socket *handle = NULL; + struct evconnlistener *lev = NULL; + struct event *term = NULL; + struct options o = parse_opts(argc, argv); + int ret = 0; + +#ifdef _WIN32 + { + WORD wVersionRequested; + WSADATA wsaData; + wVersionRequested = MAKEWORD(2, 2); + WSAStartup(wVersionRequested, &wsaData); + } +#else + if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { + ret = 1; + goto err; + } +#endif + + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + /** Read env like in regress */ + if (o.verbose || getenv("EVENT_DEBUG_LOGGING_ALL")) + event_enable_debug_logging(EVENT_DBG_ALL); + + cfg = event_config_new(); +#ifdef _WIN32 + if (o.iocp) { +#ifdef EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED + evthread_use_windows_threads(); + event_config_set_num_cpus_hint(cfg, 8); +#endif + event_config_set_flag(cfg, EVENT_BASE_FLAG_STARTUP_IOCP); + } +#endif + + base = event_base_new_with_config(cfg); + if (!base) { + fprintf(stderr, "Couldn't create an event_base: exiting\n"); + ret = 1; + } + event_config_free(cfg); + cfg = NULL; + + /* Create a new evhttp object to handle requests. */ + http = evhttp_new(base); + if (!http) { + fprintf(stderr, "couldn't create evhttp. Exiting.\n"); + ret = 1; + } + + /* The /dump URI will dump all requests to stdout and say 200 ok. */ + evhttp_set_cb(http, "/dump", dump_request_cb, NULL); + + /* We want to accept arbitrary requests, so we need to set a "generic" + * cb. We can also add callbacks for specific paths. */ + evhttp_set_gencb(http, send_document_cb, argv[1]); + + if (o.unixsock) { +#ifdef EVENT__HAVE_STRUCT_SOCKADDR_UN + struct sockaddr_un addr; + + if (o.unlink && (unlink(o.unixsock) && errno != ENOENT)) { + perror(o.unixsock); + ret = 1; + goto err; + } + + addr.sun_family = AF_UNIX; + strcpy(addr.sun_path, o.unixsock); + + lev = evconnlistener_new_bind(base, NULL, NULL, + LEV_OPT_CLOSE_ON_FREE, -1, + (struct sockaddr *)&addr, sizeof(addr)); + if (!lev) { + perror("Cannot create listener"); + ret = 1; + goto err; + } + + handle = evhttp_bind_listener(http, lev); + if (!handle) { + fprintf(stderr, "couldn't bind to %s. Exiting.\n", o.unixsock); + ret = 1; + goto err; + } +#else /* !EVENT__HAVE_STRUCT_SOCKADDR_UN */ + fprintf(stderr, "-U is not supported on this platform. Exiting.\n"); + ret = 1; + goto err; +#endif /* EVENT__HAVE_STRUCT_SOCKADDR_UN */ + } + else { + handle = evhttp_bind_socket_with_handle(http, "0.0.0.0", o.port); + if (!handle) { + fprintf(stderr, "couldn't bind to port %d. Exiting.\n", o.port); + ret = 1; + goto err; + } + } + + if (display_listen_sock(handle)) { + ret = 1; + goto err; + } + + term = evsignal_new(base, SIGINT, do_term, base); + if (!term) + goto err; + if (event_add(term, NULL)) + goto err; + + event_base_dispatch(base); + +#ifdef _WIN32 + WSACleanup(); +#endif + +err: + if (cfg) + event_config_free(cfg); + if (http) + evhttp_free(http); + if (term) + event_free(term); + if (base) + event_base_free(base); + + return ret; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/https-client.c b/probe-busybox/libevent-2.1.11-stable/sample/https-client.c new file mode 100644 index 00000000..58e449b1 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/https-client.c @@ -0,0 +1,514 @@ +/* + This is an example of how to hook up evhttp with bufferevent_ssl + + It just GETs an https URL given on the command-line and prints the response + body to stdout. + + Actually, it also accepts plain http URLs to make it easy to compare http vs + https code paths. 
+ + Loosely based on le-proxy.c. + */ + +// Get rid of OSX 10.7 and greater deprecation warnings. +#if defined(__APPLE__) && defined(__clang__) +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include + +#define snprintf _snprintf +#define strcasecmp _stricmp +#else +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "openssl_hostname_validation.h" + +static int ignore_cert = 0; + +static void +http_request_done(struct evhttp_request *req, void *ctx) +{ + char buffer[256]; + int nread; + + if (!req || !evhttp_request_get_response_code(req)) { + /* If req is NULL, it means an error occurred, but + * sadly we are mostly left guessing what the error + * might have been. We'll do our best... */ + struct bufferevent *bev = (struct bufferevent *) ctx; + unsigned long oslerr; + int printed_err = 0; + int errcode = EVUTIL_SOCKET_ERROR(); + fprintf(stderr, "some request failed - no idea which one though!\n"); + /* Print out the OpenSSL error queue that libevent + * squirreled away for us, if any. */ + while ((oslerr = bufferevent_get_openssl_error(bev))) { + ERR_error_string_n(oslerr, buffer, sizeof(buffer)); + fprintf(stderr, "%s\n", buffer); + printed_err = 1; + } + /* If the OpenSSL error queue was empty, maybe it was a + * socket error; let's try printing that. */ + if (! printed_err) + fprintf(stderr, "socket error = %s (%d)\n", + evutil_socket_error_to_string(errcode), + errcode); + return; + } + + fprintf(stderr, "Response line: %d %s\n", + evhttp_request_get_response_code(req), + evhttp_request_get_response_code_line(req)); + + while ((nread = evbuffer_remove(evhttp_request_get_input_buffer(req), + buffer, sizeof(buffer))) + > 0) { + /* These are just arbitrary chunks of 256 bytes. + * They are not lines, so we can't treat them as such. */ + fwrite(buffer, nread, 1, stdout); + } +} + +static void +syntax(void) +{ + fputs("Syntax:\n", stderr); + fputs(" https-client -url [-data data-file.bin] [-ignore-cert] [-retries num] [-timeout sec] [-crt crt]\n", stderr); + fputs("Example:\n", stderr); + fputs(" https-client -url https://ip.appspot.com/\n", stderr); +} + +static void +err(const char *msg) +{ + fputs(msg, stderr); +} + +static void +err_openssl(const char *func) +{ + fprintf (stderr, "%s failed:\n", func); + + /* This is the OpenSSL function that prints the contents of the + * error stack to the specified file handle. */ + ERR_print_errors_fp (stderr); + + exit(1); +} + +#ifndef _WIN32 +/* See http://archives.seul.org/libevent/users/Jan-2013/msg00039.html */ +static int cert_verify_callback(X509_STORE_CTX *x509_ctx, void *arg) +{ + char cert_str[256]; + const char *host = (const char *) arg; + const char *res_str = "X509_verify_cert failed"; + HostnameValidationResult res = Error; + + /* This is the function that OpenSSL would call if we hadn't called + * SSL_CTX_set_cert_verify_callback(). Therefore, we are "wrapping" + * the default functionality, rather than replacing it. 
*/ + int ok_so_far = 0; + + X509 *server_cert = NULL; + + if (ignore_cert) { + return 1; + } + + ok_so_far = X509_verify_cert(x509_ctx); + + server_cert = X509_STORE_CTX_get_current_cert(x509_ctx); + + if (ok_so_far) { + res = validate_hostname(host, server_cert); + + switch (res) { + case MatchFound: + res_str = "MatchFound"; + break; + case MatchNotFound: + res_str = "MatchNotFound"; + break; + case NoSANPresent: + res_str = "NoSANPresent"; + break; + case MalformedCertificate: + res_str = "MalformedCertificate"; + break; + case Error: + res_str = "Error"; + break; + default: + res_str = "WTF!"; + break; + } + } + + X509_NAME_oneline(X509_get_subject_name (server_cert), + cert_str, sizeof (cert_str)); + + if (res == MatchFound) { + printf("https server '%s' has this certificate, " + "which looks good to me:\n%s\n", + host, cert_str); + return 1; + } else { + printf("Got '%s' for hostname '%s' and certificate:\n%s\n", + res_str, host, cert_str); + return 0; + } +} +#endif + +int +main(int argc, char **argv) +{ + int r; + struct event_base *base = NULL; + struct evhttp_uri *http_uri = NULL; + const char *url = NULL, *data_file = NULL; + const char *crt = NULL; + const char *scheme, *host, *path, *query; + char uri[256]; + int port; + int retries = 0; + int timeout = -1; + + SSL_CTX *ssl_ctx = NULL; + SSL *ssl = NULL; + struct bufferevent *bev; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req; + struct evkeyvalq *output_headers; + struct evbuffer *output_buffer; + + int i; + int ret = 0; + enum { HTTP, HTTPS } type = HTTP; + + for (i = 1; i < argc; i++) { + if (!strcmp("-url", argv[i])) { + if (i < argc - 1) { + url = argv[i + 1]; + } else { + syntax(); + goto error; + } + } else if (!strcmp("-crt", argv[i])) { + if (i < argc - 1) { + crt = argv[i + 1]; + } else { + syntax(); + goto error; + } + } else if (!strcmp("-ignore-cert", argv[i])) { + ignore_cert = 1; + } else if (!strcmp("-data", argv[i])) { + if (i < argc - 1) { + data_file = argv[i + 1]; + } else { + syntax(); + goto error; + } + } else if (!strcmp("-retries", argv[i])) { + if (i < argc - 1) { + retries = atoi(argv[i + 1]); + } else { + syntax(); + goto error; + } + } else if (!strcmp("-timeout", argv[i])) { + if (i < argc - 1) { + timeout = atoi(argv[i + 1]); + } else { + syntax(); + goto error; + } + } else if (!strcmp("-help", argv[i])) { + syntax(); + goto error; + } + } + + if (!url) { + syntax(); + goto error; + } + +#ifdef _WIN32 + { + WORD wVersionRequested; + WSADATA wsaData; + int err; + + wVersionRequested = MAKEWORD(2, 2); + + err = WSAStartup(wVersionRequested, &wsaData); + if (err != 0) { + printf("WSAStartup failed with error: %d\n", err); + goto error; + } + } +#endif // _WIN32 + + http_uri = evhttp_uri_parse(url); + if (http_uri == NULL) { + err("malformed url"); + goto error; + } + + scheme = evhttp_uri_get_scheme(http_uri); + if (scheme == NULL || (strcasecmp(scheme, "https") != 0 && + strcasecmp(scheme, "http") != 0)) { + err("url must be http or https"); + goto error; + } + + host = evhttp_uri_get_host(http_uri); + if (host == NULL) { + err("url must have a host"); + goto error; + } + + port = evhttp_uri_get_port(http_uri); + if (port == -1) { + port = (strcasecmp(scheme, "http") == 0) ? 
80 : 443; + } + + path = evhttp_uri_get_path(http_uri); + if (strlen(path) == 0) { + path = "/"; + } + + query = evhttp_uri_get_query(http_uri); + if (query == NULL) { + snprintf(uri, sizeof(uri) - 1, "%s", path); + } else { + snprintf(uri, sizeof(uri) - 1, "%s?%s", path, query); + } + uri[sizeof(uri) - 1] = '\0'; + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) + // Initialize OpenSSL + SSL_library_init(); + ERR_load_crypto_strings(); + SSL_load_error_strings(); + OpenSSL_add_all_algorithms(); +#endif + + /* This isn't strictly necessary... OpenSSL performs RAND_poll + * automatically on first use of random number generator. */ + r = RAND_poll(); + if (r == 0) { + err_openssl("RAND_poll"); + goto error; + } + + /* Create a new OpenSSL context */ + ssl_ctx = SSL_CTX_new(SSLv23_method()); + if (!ssl_ctx) { + err_openssl("SSL_CTX_new"); + goto error; + } + +#ifndef _WIN32 + /* TODO: Add certificate loading on Windows as well */ + + if (crt == NULL) { + X509_STORE *store; + /* Attempt to use the system's trusted root certificates. */ + store = SSL_CTX_get_cert_store(ssl_ctx); + if (X509_STORE_set_default_paths(store) != 1) { + err_openssl("X509_STORE_set_default_paths"); + goto error; + } + } else { + if (SSL_CTX_load_verify_locations(ssl_ctx, crt, NULL) != 1) { + err_openssl("SSL_CTX_load_verify_locations"); + goto error; + } + } + /* Ask OpenSSL to verify the server certificate. Note that this + * does NOT include verifying that the hostname is correct. + * So, by itself, this means anyone with any legitimate + * CA-issued certificate for any website, can impersonate any + * other website in the world. This is not good. See "The + * Most Dangerous Code in the World" article at + * https://crypto.stanford.edu/~dabo/pubs/abstracts/ssl-client-bugs.html + */ + SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, NULL); + /* This is how we solve the problem mentioned in the previous + * comment. We "wrap" OpenSSL's validation routine in our + * own routine, which also validates the hostname by calling + * the code provided by iSECPartners. Note that even though + * the "Everything You've Always Wanted to Know About + * Certificate Validation With OpenSSL (But Were Afraid to + * Ask)" paper from iSECPartners says very explicitly not to + * call SSL_CTX_set_cert_verify_callback (at the bottom of + * page 2), what we're doing here is safe because our + * cert_verify_callback() calls X509_verify_cert(), which is + * OpenSSL's built-in routine which would have been called if + * we hadn't set the callback. Therefore, we're just + * "wrapping" OpenSSL's routine, not replacing it. 
*/ + SSL_CTX_set_cert_verify_callback(ssl_ctx, cert_verify_callback, + (void *) host); +#else // _WIN32 + (void)crt; +#endif // _WIN32 + + // Create event base + base = event_base_new(); + if (!base) { + perror("event_base_new()"); + goto error; + } + + // Create OpenSSL bufferevent and stack evhttp on top of it + ssl = SSL_new(ssl_ctx); + if (ssl == NULL) { + err_openssl("SSL_new()"); + goto error; + } + + #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME + // Set hostname for SNI extension + SSL_set_tlsext_host_name(ssl, host); + #endif + + if (strcasecmp(scheme, "http") == 0) { + bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE); + } else { + type = HTTPS; + bev = bufferevent_openssl_socket_new(base, -1, ssl, + BUFFEREVENT_SSL_CONNECTING, + BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS); + } + + if (bev == NULL) { + fprintf(stderr, "bufferevent_openssl_socket_new() failed\n"); + goto error; + } + + bufferevent_openssl_set_allow_dirty_shutdown(bev, 1); + + // For simplicity, we let DNS resolution block. Everything else should be + // asynchronous though. + evcon = evhttp_connection_base_bufferevent_new(base, NULL, bev, + host, port); + if (evcon == NULL) { + fprintf(stderr, "evhttp_connection_base_bufferevent_new() failed\n"); + goto error; + } + + if (retries > 0) { + evhttp_connection_set_retries(evcon, retries); + } + if (timeout >= 0) { + evhttp_connection_set_timeout(evcon, timeout); + } + + // Fire off the request + req = evhttp_request_new(http_request_done, bev); + if (req == NULL) { + fprintf(stderr, "evhttp_request_new() failed\n"); + goto error; + } + + output_headers = evhttp_request_get_output_headers(req); + evhttp_add_header(output_headers, "Host", host); + evhttp_add_header(output_headers, "Connection", "close"); + + if (data_file) { + /* NOTE: In production code, you'd probably want to use + * evbuffer_add_file() or evbuffer_add_file_segment(), to + * avoid needless copying. */ + FILE * f = fopen(data_file, "rb"); + char buf[1024]; + size_t s; + size_t bytes = 0; + + if (!f) { + syntax(); + goto error; + } + + output_buffer = evhttp_request_get_output_buffer(req); + while ((s = fread(buf, 1, sizeof(buf), f)) > 0) { + evbuffer_add(output_buffer, buf, s); + bytes += s; + } + evutil_snprintf(buf, sizeof(buf)-1, "%lu", (unsigned long)bytes); + evhttp_add_header(output_headers, "Content-Length", buf); + fclose(f); + } + + r = evhttp_make_request(evcon, req, data_file ? 
EVHTTP_REQ_POST : EVHTTP_REQ_GET, uri); + if (r != 0) { + fprintf(stderr, "evhttp_make_request() failed\n"); + goto error; + } + + event_base_dispatch(base); + goto cleanup; + +error: + ret = 1; +cleanup: + if (evcon) + evhttp_connection_free(evcon); + if (http_uri) + evhttp_uri_free(http_uri); + if (base) + event_base_free(base); + + if (ssl_ctx) + SSL_CTX_free(ssl_ctx); + if (type == HTTP && ssl) + SSL_free(ssl); +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) + EVP_cleanup(); + ERR_free_strings(); + +#if OPENSSL_VERSION_NUMBER < 0x10000000L + ERR_remove_state(0); +#else + ERR_remove_thread_state(NULL); +#endif + + CRYPTO_cleanup_all_ex_data(); + + sk_SSL_COMP_free(SSL_COMP_get_compression_methods()); +#endif /* (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) */ + +#ifdef _WIN32 + WSACleanup(); +#endif + + return ret; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/include.am b/probe-busybox/libevent-2.1.11-stable/sample/include.am new file mode 100644 index 00000000..cc003b78 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/include.am @@ -0,0 +1,53 @@ +# sample/include.am for libevent +# Copyright 2000-2007 Niels Provos +# Copyright 2007-2012 Niels Provos and Nick Mathewson +# +# See LICENSE for copying information. + +SAMPLES = \ + sample/dns-example \ + sample/event-read-fifo \ + sample/hello-world \ + sample/http-server \ + sample/http-connect \ + sample/signal-test \ + sample/time-test + +if OPENSSL +SAMPLES += sample/le-proxy +sample_le_proxy_SOURCES = sample/le-proxy.c +sample_le_proxy_LDADD = libevent.la libevent_openssl.la $(OPENSSL_LIBS) $(OPENSSL_LIBADD) +sample_le_proxy_CPPFLAGS = $(AM_CPPFLAGS) $(OPENSSL_INCS) + +SAMPLES += sample/https-client +sample_https_client_SOURCES = \ + sample/https-client.c \ + sample/hostcheck.c \ + sample/openssl_hostname_validation.c +sample_https_client_LDADD = libevent.la libevent_openssl.la $(OPENSSL_LIBS) $(OPENSSL_LIBADD) +sample_https_client_CPPFLAGS = $(AM_CPPFLAGS) $(OPENSSL_INCS) +noinst_HEADERS += \ + sample/hostcheck.h \ + sample/openssl_hostname_validation.h +endif + +if BUILD_SAMPLES +noinst_PROGRAMS += $(SAMPLES) +endif + +$(SAMPLES) : libevent.la + +sample_event_read_fifo_SOURCES = sample/event-read-fifo.c +sample_event_read_fifo_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_time_test_SOURCES = sample/time-test.c +sample_time_test_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_signal_test_SOURCES = sample/signal-test.c +sample_signal_test_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_dns_example_SOURCES = sample/dns-example.c +sample_dns_example_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_hello_world_SOURCES = sample/hello-world.c +sample_hello_world_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_http_server_SOURCES = sample/http-server.c +sample_http_server_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +sample_http_connect_SOURCES = sample/http-connect.c +sample_http_connect_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la diff --git a/probe-busybox/libevent-2.1.11-stable/sample/le-proxy.c b/probe-busybox/libevent-2.1.11-stable/sample/le-proxy.c new file mode 100644 index 00000000..13e0e2ae --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/le-proxy.c @@ -0,0 +1,305 @@ +/* + This example code shows how to write an (optionally encrypting) SSL proxy + with Libevent's bufferevent layer. 
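Stripped of the SSL and flow-control details, the relay at the heart of this proxy is just an evbuffer move from one bufferevent to its partner; a simplified excerpt of the readcb() that follows:

/* Simplified core of readcb(): whatever arrived on 'bev' is moved
 * (not copied) into the output buffer of the other side. */
struct evbuffer *src = bufferevent_get_input(bev);
struct evbuffer *dst = bufferevent_get_output(partner);
evbuffer_add_buffer(dst, src);

A typical invocation, per the syntax() helper further down, is: le-proxy 127.0.0.1:8888 1.2.3.4:80, with -s making the outbound leg speak SSL and -W disabling the filtering wrapper.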
+ + XXX It's a little ugly and should probably be cleaned up. + */ + +// Get rid of OSX 10.7 and greater deprecation warnings. +#if defined(__APPLE__) && defined(__clang__) +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include +#else +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include "util-internal.h" +#include +#include +#include +#include "openssl-compat.h" + +static struct event_base *base; +static struct sockaddr_storage listen_on_addr; +static struct sockaddr_storage connect_to_addr; +static int connect_to_addrlen; +static int use_wrapper = 1; + +static SSL_CTX *ssl_ctx = NULL; + +#define MAX_OUTPUT (512*1024) + +static void drained_writecb(struct bufferevent *bev, void *ctx); +static void eventcb(struct bufferevent *bev, short what, void *ctx); + +static void +readcb(struct bufferevent *bev, void *ctx) +{ + struct bufferevent *partner = ctx; + struct evbuffer *src, *dst; + size_t len; + src = bufferevent_get_input(bev); + len = evbuffer_get_length(src); + if (!partner) { + evbuffer_drain(src, len); + return; + } + dst = bufferevent_get_output(partner); + evbuffer_add_buffer(dst, src); + + if (evbuffer_get_length(dst) >= MAX_OUTPUT) { + /* We're giving the other side data faster than it can + * pass it on. Stop reading here until we have drained the + * other side to MAX_OUTPUT/2 bytes. */ + bufferevent_setcb(partner, readcb, drained_writecb, + eventcb, bev); + bufferevent_setwatermark(partner, EV_WRITE, MAX_OUTPUT/2, + MAX_OUTPUT); + bufferevent_disable(bev, EV_READ); + } +} + +static void +drained_writecb(struct bufferevent *bev, void *ctx) +{ + struct bufferevent *partner = ctx; + + /* We were choking the other side until we drained our outbuf a bit. + * Now it seems drained. */ + bufferevent_setcb(bev, readcb, NULL, eventcb, partner); + bufferevent_setwatermark(bev, EV_WRITE, 0, 0); + if (partner) + bufferevent_enable(partner, EV_READ); +} + +static void +close_on_finished_writecb(struct bufferevent *bev, void *ctx) +{ + struct evbuffer *b = bufferevent_get_output(bev); + + if (evbuffer_get_length(b) == 0) { + bufferevent_free(bev); + } +} + +static void +eventcb(struct bufferevent *bev, short what, void *ctx) +{ + struct bufferevent *partner = ctx; + + if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) { + if (what & BEV_EVENT_ERROR) { + unsigned long err; + while ((err = (bufferevent_get_openssl_error(bev)))) { + const char *msg = (const char*) + ERR_reason_error_string(err); + const char *lib = (const char*) + ERR_lib_error_string(err); + const char *func = (const char*) + ERR_func_error_string(err); + fprintf(stderr, + "%s in %s %s\n", msg, lib, func); + } + if (errno) + perror("connection error"); + } + + if (partner) { + /* Flush all pending data */ + readcb(bev, ctx); + + if (evbuffer_get_length( + bufferevent_get_output(partner))) { + /* We still have to flush data from the other + * side, but when that's done, close the other + * side. */ + bufferevent_setcb(partner, + NULL, close_on_finished_writecb, + eventcb, NULL); + bufferevent_disable(partner, EV_READ); + } else { + /* We have nothing left to say to the other + * side; close it. 
*/ + bufferevent_free(partner); + } + } + bufferevent_free(bev); + } +} + +static void +syntax(void) +{ + fputs("Syntax:\n", stderr); + fputs(" le-proxy [-s] [-W] \n", stderr); + fputs("Example:\n", stderr); + fputs(" le-proxy 127.0.0.1:8888 1.2.3.4:80\n", stderr); + + exit(1); +} + +static void +accept_cb(struct evconnlistener *listener, evutil_socket_t fd, + struct sockaddr *a, int slen, void *p) +{ + struct bufferevent *b_out, *b_in; + /* Create two linked bufferevent objects: one to connect, one for the + * new connection */ + b_in = bufferevent_socket_new(base, fd, + BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS); + + if (!ssl_ctx || use_wrapper) + b_out = bufferevent_socket_new(base, -1, + BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS); + else { + SSL *ssl = SSL_new(ssl_ctx); + b_out = bufferevent_openssl_socket_new(base, -1, ssl, + BUFFEREVENT_SSL_CONNECTING, + BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS); + } + + assert(b_in && b_out); + + if (bufferevent_socket_connect(b_out, + (struct sockaddr*)&connect_to_addr, connect_to_addrlen)<0) { + perror("bufferevent_socket_connect"); + bufferevent_free(b_out); + bufferevent_free(b_in); + return; + } + + if (ssl_ctx && use_wrapper) { + struct bufferevent *b_ssl; + SSL *ssl = SSL_new(ssl_ctx); + b_ssl = bufferevent_openssl_filter_new(base, + b_out, ssl, BUFFEREVENT_SSL_CONNECTING, + BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS); + if (!b_ssl) { + perror("Bufferevent_openssl_new"); + bufferevent_free(b_out); + bufferevent_free(b_in); + return; + } + b_out = b_ssl; + } + + bufferevent_setcb(b_in, readcb, NULL, eventcb, b_out); + bufferevent_setcb(b_out, readcb, NULL, eventcb, b_in); + + bufferevent_enable(b_in, EV_READ|EV_WRITE); + bufferevent_enable(b_out, EV_READ|EV_WRITE); +} + +int +main(int argc, char **argv) +{ + int i; + int socklen; + + int use_ssl = 0; + struct evconnlistener *listener; + +#ifdef _WIN32 + WORD wVersionRequested; + WSADATA wsaData; + wVersionRequested = MAKEWORD(2, 2); + (void) WSAStartup(wVersionRequested, &wsaData); +#endif + + if (argc < 3) + syntax(); + + for (i=1; i < argc; ++i) { + if (!strcmp(argv[i], "-s")) { + use_ssl = 1; + } else if (!strcmp(argv[i], "-W")) { + use_wrapper = 0; + } else if (argv[i][0] == '-') { + syntax(); + } else + break; + } + + if (i+2 != argc) + syntax(); + + memset(&listen_on_addr, 0, sizeof(listen_on_addr)); + socklen = sizeof(listen_on_addr); + if (evutil_parse_sockaddr_port(argv[i], + (struct sockaddr*)&listen_on_addr, &socklen)<0) { + int p = atoi(argv[i]); + struct sockaddr_in *sin = (struct sockaddr_in*)&listen_on_addr; + if (p < 1 || p > 65535) + syntax(); + sin->sin_port = htons(p); + sin->sin_addr.s_addr = htonl(0x7f000001); + sin->sin_family = AF_INET; + socklen = sizeof(struct sockaddr_in); + } + + memset(&connect_to_addr, 0, sizeof(connect_to_addr)); + connect_to_addrlen = sizeof(connect_to_addr); + if (evutil_parse_sockaddr_port(argv[i+1], + (struct sockaddr*)&connect_to_addr, &connect_to_addrlen)<0) + syntax(); + + base = event_base_new(); + if (!base) { + perror("event_base_new()"); + return 1; + } + + if (use_ssl) { + int r; +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) + SSL_library_init(); + ERR_load_crypto_strings(); + SSL_load_error_strings(); + OpenSSL_add_all_algorithms(); +#endif + r = RAND_poll(); + if (r == 0) { + fprintf(stderr, "RAND_poll() failed.\n"); + return 1; + } + ssl_ctx = SSL_CTX_new(TLS_method()); + } + + listener = evconnlistener_new_bind(base, accept_cb, NULL, + 
LEV_OPT_CLOSE_ON_FREE|LEV_OPT_CLOSE_ON_EXEC|LEV_OPT_REUSEABLE, + -1, (struct sockaddr*)&listen_on_addr, socklen); + + if (! listener) { + fprintf(stderr, "Couldn't open listener.\n"); + event_base_free(base); + return 1; + } + event_base_dispatch(base); + + evconnlistener_free(listener); + event_base_free(base); + +#ifdef _WIN32 + WSACleanup(); +#endif + + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.c b/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.c new file mode 100644 index 00000000..4036ccba --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.c @@ -0,0 +1,178 @@ +/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */ + +/* +Copyright (C) 2012, iSEC Partners. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + */ + +/* + * Helper functions to perform basic hostname validation using OpenSSL. + * + * Please read "everything-you-wanted-to-know-about-openssl.pdf" before + * attempting to use this code. This whitepaper describes how the code works, + * how it should be used, and what its limitations are. + * + * Author: Alban Diquet + * License: See LICENSE + * + */ + +// Get rid of OSX 10.7 and greater deprecation warnings. +#if defined(__APPLE__) && defined(__clang__) +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#include +#include +#include + +#include "openssl_hostname_validation.h" +#include "hostcheck.h" + +#define HOSTNAME_MAX_SIZE 255 + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \ + (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) +#define ASN1_STRING_get0_data ASN1_STRING_data +#endif + +/** +* Tries to find a match for hostname in the certificate's Common Name field. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if the Common Name had a NUL character embedded in it. +* Returns Error if the Common Name could not be extracted. 
+*/ +static HostnameValidationResult matches_common_name(const char *hostname, const X509 *server_cert) { + int common_name_loc = -1; + X509_NAME_ENTRY *common_name_entry = NULL; + ASN1_STRING *common_name_asn1 = NULL; + const char *common_name_str = NULL; + + // Find the position of the CN field in the Subject field of the certificate + common_name_loc = X509_NAME_get_index_by_NID(X509_get_subject_name((X509 *) server_cert), NID_commonName, -1); + if (common_name_loc < 0) { + return Error; + } + + // Extract the CN field + common_name_entry = X509_NAME_get_entry(X509_get_subject_name((X509 *) server_cert), common_name_loc); + if (common_name_entry == NULL) { + return Error; + } + + // Convert the CN field to a C string + common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry); + if (common_name_asn1 == NULL) { + return Error; + } + common_name_str = (char *) ASN1_STRING_get0_data(common_name_asn1); + + // Make sure there isn't an embedded NUL character in the CN + if ((size_t)ASN1_STRING_length(common_name_asn1) != strlen(common_name_str)) { + return MalformedCertificate; + } + + // Compare expected hostname with the CN + if (Curl_cert_hostcheck(common_name_str, hostname) == CURL_HOST_MATCH) { + return MatchFound; + } + else { + return MatchNotFound; + } +} + + +/** +* Tries to find a match for hostname in the certificate's Subject Alternative Name extension. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns NoSANPresent if the SAN extension was not present in the certificate. +*/ +static HostnameValidationResult matches_subject_alternative_name(const char *hostname, const X509 *server_cert) { + HostnameValidationResult result = MatchNotFound; + int i; + int san_names_nb = -1; + STACK_OF(GENERAL_NAME) *san_names = NULL; + + // Try to extract the names within the SAN extension from the certificate + san_names = X509_get_ext_d2i((X509 *) server_cert, NID_subject_alt_name, NULL, NULL); + if (san_names == NULL) { + return NoSANPresent; + } + san_names_nb = sk_GENERAL_NAME_num(san_names); + + // Check each name within the extension + for (i=0; itype == GEN_DNS) { + // Current name is a DNS name, let's check it + const char *dns_name = (char *) ASN1_STRING_get0_data(current_name->d.dNSName); + + // Make sure there isn't an embedded NUL character in the DNS name + if ((size_t)ASN1_STRING_length(current_name->d.dNSName) != strlen(dns_name)) { + result = MalformedCertificate; + break; + } + else { // Compare expected hostname with the DNS name + if (Curl_cert_hostcheck(dns_name, hostname) + == CURL_HOST_MATCH) { + result = MatchFound; + break; + } + } + } + } + sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free); + + return result; +} + + +/** +* Validates the server's identity by looking for the expected hostname in the +* server's certificate. As described in RFC 6125, it first tries to find a match +* in the Subject Alternative Name extension. If the extension is not present in +* the certificate, it checks the Common Name instead. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns Error if there was an error. 
+*/ +HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert) { + HostnameValidationResult result; + + if((hostname == NULL) || (server_cert == NULL)) + return Error; + + // First try the Subject Alternative Names extension + result = matches_subject_alternative_name(hostname, server_cert); + if (result == NoSANPresent) { + // Extension was not found: try the Common Name + result = matches_common_name(hostname, server_cert); + } + + return result; +} diff --git a/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.h b/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.h new file mode 100644 index 00000000..54aa1c43 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/openssl_hostname_validation.h @@ -0,0 +1,56 @@ +/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */ + +/* +Copyright (C) 2012, iSEC Partners. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + */ + +/* + * Helper functions to perform basic hostname validation using OpenSSL. + * + * Please read "everything-you-wanted-to-know-about-openssl.pdf" before + * attempting to use this code. This whitepaper describes how the code works, + * how it should be used, and what its limitations are. + * + * Author: Alban Diquet + * License: See LICENSE + * + */ + +typedef enum { + MatchFound, + MatchNotFound, + NoSANPresent, + MalformedCertificate, + Error +} HostnameValidationResult; + +/** +* Validates the server's identity by looking for the expected hostname in the +* server's certificate. As described in RFC 6125, it first tries to find a match +* in the Subject Alternative Name extension. If the extension is not present in +* the certificate, it checks the Common Name instead. +* +* Returns MatchFound if a match was found. +* Returns MatchNotFound if no matches were found. +* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it. +* Returns Error if there was an error. 
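A usage sketch for the helper declared here, assuming an already-established SSL connection (ssl and host belong to the caller and are illustrative):

X509 *peer = SSL_get_peer_certificate(ssl);   /* bumps the refcount */
if (peer == NULL || validate_hostname(host, peer) != MatchFound) {
	/* identity could not be confirmed: refuse or close the connection */
}
if (peer != NULL)
	X509_free(peer);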
+*/ +HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert); diff --git a/probe-busybox/libevent-2.1.11-stable/sample/signal-test.c b/probe-busybox/libevent-2.1.11-stable/sample/signal-test.c new file mode 100644 index 00000000..18668350 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/signal-test.c @@ -0,0 +1,72 @@ +/* + * Compile with: + * cc -I/usr/local/include -o signal-test \ + * signal-test.c -L/usr/local/lib -levent + */ + +#include + +#include + +#include +#ifndef _WIN32 +#include +#include +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#include + +int called = 0; + +static void +signal_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *signal = arg; + + printf("signal_cb: got signal %d\n", event_get_signal(signal)); + + if (called >= 2) + event_del(signal); + + called++; +} + +int +main(int argc, char **argv) +{ + struct event *signal_int; + struct event_base* base; +#ifdef _WIN32 + WORD wVersionRequested; + WSADATA wsaData; + + wVersionRequested = MAKEWORD(2, 2); + + (void) WSAStartup(wVersionRequested, &wsaData); +#endif + + /* Initalize the event library */ + base = event_base_new(); + + /* Initalize one event */ + signal_int = evsignal_new(base, SIGINT, signal_cb, event_self_cbarg()); + + event_add(signal_int, NULL); + + event_base_dispatch(base); + event_free(signal_int); + event_base_free(base); + + return (0); +} + diff --git a/probe-busybox/libevent-2.1.11-stable/sample/time-test.c b/probe-busybox/libevent-2.1.11-stable/sample/time-test.c new file mode 100644 index 00000000..8d0fd91b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/sample/time-test.c @@ -0,0 +1,110 @@ +/* + * XXX This sample code was once meant to show how to use the basic Libevent + * interfaces, but it never worked on non-Unix platforms, and some of the + * interfaces have changed since it was first written. It should probably + * be removed or replaced with something better. + * + * Compile with: + * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent + */ + +#include + +#include + +#include +#ifndef _WIN32 +#include +#include +#endif +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +struct timeval lasttime; + +int event_is_persistent; + +static void +timeout_cb(evutil_socket_t fd, short event, void *arg) +{ + struct timeval newtime, difference; + struct event *timeout = arg; + double elapsed; + + evutil_gettimeofday(&newtime, NULL); + evutil_timersub(&newtime, &lasttime, &difference); + elapsed = difference.tv_sec + + (difference.tv_usec / 1.0e6); + + printf("timeout_cb called at %d: %.3f seconds elapsed.\n", + (int)newtime.tv_sec, elapsed); + lasttime = newtime; + + if (! 
event_is_persistent) { + struct timeval tv; + evutil_timerclear(&tv); + tv.tv_sec = 2; + event_add(timeout, &tv); + } +} + +int +main(int argc, char **argv) +{ + struct event timeout; + struct timeval tv; + struct event_base *base; + int flags; + +#ifdef _WIN32 + WORD wVersionRequested; + WSADATA wsaData; + + wVersionRequested = MAKEWORD(2, 2); + + (void)WSAStartup(wVersionRequested, &wsaData); +#endif + + if (argc == 2 && !strcmp(argv[1], "-p")) { + event_is_persistent = 1; + flags = EV_PERSIST; + } else { + event_is_persistent = 0; + flags = 0; + } + + /* Initalize the event library */ + base = event_base_new(); + + /* Initalize one event */ + event_assign(&timeout, base, -1, flags, timeout_cb, (void*) &timeout); + + evutil_timerclear(&tv); + tv.tv_sec = 2; + event_add(&timeout, &tv); + + evutil_gettimeofday(&lasttime, NULL); + + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + event_base_dispatch(base); + + return (0); +} + diff --git a/probe-busybox/libevent-2.1.11-stable/select.c b/probe-busybox/libevent-2.1.11-stable/select.c new file mode 100644 index 00000000..8ae53cc1 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/select.c @@ -0,0 +1,346 @@ +/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */ + +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef EVENT__HAVE_SELECT + +#ifdef __APPLE__ +/* Apple wants us to define this if we might ever pass more than + * FD_SETSIZE bits to select(). */ +#define _DARWIN_UNLIMITED_SELECT +#endif + +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef EVENT__HAVE_SYS_SELECT_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include "event-internal.h" +#include "evsignal-internal.h" +#include "event2/thread.h" +#include "evthread-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" + +#ifndef EVENT__HAVE_FD_MASK +/* This type is mandatory, but Android doesn't define it. 
*/ +typedef unsigned long fd_mask; +#endif + +#ifndef NFDBITS +#define NFDBITS (sizeof(fd_mask)*8) +#endif + +/* Divide positive x by y, rounding up. */ +#define DIV_ROUNDUP(x, y) (((x)+((y)-1))/(y)) + +/* How many bytes to allocate for N fds? */ +#define SELECT_ALLOC_SIZE(n) \ + (DIV_ROUNDUP(n, NFDBITS) * sizeof(fd_mask)) + +struct selectop { + int event_fds; /* Highest fd in fd set */ + int event_fdsz; + int resize_out_sets; + fd_set *event_readset_in; + fd_set *event_writeset_in; + fd_set *event_readset_out; + fd_set *event_writeset_out; +}; + +static void *select_init(struct event_base *); +static int select_add(struct event_base *, int, short old, short events, void*); +static int select_del(struct event_base *, int, short old, short events, void*); +static int select_dispatch(struct event_base *, struct timeval *); +static void select_dealloc(struct event_base *); + +const struct eventop selectops = { + "select", + select_init, + select_add, + select_del, + select_dispatch, + select_dealloc, + 0, /* doesn't need reinit. */ + EV_FEATURE_FDS, + 0, +}; + +static int select_resize(struct selectop *sop, int fdsz); +static void select_free_selectop(struct selectop *sop); + +static void * +select_init(struct event_base *base) +{ + struct selectop *sop; + + if (!(sop = mm_calloc(1, sizeof(struct selectop)))) + return (NULL); + + if (select_resize(sop, SELECT_ALLOC_SIZE(32 + 1))) { + select_free_selectop(sop); + return (NULL); + } + + evsig_init_(base); + + evutil_weakrand_seed_(&base->weakrand_seed, 0); + + return (sop); +} + +#ifdef CHECK_INVARIANTS +static void +check_selectop(struct selectop *sop) +{ + /* nothing to be done here */ +} +#else +#define check_selectop(sop) do { (void) sop; } while (0) +#endif + +static int +select_dispatch(struct event_base *base, struct timeval *tv) +{ + int res=0, i, j, nfds; + struct selectop *sop = base->evbase; + + check_selectop(sop); + if (sop->resize_out_sets) { + fd_set *readset_out=NULL, *writeset_out=NULL; + size_t sz = sop->event_fdsz; + if (!(readset_out = mm_realloc(sop->event_readset_out, sz))) + return (-1); + sop->event_readset_out = readset_out; + if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz))) { + /* We don't free readset_out here, since it was + * already successfully reallocated. The next time + * we call select_dispatch, the realloc will be a + * no-op. 
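For concreteness, with the common 8-byte fd_mask (so NFDBITS == 64) the sizing macros above work out as follows; this is only a worked example of the existing arithmetic:

/*
 *   SELECT_ALLOC_SIZE(32 + 1) = DIV_ROUNDUP(33, 64)  * 8 =  8 bytes
 *   SELECT_ALLOC_SIZE(100)    = DIV_ROUNDUP(100, 64) * 8 = 16 bytes
 *
 * select_init() starts from the first value; select_add() later doubles
 * event_fdsz until it covers SELECT_ALLOC_SIZE(fd + 1) for the highest
 * fd seen, and select_resize() zeroes only the newly added bytes.
 */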
*/ + return (-1); + } + sop->event_writeset_out = writeset_out; + sop->resize_out_sets = 0; + } + + memcpy(sop->event_readset_out, sop->event_readset_in, + sop->event_fdsz); + memcpy(sop->event_writeset_out, sop->event_writeset_in, + sop->event_fdsz); + + nfds = sop->event_fds+1; + + EVBASE_RELEASE_LOCK(base, th_base_lock); + + res = select(nfds, sop->event_readset_out, + sop->event_writeset_out, NULL, tv); + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + + check_selectop(sop); + + if (res == -1) { + if (errno != EINTR) { + event_warn("select"); + return (-1); + } + + return (0); + } + + event_debug(("%s: select reports %d", __func__, res)); + + check_selectop(sop); + i = evutil_weakrand_range_(&base->weakrand_seed, nfds); + for (j = 0; j < nfds; ++j) { + if (++i >= nfds) + i = 0; + res = 0; + if (FD_ISSET(i, sop->event_readset_out)) + res |= EV_READ; + if (FD_ISSET(i, sop->event_writeset_out)) + res |= EV_WRITE; + + if (res == 0) + continue; + + evmap_io_active_(base, i, res); + } + check_selectop(sop); + + return (0); +} + +static int +select_resize(struct selectop *sop, int fdsz) +{ + fd_set *readset_in = NULL; + fd_set *writeset_in = NULL; + + if (sop->event_readset_in) + check_selectop(sop); + + if ((readset_in = mm_realloc(sop->event_readset_in, fdsz)) == NULL) + goto error; + sop->event_readset_in = readset_in; + if ((writeset_in = mm_realloc(sop->event_writeset_in, fdsz)) == NULL) { + /* Note that this will leave event_readset_in expanded. + * That's okay; we wouldn't want to free it, since that would + * change the semantics of select_resize from "expand the + * readset_in and writeset_in, or return -1" to "expand the + * *set_in members, or trash them and return -1." + */ + goto error; + } + sop->event_writeset_in = writeset_in; + sop->resize_out_sets = 1; + + memset((char *)sop->event_readset_in + sop->event_fdsz, 0, + fdsz - sop->event_fdsz); + memset((char *)sop->event_writeset_in + sop->event_fdsz, 0, + fdsz - sop->event_fdsz); + + sop->event_fdsz = fdsz; + check_selectop(sop); + + return (0); + + error: + event_warn("malloc"); + return (-1); +} + + +static int +select_add(struct event_base *base, int fd, short old, short events, void *p) +{ + struct selectop *sop = base->evbase; + (void) p; + + EVUTIL_ASSERT((events & EV_SIGNAL) == 0); + check_selectop(sop); + /* + * Keep track of the highest fd, so that we can calculate the size + * of the fd_sets for select(2) + */ + if (sop->event_fds < fd) { + int fdsz = sop->event_fdsz; + + if (fdsz < (int)sizeof(fd_mask)) + fdsz = (int)sizeof(fd_mask); + + /* In theory we should worry about overflow here. In + * reality, though, the highest fd on a unixy system will + * not overflow here. XXXX */ + while (fdsz < (int) SELECT_ALLOC_SIZE(fd + 1)) + fdsz *= 2; + + if (fdsz != sop->event_fdsz) { + if (select_resize(sop, fdsz)) { + check_selectop(sop); + return (-1); + } + } + + sop->event_fds = fd; + } + + if (events & EV_READ) + FD_SET(fd, sop->event_readset_in); + if (events & EV_WRITE) + FD_SET(fd, sop->event_writeset_in); + check_selectop(sop); + + return (0); +} + +/* + * Nothing to be done here. 
+ */ + +static int +select_del(struct event_base *base, int fd, short old, short events, void *p) +{ + struct selectop *sop = base->evbase; + (void)p; + + EVUTIL_ASSERT((events & EV_SIGNAL) == 0); + check_selectop(sop); + + if (sop->event_fds < fd) { + check_selectop(sop); + return (0); + } + + if (events & EV_READ) + FD_CLR(fd, sop->event_readset_in); + + if (events & EV_WRITE) + FD_CLR(fd, sop->event_writeset_in); + + check_selectop(sop); + return (0); +} + +static void +select_free_selectop(struct selectop *sop) +{ + if (sop->event_readset_in) + mm_free(sop->event_readset_in); + if (sop->event_writeset_in) + mm_free(sop->event_writeset_in); + if (sop->event_readset_out) + mm_free(sop->event_readset_out); + if (sop->event_writeset_out) + mm_free(sop->event_writeset_out); + + memset(sop, 0, sizeof(struct selectop)); + mm_free(sop); +} + +static void +select_dealloc(struct event_base *base) +{ + evsig_dealloc_(base); + + select_free_selectop(base->evbase); +} + +#endif /* EVENT__HAVE_SELECT */ diff --git a/probe-busybox/libevent-2.1.11-stable/signal.c b/probe-busybox/libevent-2.1.11-stable/signal.c new file mode 100644 index 00000000..89f5fc17 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/signal.c @@ -0,0 +1,481 @@ +/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */ + +/* + * Copyright 2000-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
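signal.c, which this hunk adds, delivers signals to the event loop with the classic self-pipe trick; reduced to its essentials (a sketch only, the real code is evsig_handler() and evsig_cb() below):

/* Async-signal-safe side: write one byte naming the signal. */
static void handler(int sig)
{
	ev_uint8_t msg = sig;
	(void) write(evsig_base_fd, &msg, 1);   /* wakes the event base */
}
/* Event-loop side: evsig_cb() read()s those bytes back, tallies them per
 * signal number and calls evmap_signal_active_() for each one seen. */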
+ */ +#include "event2/event-config.h" +#include "evconfig-private.h" + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#include +#undef WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#include +#include +#include +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include +#ifdef EVENT__HAVE_FCNTL_H +#include +#endif + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event-internal.h" +#include "event2/util.h" +#include "evsignal-internal.h" +#include "log-internal.h" +#include "evmap-internal.h" +#include "evthread-internal.h" + +/* + signal.c + + This is the signal-handling implementation we use for backends that don't + have a better way to do signal handling. It uses sigaction() or signal() + to set a signal handler, and a socket pair to tell the event base when + + Note that I said "the event base" : only one event base can be set up to use + this at a time. For historical reasons and backward compatibility, if you + add an event for a signal to event_base A, then add an event for a signal + (any signal!) to event_base B, event_base B will get informed about the + signal, but event_base A won't. + + It would be neat to change this behavior in some future version of Libevent. + kqueue already does something far more sensible. We can make all backends + on Linux do a reasonable thing using signalfd. +*/ + +#ifndef _WIN32 +/* Windows wants us to call our signal handlers as __cdecl. Nobody else + * expects you to do anything crazy like this. */ +#ifndef __cdecl +#define __cdecl +#endif +#endif + +static int evsig_add(struct event_base *, evutil_socket_t, short, short, void *); +static int evsig_del(struct event_base *, evutil_socket_t, short, short, void *); + +static const struct eventop evsigops = { + "signal", + NULL, + evsig_add, + evsig_del, + NULL, + NULL, + 0, 0, 0 +}; + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +/* Lock for evsig_base and evsig_base_n_signals_added fields. */ +static void *evsig_base_lock = NULL; +#endif +/* The event base that's currently getting informed about signals. */ +static struct event_base *evsig_base = NULL; +/* A copy of evsig_base->sigev_n_signals_added. */ +static int evsig_base_n_signals_added = 0; +static evutil_socket_t evsig_base_fd = -1; + +static void __cdecl evsig_handler(int sig); + +#define EVSIGBASE_LOCK() EVLOCK_LOCK(evsig_base_lock, 0) +#define EVSIGBASE_UNLOCK() EVLOCK_UNLOCK(evsig_base_lock, 0) + +void +evsig_set_base_(struct event_base *base) +{ + EVSIGBASE_LOCK(); + evsig_base = base; + evsig_base_n_signals_added = base->sig.ev_n_signals_added; + evsig_base_fd = base->sig.ev_signal_pair[1]; + EVSIGBASE_UNLOCK(); +} + +/* Callback for when the signal handler write a byte to our signaling socket */ +static void +evsig_cb(evutil_socket_t fd, short what, void *arg) +{ + static char signals[1024]; + ev_ssize_t n; + int i; + int ncaught[NSIG]; + struct event_base *base; + + base = arg; + + memset(&ncaught, 0, sizeof(ncaught)); + + while (1) { +#ifdef _WIN32 + n = recv(fd, signals, sizeof(signals), 0); +#else + n = read(fd, signals, sizeof(signals)); +#endif + if (n == -1) { + int err = evutil_socket_geterror(fd); + if (! EVUTIL_ERR_RW_RETRIABLE(err)) + event_sock_err(1, fd, "%s: recv", __func__); + break; + } else if (n == 0) { + /* XXX warn? 
*/ + break; + } + for (i = 0; i < n; ++i) { + ev_uint8_t sig = signals[i]; + if (sig < NSIG) + ncaught[sig]++; + } + } + + EVBASE_ACQUIRE_LOCK(base, th_base_lock); + for (i = 0; i < NSIG; ++i) { + if (ncaught[i]) + evmap_signal_active_(base, i, ncaught[i]); + } + EVBASE_RELEASE_LOCK(base, th_base_lock); +} + +int +evsig_init_(struct event_base *base) +{ + /* + * Our signal handler is going to write to one end of the socket + * pair to wake up our event loop. The event loop then scans for + * signals that got delivered. + */ + if (evutil_make_internal_pipe_(base->sig.ev_signal_pair) == -1) { +#ifdef _WIN32 + /* Make this nonfatal on win32, where sometimes people + have localhost firewalled. */ + event_sock_warn(-1, "%s: socketpair", __func__); +#else + event_sock_err(1, -1, "%s: socketpair", __func__); +#endif + return -1; + } + + if (base->sig.sh_old) { + mm_free(base->sig.sh_old); + } + base->sig.sh_old = NULL; + base->sig.sh_old_max = 0; + + event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[0], + EV_READ | EV_PERSIST, evsig_cb, base); + + base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL; + event_priority_set(&base->sig.ev_signal, 0); + + base->evsigsel = &evsigops; + + return 0; +} + +/* Helper: set the signal handler for evsignal to handler in base, so that + * we can restore the original handler when we clear the current one. */ +int +evsig_set_handler_(struct event_base *base, + int evsignal, void (__cdecl *handler)(int)) +{ +#ifdef EVENT__HAVE_SIGACTION + struct sigaction sa; +#else + ev_sighandler_t sh; +#endif + struct evsig_info *sig = &base->sig; + void *p; + + /* + * resize saved signal handler array up to the highest signal number. + * a dynamic array is used to keep footprint on the low side. + */ + if (evsignal >= sig->sh_old_max) { + int new_max = evsignal + 1; + event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing", + __func__, evsignal, sig->sh_old_max)); + p = mm_realloc(sig->sh_old, new_max * sizeof(*sig->sh_old)); + if (p == NULL) { + event_warn("realloc"); + return (-1); + } + + memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old), + 0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old)); + + sig->sh_old_max = new_max; + sig->sh_old = p; + } + + /* allocate space for previous handler out of dynamic array */ + sig->sh_old[evsignal] = mm_malloc(sizeof *sig->sh_old[evsignal]); + if (sig->sh_old[evsignal] == NULL) { + event_warn("malloc"); + return (-1); + } + + /* save previous handler and setup new handler */ +#ifdef EVENT__HAVE_SIGACTION + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = handler; + sa.sa_flags |= SA_RESTART; + sigfillset(&sa.sa_mask); + + if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) { + event_warn("sigaction"); + mm_free(sig->sh_old[evsignal]); + sig->sh_old[evsignal] = NULL; + return (-1); + } +#else + if ((sh = signal(evsignal, handler)) == SIG_ERR) { + event_warn("signal"); + mm_free(sig->sh_old[evsignal]); + sig->sh_old[evsignal] = NULL; + return (-1); + } + *sig->sh_old[evsignal] = sh; +#endif + + return (0); +} + +static int +evsig_add(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p) +{ + struct evsig_info *sig = &base->sig; + (void)p; + + EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG); + + /* catch signals if they happen quickly */ + EVSIGBASE_LOCK(); + if (evsig_base != base && evsig_base_n_signals_added) { + event_warnx("Added a signal to event base %p with signals " + "already added to event_base %p. Only one can have " + "signals at a time with the %s backend. 
The base with " + "the most recently added signal or the most recent " + "event_base_loop() call gets preference; do " + "not rely on this behavior in future Libevent versions.", + base, evsig_base, base->evsel->name); + } + evsig_base = base; + evsig_base_n_signals_added = ++sig->ev_n_signals_added; + evsig_base_fd = base->sig.ev_signal_pair[1]; + EVSIGBASE_UNLOCK(); + + event_debug(("%s: %d: changing signal handler", __func__, (int)evsignal)); + if (evsig_set_handler_(base, (int)evsignal, evsig_handler) == -1) { + goto err; + } + + + if (!sig->ev_signal_added) { + if (event_add_nolock_(&sig->ev_signal, NULL, 0)) + goto err; + sig->ev_signal_added = 1; + } + + return (0); + +err: + EVSIGBASE_LOCK(); + --evsig_base_n_signals_added; + --sig->ev_n_signals_added; + EVSIGBASE_UNLOCK(); + return (-1); +} + +int +evsig_restore_handler_(struct event_base *base, int evsignal) +{ + int ret = 0; + struct evsig_info *sig = &base->sig; +#ifdef EVENT__HAVE_SIGACTION + struct sigaction *sh; +#else + ev_sighandler_t *sh; +#endif + + if (evsignal >= sig->sh_old_max) { + /* Can't actually restore. */ + /* XXXX.*/ + return 0; + } + + /* restore previous handler */ + sh = sig->sh_old[evsignal]; + sig->sh_old[evsignal] = NULL; +#ifdef EVENT__HAVE_SIGACTION + if (sigaction(evsignal, sh, NULL) == -1) { + event_warn("sigaction"); + ret = -1; + } +#else + if (signal(evsignal, *sh) == SIG_ERR) { + event_warn("signal"); + ret = -1; + } +#endif + + mm_free(sh); + + return ret; +} + +static int +evsig_del(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p) +{ + EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG); + + event_debug(("%s: "EV_SOCK_FMT": restoring signal handler", + __func__, EV_SOCK_ARG(evsignal))); + + EVSIGBASE_LOCK(); + --evsig_base_n_signals_added; + --base->sig.ev_n_signals_added; + EVSIGBASE_UNLOCK(); + + return (evsig_restore_handler_(base, (int)evsignal)); +} + +static void __cdecl +evsig_handler(int sig) +{ + int save_errno = errno; +#ifdef _WIN32 + int socket_errno = EVUTIL_SOCKET_ERROR(); +#endif + ev_uint8_t msg; + + if (evsig_base == NULL) { + event_warnx( + "%s: received signal %d, but have no base configured", + __func__, sig); + return; + } + +#ifndef EVENT__HAVE_SIGACTION + signal(sig, evsig_handler); +#endif + + /* Wake up our notification mechanism */ + msg = sig; +#ifdef _WIN32 + send(evsig_base_fd, (char*)&msg, 1, 0); +#else + { + int r = write(evsig_base_fd, (char*)&msg, 1); + (void)r; /* Suppress 'unused return value' and 'unused var' */ + } +#endif + errno = save_errno; +#ifdef _WIN32 + EVUTIL_SET_SOCKET_ERROR(socket_errno); +#endif +} + +void +evsig_dealloc_(struct event_base *base) +{ + int i = 0; + if (base->sig.ev_signal_added) { + event_del(&base->sig.ev_signal); + base->sig.ev_signal_added = 0; + } + /* debug event is created in evsig_init_/event_assign even when + * ev_signal_added == 0, so unassign is required */ + event_debug_unassign(&base->sig.ev_signal); + + for (i = 0; i < NSIG; ++i) { + if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL) + evsig_restore_handler_(base, i); + } + EVSIGBASE_LOCK(); + if (base == evsig_base) { + evsig_base = NULL; + evsig_base_n_signals_added = 0; + evsig_base_fd = -1; + } + EVSIGBASE_UNLOCK(); + + if (base->sig.ev_signal_pair[0] != -1) { + evutil_closesocket(base->sig.ev_signal_pair[0]); + base->sig.ev_signal_pair[0] = -1; + } + if (base->sig.ev_signal_pair[1] != -1) { + evutil_closesocket(base->sig.ev_signal_pair[1]); + base->sig.ev_signal_pair[1] = -1; + } + base->sig.sh_old_max = 0; + + /* per 
index frees are handled in evsig_del() */ + if (base->sig.sh_old) { + mm_free(base->sig.sh_old); + base->sig.sh_old = NULL; + } +} + +static void +evsig_free_globals_locks(void) +{ +#ifndef EVENT__DISABLE_THREAD_SUPPORT + if (evsig_base_lock != NULL) { + EVTHREAD_FREE_LOCK(evsig_base_lock, 0); + evsig_base_lock = NULL; + } +#endif + return; +} + +void +evsig_free_globals_(void) +{ + evsig_free_globals_locks(); +} + +#ifndef EVENT__DISABLE_THREAD_SUPPORT +int +evsig_global_setup_locks_(const int enable_locks) +{ + EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0); + return 0; +} + +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/strlcpy-internal.h b/probe-busybox/libevent-2.1.11-stable/strlcpy-internal.h new file mode 100644 index 00000000..4151d601 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/strlcpy-internal.h @@ -0,0 +1,24 @@ +#ifndef STRLCPY_INTERNAL_H_INCLUDED_ +#define STRLCPY_INTERNAL_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "event2/event-config.h" +#include "event2/visibility.h" +#include "evconfig-private.h" + +#ifndef EVENT__HAVE_STRLCPY +#include +EVENT2_EXPORT_SYMBOL +size_t event_strlcpy_(char *dst, const char *src, size_t siz); +#define strlcpy event_strlcpy_ +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/probe-busybox/libevent-2.1.11-stable/strlcpy.c b/probe-busybox/libevent-2.1.11-stable/strlcpy.c new file mode 100644 index 00000000..3876475f --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/strlcpy.c @@ -0,0 +1,75 @@ +/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */ + +/* + * Copyright (c) 1998 Todd C. Miller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $"; +#endif /* LIBC_SCCS and not lint */ + +#include "event2/event-config.h" +#include "evconfig-private.h" + +#include + +#ifndef EVENT__HAVE_STRLCPY +#include "strlcpy-internal.h" + +/* + * Copy src to string dst of size siz. At most siz-1 characters + * will be copied. Always NUL terminates (unless siz == 0). 
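Typical use of the convention spelled out in the surrounding comment: the return value is the length the copy wanted to be, so comparing it with the destination size detects truncation (the caller and 'input' below are illustrative):

char name[32];
if (event_strlcpy_(name, input, sizeof(name)) >= sizeof(name)) {
	/* 'input' did not fit into 'name'; handle the truncation */
}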
+ * Returns strlen(src); if retval >= siz, truncation occurred. + */ +size_t +event_strlcpy_(dst, src, siz) + char *dst; + const char *src; + size_t siz; +{ + register char *d = dst; + register const char *s = src; + register size_t n = siz; + + /* Copy as many bytes as will fit */ + if (n != 0 && --n != 0) { + do { + if ((*d++ = *s++) == 0) + break; + } while (--n != 0); + } + + /* Not enough room in dst, add NUL and traverse rest of src */ + if (n == 0) { + if (siz != 0) + *d = '\0'; /* NUL-terminate dst */ + while (*s++) + ; + } + + return (s - src - 1); /* count does not include NUL */ +} +#endif diff --git a/probe-busybox/libevent-2.1.11-stable/test/Makefile.nmake b/probe-busybox/libevent-2.1.11-stable/test/Makefile.nmake new file mode 100644 index 00000000..30c3eb79 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/Makefile.nmake @@ -0,0 +1,79 @@ +# WATCH OUT! This makefile is a work in progress. -*- makefile -*- + +!IFDEF OPENSSL_DIR +SSL_CFLAGS=/I$(OPENSSL_DIR)\include /DEVENT__HAVE_OPENSSL +SSL_OBJS=regress_ssl.obj +SSL_LIBS=..\libevent_openssl.lib $(OPENSSL_DIR)\lib\libeay32.lib $(OPENSSL_DIR)\lib\ssleay32.lib gdi32.lib User32.lib +!ELSE +SSL_CFLAGS= +SSL_OBJS= +SSL_LIBS= +!ENDIF + +CFLAGS=/I.. /I../WIN32-Code /I../WIN32-Code/nmake /I../include /I../compat /DHAVE_CONFIG_H /DTINYTEST_LOCAL $(SSL_CFLAGS) + +CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo + +REGRESS_OBJS=regress.obj regress_buffer.obj regress_http.obj regress_dns.obj \ + regress_testutils.obj \ + regress_rpc.obj regress.gen.obj \ + regress_et.obj regress_bufferevent.obj \ + regress_listener.obj regress_util.obj tinytest.obj \ + regress_main.obj regress_minheap.obj regress_iocp.obj \ + regress_thread.obj regress_finalize.obj $(SSL_OBJS) + +OTHER_OBJS=test-init.obj test-eof.obj test-closed.obj test-weof.obj test-time.obj \ + bench.obj bench_cascade.obj bench_http.obj bench_httpclient.obj \ + test-changelist.obj \ + print-winsock-errors.obj + +PROGRAMS=regress.exe \ + test-init.exe test-eof.exe test-closed.exe test-weof.exe test-time.exe \ + test-changelist.exe \ + print-winsock-errors.exe + +# Disabled for now: +# bench.exe bench_cascade.exe bench_http.exe bench_httpclient.exe + + +LIBS=..\libevent.lib ws2_32.lib shell32.lib advapi32.lib + +all: $(PROGRAMS) + +regress.exe: $(REGRESS_OBJS) + $(CC) $(CFLAGS) $(LIBS) $(SSL_LIBS) $(REGRESS_OBJS) + +test-init.exe: test-init.obj + $(CC) $(CFLAGS) $(LIBS) test-init.obj +test-eof.exe: test-eof.obj + $(CC) $(CFLAGS) $(LIBS) test-eof.obj +test-closed.exe: test-closed.obj + $(CC) $(CFLAGS) $(LIBS) test-closed.obj +test-changelist.exe: test-changelist.obj + $(CC) $(CFLAGS) $(LIBS) test-changelist.obj +test-weof.exe: test-weof.obj + $(CC) $(CFLAGS) $(LIBS) test-weof.obj +test-time.exe: test-time.obj + $(CC) $(CFLAGS) $(LIBS) test-time.obj + +print-winsock-errors.exe: print-winsock-errors.obj + $(CC) $(CFLAGS) $(LIBS) print-winsock-errors.obj + +bench.exe: bench.obj + $(CC) $(CFLAGS) $(LIBS) bench.obj +bench_cascade.exe: bench_cascade.obj + $(CC) $(CFLAGS) $(LIBS) bench_cascade.obj +bench_http.exe: bench_http.obj + $(CC) $(CFLAGS) $(LIBS) bench_http.obj +bench_httpclient.exe: bench_httpclient.obj + $(CC) $(CFLAGS) $(LIBS) bench_httpclient.obj + +regress.gen.c regress.gen.h: regress.rpc ../event_rpcgen.py + echo // > regress.gen.c + echo #define NO_PYTHON_EXISTS > regress.gen.h + -python ..\event_rpcgen.py regress.rpc + +clean: + -del $(REGRESS_OBJS) + -del $(OTHER_OBJS) + -del $(PROGRAMS) diff --git a/probe-busybox/libevent-2.1.11-stable/test/bench.c 
b/probe-busybox/libevent-2.1.11-stable/test/bench.c new file mode 100644 index 00000000..3a6886db --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/bench.c @@ -0,0 +1,212 @@ +/* + * Copyright 2003-2007 Niels Provos + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Mon 03/10/2003 - Modified by Davide Libenzi + * + * Added chain event propagation to improve the sensitivity of + * the measure respect to the event loop efficency. 
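Judging from the option handling in main() further down, -n sets the number of socket pairs, -a how many receive the initial write, and -w how many chained writes are propagated. The measurement itself boils down to the loop below (a distilled excerpt of run_once()):

evutil_gettimeofday(&ts, NULL);
do {
	event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
} while (count != fired);            /* until every expected byte arrived */
evutil_gettimeofday(&te, NULL);
evutil_timersub(&te, &ts, &te);      /* te = elapsed time for the run */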
+ * + * + */ + +#include "event2/event-config.h" +#include "../util-internal.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#else +#include +#include +#include +#endif +#include +#include +#include +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include + +#ifdef _WIN32 +#include +#endif + +#include +#include + +static ev_ssize_t count, fired; +static int writes, failures; +static evutil_socket_t *pipes; +static int num_pipes, num_active, num_writes; +static struct event *events; + + +static void +read_cb(evutil_socket_t fd, short which, void *arg) +{ + ev_intptr_t idx = (ev_intptr_t) arg, widx = idx + 1; + unsigned char ch; + ev_ssize_t n; + + n = recv(fd, (char*)&ch, sizeof(ch), 0); + if (n >= 0) + count += n; + else + failures++; + if (writes) { + if (widx >= num_pipes) + widx -= num_pipes; + n = send(pipes[2 * widx + 1], "e", 1, 0); + if (n != 1) + failures++; + writes--; + fired++; + } +} + +static struct timeval * +run_once(void) +{ + evutil_socket_t *cp, space; + long i; + static struct timeval ts, te; + + for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { + if (event_initialized(&events[i])) + event_del(&events[i]); + event_set(&events[i], cp[0], EV_READ | EV_PERSIST, read_cb, (void *)(ev_intptr_t) i); + event_add(&events[i], NULL); + } + + event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK); + + fired = 0; + space = num_pipes / num_active; + space = space * 2; + for (i = 0; i < num_active; i++, fired++) + (void) send(pipes[i * space + 1], "e", 1, 0); + + count = 0; + writes = num_writes; + { + int xcount = 0; + evutil_gettimeofday(&ts, NULL); + do { + event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK); + xcount++; + } while (count != fired); + evutil_gettimeofday(&te, NULL); + + if (xcount != count) + fprintf(stderr, "Xcount: %d, Rcount: " EV_SSIZE_FMT "\n", + xcount, count); + } + + evutil_timersub(&te, &ts, &te); + + return (&te); +} + +int +main(int argc, char **argv) +{ +#ifdef EVENT__HAVE_SETRLIMIT + struct rlimit rl; +#endif + int i, c; + struct timeval *tv; + evutil_socket_t *cp; + +#ifdef _WIN32 + WSADATA WSAData; + WSAStartup(0x101, &WSAData); +#endif + num_pipes = 100; + num_active = 1; + num_writes = num_pipes; + while ((c = getopt(argc, argv, "n:a:w:")) != -1) { + switch (c) { + case 'n': + num_pipes = atoi(optarg); + break; + case 'a': + num_active = atoi(optarg); + break; + case 'w': + num_writes = atoi(optarg); + break; + default: + fprintf(stderr, "Illegal argument \"%c\"\n", c); + exit(1); + } + } + +#ifdef EVENT__HAVE_SETRLIMIT + rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50; + if (setrlimit(RLIMIT_NOFILE, &rl) == -1) { + perror("setrlimit"); + exit(1); + } +#endif + + events = calloc(num_pipes, sizeof(struct event)); + pipes = calloc(num_pipes * 2, sizeof(evutil_socket_t)); + if (events == NULL || pipes == NULL) { + perror("malloc"); + exit(1); + } + + event_init(); + + for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { +#ifdef USE_PIPES + if (pipe(cp) == -1) { +#else + if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) { +#endif + perror("pipe"); + exit(1); + } + } + + for (i = 0; i < 25; i++) { + tv = run_once(); + if (tv == NULL) + exit(1); + fprintf(stdout, "%ld\n", + tv->tv_sec * 1000000L + tv->tv_usec); + } + + exit(0); +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/bench_cascade.c b/probe-busybox/libevent-2.1.11-stable/test/bench_cascade.c new file mode 100644 index 00000000..29a3203e --- /dev/null +++ 
b/probe-busybox/libevent-2.1.11-stable/test/bench_cascade.c @@ -0,0 +1,188 @@ +/* + * Copyright 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "event2/event-config.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#include +#else /* _WIN32 */ +#include +#include +#endif +#include +#include +#include +#include +#include +#ifdef EVENT__HAVE_UNISTD_H +#include +#endif +#include +#include +#include + +/* + * This benchmark tests how quickly we can propagate a write down a chain + * of socket pairs. We start by writing to the first socket pair and all + * events will fire subsequently until the last socket pair has been reached + * and the benchmark terminates. + */ + +static int fired; +static evutil_socket_t *pipes; +static struct event *events; + +static void +read_cb(evutil_socket_t fd, short which, void *arg) +{ + char ch; + evutil_socket_t sock = (evutil_socket_t)(ev_intptr_t)arg; + + (void) recv(fd, &ch, sizeof(ch), 0); + if (sock >= 0) { + if (send(sock, "e", 1, 0) < 0) + perror("send"); + } + fired++; +} + +static struct timeval * +run_once(int num_pipes) +{ + int i; + evutil_socket_t *cp; + static struct timeval ts, te, tv_timeout; + + events = (struct event *)calloc(num_pipes, sizeof(struct event)); + pipes = (evutil_socket_t *)calloc(num_pipes * 2, sizeof(evutil_socket_t)); + + if (events == NULL || pipes == NULL) { + perror("malloc"); + exit(1); + } + + for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { + if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) { + perror("socketpair"); + exit(1); + } + } + + /* measurements includes event setup */ + evutil_gettimeofday(&ts, NULL); + + /* provide a default timeout for events */ + evutil_timerclear(&tv_timeout); + tv_timeout.tv_sec = 60; + + for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { + evutil_socket_t fd = i < num_pipes - 1 ? 
cp[3] : -1; + event_set(&events[i], cp[0], EV_READ, read_cb, + (void *)(ev_intptr_t)fd); + event_add(&events[i], &tv_timeout); + } + + fired = 0; + + /* kick everything off with a single write */ + if (send(pipes[1], "e", 1, 0) < 0) + perror("send"); + + event_dispatch(); + + evutil_gettimeofday(&te, NULL); + evutil_timersub(&te, &ts, &te); + + for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) { + event_del(&events[i]); + evutil_closesocket(cp[0]); + evutil_closesocket(cp[1]); + } + + free(pipes); + free(events); + + return (&te); +} + +int +main(int argc, char **argv) +{ +#ifdef EVENT__HAVE_SETRLIMIT + struct rlimit rl; +#endif + int i, c; + struct timeval *tv; + + int num_pipes = 100; +#ifdef _WIN32 + WSADATA WSAData; + WSAStartup(0x101, &WSAData); +#endif + + while ((c = getopt(argc, argv, "n:")) != -1) { + switch (c) { + case 'n': + num_pipes = atoi(optarg); + break; + default: + fprintf(stderr, "Illegal argument \"%c\"\n", c); + exit(1); + } + } + +#ifdef EVENT__HAVE_SETRLIMIT + rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50; + if (setrlimit(RLIMIT_NOFILE, &rl) == -1) { + perror("setrlimit"); + exit(1); + } +#endif + + event_init(); + + for (i = 0; i < 25; i++) { + tv = run_once(num_pipes); + if (tv == NULL) + exit(1); + fprintf(stdout, "%ld\n", + tv->tv_sec * 1000000L + tv->tv_usec); + } + +#ifdef _WIN32 + WSACleanup(); +#endif + + exit(0); +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/bench_http.c b/probe-busybox/libevent-2.1.11-stable/test/bench_http.c new file mode 100644 index 00000000..80377ee6 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/bench_http.c @@ -0,0 +1,200 @@ +/* + * Copyright 2008-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
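A note on the wiring in bench_cascade.c above (not part of the imported sources): pipes[] stores the socketpairs back to back as { pair0_read, pair0_write, pair1_read, pair1_write, ... }, and cp advances two slots per pair, so cp[3] is the write end of the *next* pair. Each read_cb() therefore forwards one byte down the chain, the last pair is handed -1 and simply stops, and that is what lets the single send() to pipes[1] ripple through all num_pipes events before event_dispatch() returns.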
+ * + */ + +#include +#include +#ifdef _WIN32 +#include +#else +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#include "event2/event.h" +#include "event2/buffer.h" +#include "event2/util.h" +#include "event2/http.h" +#include "event2/thread.h" + +static void http_basic_cb(struct evhttp_request *req, void *arg); + +static char *content; +static size_t content_len = 0; + +static void +http_basic_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + + evbuffer_add(evb, content, content_len); + + /* allow sending of an empty reply */ + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + + evbuffer_free(evb); +} + +#if LIBEVENT_VERSION_NUMBER >= 0x02000200 +static void +http_ref_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + + evbuffer_add_reference(evb, content, content_len, NULL, NULL); + + /* allow sending of an empty reply */ + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + + evbuffer_free(evb); +} +#endif + +int +main(int argc, char **argv) +{ + struct event_config *cfg = event_config_new(); + struct event_base *base; + struct evhttp *http; + int i; + int c; + int use_iocp = 0; + ev_uint16_t port = 8080; + char *endptr = NULL; + +#ifdef _WIN32 + WSADATA WSAData; + WSAStartup(0x101, &WSAData); +#else + if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) + return (1); +#endif + + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + for (i = 1; i < argc; ++i) { + if (*argv[i] != '-') + continue; + + c = argv[i][1]; + + if ((c == 'p' || c == 'l') && i + 1 >= argc) { + fprintf(stderr, "-%c requires argument.\n", c); + exit(1); + } + + switch (c) { + case 'p': + if (i+1 >= argc || !argv[i+1]) { + fprintf(stderr, "Missing port\n"); + exit(1); + } + port = (int)strtol(argv[i+1], &endptr, 10); + if (*endptr != '\0') { + fprintf(stderr, "Bad port\n"); + exit(1); + } + break; + case 'l': + if (i+1 >= argc || !argv[i+1]) { + fprintf(stderr, "Missing content length\n"); + exit(1); + } + content_len = (size_t)strtol(argv[i+1], &endptr, 10); + if (*endptr != '\0' || content_len == 0) { + fprintf(stderr, "Bad content length\n"); + exit(1); + } + break; +#ifdef _WIN32 + case 'i': + use_iocp = 1; +#ifdef EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED + evthread_use_windows_threads(); +#endif + event_config_set_flag(cfg,EVENT_BASE_FLAG_STARTUP_IOCP); + break; +#endif + default: + fprintf(stderr, "Illegal argument \"%c\"\n", c); + exit(1); + } + } + + base = event_base_new_with_config(cfg); + if (!base) { + fprintf(stderr, "creating event_base failed. Exiting.\n"); + return 1; + } + + http = evhttp_new(base); + + content = malloc(content_len); + if (content == NULL) { + fprintf(stderr, "Cannot allocate content\n"); + exit(1); + } else { + int i = 0; + for (i = 0; i < (int)content_len; ++i) + content[i] = (i & 255); + } + + evhttp_set_cb(http, "/ind", http_basic_cb, NULL); + fprintf(stderr, "/ind - basic content (memory copy)\n"); + + evhttp_set_cb(http, "/ref", http_ref_cb, NULL); + fprintf(stderr, "/ref - basic content (reference)\n"); + + fprintf(stderr, "Serving %d bytes on port %d using %s\n", + (int)content_len, port, + use_iocp? 
"IOCP" : event_base_get_method(base)); + + evhttp_bind_socket(http, "0.0.0.0", port); + +#ifdef _WIN32 + if (use_iocp) { + struct timeval tv={99999999,0}; + event_base_loopexit(base, &tv); + } +#endif + event_base_dispatch(base); + +#ifdef _WIN32 + WSACleanup(); +#endif + + /* NOTREACHED */ + return (0); +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/bench_httpclient.c b/probe-busybox/libevent-2.1.11-stable/test/bench_httpclient.c new file mode 100644 index 00000000..7c7ee470 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/bench_httpclient.c @@ -0,0 +1,240 @@ +/* + * Copyright 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +/* for EVUTIL_ERR_CONNECT_RETRIABLE macro */ +#include "util-internal.h" + +#include +#ifdef _WIN32 +#include +#else +#include +#include +# ifdef _XOPEN_SOURCE_EXTENDED +# include +# endif +#endif +#include +#include +#include + +#include "event2/event.h" +#include "event2/bufferevent.h" +#include "event2/buffer.h" +#include "event2/util.h" + +const char *resource = NULL; +struct event_base *base = NULL; + +int total_n_handled = 0; +int total_n_errors = 0; +int total_n_launched = 0; +size_t total_n_bytes = 0; +struct timeval total_time = {0,0}; +int n_errors = 0; + +const int PARALLELISM = 200; +const int N_REQUESTS = 20000; + +struct request_info { + size_t n_read; + struct timeval started; +}; + +static int launch_request(void); +static void readcb(struct bufferevent *b, void *arg); +static void errorcb(struct bufferevent *b, short what, void *arg); + +static void +readcb(struct bufferevent *b, void *arg) +{ + struct request_info *ri = arg; + struct evbuffer *input = bufferevent_get_input(b); + size_t n = evbuffer_get_length(input); + + ri->n_read += n; + evbuffer_drain(input, n); +} + +static void +errorcb(struct bufferevent *b, short what, void *arg) +{ + struct request_info *ri = arg; + struct timeval now, diff; + if (what & BEV_EVENT_EOF) { + ++total_n_handled; + total_n_bytes += ri->n_read; + evutil_gettimeofday(&now, NULL); + evutil_timersub(&now, &ri->started, &diff); + evutil_timeradd(&diff, &total_time, &total_time); + + if (total_n_handled && (total_n_handled%1000)==0) + printf("%d requests done\n",total_n_handled); + + if (total_n_launched < N_REQUESTS) { + if (launch_request() < 0) + perror("Can't launch"); + } + } else { + ++total_n_errors; + perror("Unexpected error"); + } + + bufferevent_setcb(b, NULL, NULL, NULL, NULL); + free(ri); + bufferevent_disable(b, EV_READ|EV_WRITE); + bufferevent_free(b); +} + +static void +frob_socket(evutil_socket_t sock) +{ +#ifdef EVENT__HAVE_STRUCT_LINGER + struct linger l; +#endif + int one = 1; + if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*)&one, sizeof(one))<0) + perror("setsockopt(SO_REUSEADDR)"); +#ifdef EVENT__HAVE_STRUCT_LINGER + l.l_onoff = 1; + l.l_linger = 0; + if (setsockopt(sock, SOL_SOCKET, SO_LINGER, (void*)&l, sizeof(l))<0) + perror("setsockopt(SO_LINGER)"); +#endif +} + +static int +launch_request(void) +{ + evutil_socket_t sock; + struct sockaddr_in sin; + struct bufferevent *b; + + struct request_info *ri; + + memset(&sin, 0, sizeof(sin)); + + ++total_n_launched; + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = htonl(0x7f000001); + sin.sin_port = htons(8080); + if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) + return -1; + if (evutil_make_socket_nonblocking(sock) < 0) { + evutil_closesocket(sock); + return -1; + } + frob_socket(sock); + if (connect(sock, (struct sockaddr*)&sin, sizeof(sin)) < 0) { + int e = evutil_socket_geterror(sock); + if (! 
EVUTIL_ERR_CONNECT_RETRIABLE(e)) { + evutil_closesocket(sock); + return -1; + } + } + + ri = malloc(sizeof(*ri)); + ri->n_read = 0; + evutil_gettimeofday(&ri->started, NULL); + + b = bufferevent_socket_new(base, sock, BEV_OPT_CLOSE_ON_FREE); + + bufferevent_setcb(b, readcb, NULL, errorcb, ri); + bufferevent_enable(b, EV_READ|EV_WRITE); + + evbuffer_add_printf(bufferevent_get_output(b), + "GET %s HTTP/1.0\r\n\r\n", resource); + + return 0; +} + + +int +main(int argc, char **argv) +{ + int i; + struct timeval start, end, total; + long long usec; + double throughput; + +#ifdef _WIN32 + WSADATA WSAData; + WSAStartup(0x101, &WSAData); +#endif + + resource = "/ref"; + + setvbuf(stdout, NULL, _IONBF, 0); + + base = event_base_new(); + + for (i=0; i < PARALLELISM; ++i) { + if (launch_request() < 0) + perror("launch"); + } + + evutil_gettimeofday(&start, NULL); + + event_base_dispatch(base); + + evutil_gettimeofday(&end, NULL); + evutil_timersub(&end, &start, &total); + usec = total_time.tv_sec * (long long)1000000 + total_time.tv_usec; + + if (!total_n_handled) { + puts("Nothing worked. You probably did something dumb."); + return 0; + } + + + throughput = total_n_handled / + (total.tv_sec+ ((double)total.tv_usec)/1000000.0); + +#ifdef _WIN32 +#define I64_FMT "%I64d" +#define I64_TYP __int64 +#else +#define I64_FMT "%lld" +#define I64_TYP long long int +#endif + + printf("\n%d requests in %d.%06d sec. (%.2f throughput)\n" + "Each took about %.02f msec latency\n" + I64_FMT "bytes read. %d errors.\n", + total_n_handled, + (int)total.tv_sec, (int)total.tv_usec, + throughput, + (double)(usec/1000) / total_n_handled, + (I64_TYP)total_n_bytes, n_errors); + +#ifdef _WIN32 + WSACleanup(); +#endif + + return 0; +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/check-dumpevents.py b/probe-busybox/libevent-2.1.11-stable/test/check-dumpevents.py new file mode 100755 index 00000000..3e1df30c --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/check-dumpevents.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Post-process the output of test-dumpevents and check it for correctness. 
+# + +import math +import re +import sys + +text = sys.stdin.readlines() + +try: + expect_inserted_pos = text.index("Inserted:\n") + expect_active_pos = text.index("Active:\n") + got_inserted_pos = text.index("Inserted events:\n") + got_active_pos = text.index("Active events:\n") +except ValueError: + sys.stderr.write("Missing expected dividing line in dumpevents output") + sys.exit(1) + +if not (expect_inserted_pos < expect_active_pos < + got_inserted_pos < got_active_pos): + sys.stderr.write("Sections out of order in dumpevents output") + sys.exit(1) + +now,T= text[1].split() +T = float(T) + +want_inserted = set(text[expect_inserted_pos+1:expect_active_pos]) +want_active = set(text[expect_active_pos+1:got_inserted_pos-1]) +got_inserted = set(text[got_inserted_pos+1:got_active_pos]) +got_active = set(text[got_active_pos+1:]) + +pat = re.compile(r'Timeout=([0-9\.]+)') +def replace_time(m): + t = float(m.group(1)) + if .9 < abs(t-T) < 1.1: + return "Timeout=T+1" + elif 2.4 < abs(t-T) < 2.6: + return "Timeout=T+2.5" + else: + return m.group(0) + +cleaned_inserted = set( pat.sub(replace_time, s) for s in got_inserted + if "Internal" not in s) + +if cleaned_inserted != want_inserted: + sys.stderr.write("Inserted event lists were not as expected!") + sys.exit(1) + +if set(got_active) != set(want_active): + sys.stderr.write("Active event lists were not as expected!") + sys.exit(1) + diff --git a/probe-busybox/libevent-2.1.11-stable/test/include.am b/probe-busybox/libevent-2.1.11-stable/test/include.am new file mode 100644 index 00000000..04375247 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/include.am @@ -0,0 +1,176 @@ +# test/Makefile.am for libevent +# Copyright 2000-2007 Niels Provos +# Copyright 2007-2012 Niels Provos and Nick Mathewson +# +# See LICENSE for copying information. 
+ +regress_CPPFLAGS = -DTINYTEST_LOCAL + +EXTRA_DIST+= \ + test/check-dumpevents.py \ + test/regress.gen.c \ + test/regress.gen.h \ + test/regress.rpc \ + test/rpcgen_wrapper.sh \ + test/print-winsock-errors.c \ + test/test.sh + +TESTPROGRAMS = \ + test/bench \ + test/bench_cascade \ + test/bench_http \ + test/bench_httpclient \ + test/test-changelist \ + test/test-dumpevents \ + test/test-eof \ + test/test-closed \ + test/test-fdleak \ + test/test-init \ + test/test-ratelim \ + test/test-time \ + test/test-weof \ + test/regress + +if BUILD_REGRESS +noinst_PROGRAMS += $(TESTPROGRAMS) +EXTRA_PROGRAMS+= test/regress +endif + +noinst_HEADERS+= \ + test/regress.h \ + test/regress_thread.h \ + test/tinytest.h \ + test/tinytest_local.h \ + test/tinytest_macros.h + +TESTS = \ + test_runner_epoll \ + test_runner_select \ + test_runner_kqueue \ + test_runner_evport \ + test_runner_devpoll \ + test_runner_poll \ + test_runner_win32 \ + test_runner_timerfd \ + test_runner_changelist \ + test_runner_timerfd_changelist +LOG_COMPILER = true +TESTS_COMPILER = true + +test_runner_epoll: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b EPOLL +test_runner_select: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b SELECT +test_runner_kqueue: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b KQUEUE +test_runner_evport: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b EVPORT +test_runner_devpoll: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b DEVPOLL +test_runner_poll: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b POLL +test_runner_win32: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b WIN32 +test_runner_timerfd: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b "" -t +test_runner_changelist: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b "" -c +test_runner_timerfd_changelist: $(top_srcdir)/test/test.sh + $(top_srcdir)/test/test.sh -b "" -T + +DISTCLEANFILES += test/regress.gen.c test/regress.gen.h + +if BUILD_REGRESS +BUILT_SOURCES += test/regress.gen.c test/regress.gen.h +endif + +test_test_init_SOURCES = test/test-init.c +test_test_init_LDADD = libevent_core.la +test_test_dumpevents_SOURCES = test/test-dumpevents.c +test_test_dumpevents_LDADD = libevent_core.la +test_test_eof_SOURCES = test/test-eof.c +test_test_eof_LDADD = libevent_core.la +test_test_closed_SOURCES = test/test-closed.c +test_test_closed_LDADD = libevent_core.la +test_test_changelist_SOURCES = test/test-changelist.c +test_test_changelist_LDADD = libevent_core.la +test_test_weof_SOURCES = test/test-weof.c +test_test_weof_LDADD = libevent_core.la +test_test_time_SOURCES = test/test-time.c +test_test_time_LDADD = libevent_core.la +test_test_ratelim_SOURCES = test/test-ratelim.c +test_test_ratelim_LDADD = libevent_core.la -lm +test_test_fdleak_SOURCES = test/test-fdleak.c +test_test_fdleak_LDADD = libevent_core.la + +test_regress_SOURCES = \ + test/regress.c \ + test/regress.gen.c \ + test/regress.gen.h \ + test/regress_buffer.c \ + test/regress_bufferevent.c \ + test/regress_dns.c \ + test/regress_et.c \ + test/regress_finalize.c \ + test/regress_http.c \ + test/regress_listener.c \ + test/regress_main.c \ + test/regress_minheap.c \ + test/regress_rpc.c \ + test/regress_testutils.c \ + test/regress_testutils.h \ + test/regress_util.c \ + test/tinytest.c \ + $(regress_thread_SOURCES) \ + $(regress_zlib_SOURCES) + +if PTHREADS +regress_thread_SOURCES = test/regress_thread.c +PTHREAD_LIBS += libevent_pthreads.la +endif +if BUILD_WIN32 +if THREADS 
+regress_thread_SOURCES = test/regress_thread.c +endif +endif +if ZLIB_REGRESS +regress_zlib_SOURCES = test/regress_zlib.c +endif +if BUILD_WIN32 +test_regress_SOURCES += test/regress_iocp.c +endif + +test_regress_LDADD = $(LIBEVENT_GC_SECTIONS) libevent_core.la libevent_extra.la $(PTHREAD_LIBS) $(ZLIB_LIBS) +test_regress_CPPFLAGS = $(AM_CPPFLAGS) $(PTHREAD_CFLAGS) $(ZLIB_CFLAGS) -Itest +test_regress_LDFLAGS = $(PTHREAD_CFLAGS) + +if OPENSSL +test_regress_SOURCES += test/regress_ssl.c +test_regress_CPPFLAGS += $(OPENSSL_INCS) +test_regress_LDADD += libevent_openssl.la $(OPENSSL_LIBS) ${OPENSSL_LIBADD} +endif + +test_bench_SOURCES = test/bench.c +test_bench_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +test_bench_cascade_SOURCES = test/bench_cascade.c +test_bench_cascade_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +test_bench_http_SOURCES = test/bench_http.c +test_bench_http_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la +test_bench_httpclient_SOURCES = test/bench_httpclient.c +test_bench_httpclient_LDADD = $(LIBEVENT_GC_SECTIONS) libevent_core.la + +test/regress.gen.c test/regress.gen.h: test/rpcgen-attempted + +test/rpcgen-attempted: test/regress.rpc event_rpcgen.py test/rpcgen_wrapper.sh + $(AM_V_GEN)date -u > $@ + $(AM_V_at)if $(srcdir)/test/rpcgen_wrapper.sh $(srcdir)/test; then \ + true; \ + else \ + echo "No Python installed; stubbing out RPC test." >&2; \ + echo " "> test/regress.gen.c; \ + echo "#define NO_PYTHON_EXISTS" > test/regress.gen.h; \ + fi + +CLEANFILES += test/rpcgen-attempted + +$(TESTPROGRAMS) : libevent.la diff --git a/probe-busybox/libevent-2.1.11-stable/test/print-winsock-errors.c b/probe-busybox/libevent-2.1.11-stable/test/print-winsock-errors.c new file mode 100644 index 00000000..64d6b0e7 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/print-winsock-errors.c @@ -0,0 +1,86 @@ +#include +#include + +#include +#include + +#include "event2/event.h" +#include "event2/util.h" +#include "event2/thread.h" + +#define E(x) printf (#x " -> \"%s\"\n", evutil_socket_error_to_string (x)); + +int main (int argc, char **argv) +{ + int i, j; + const char *s1, *s2; + +#ifdef EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED + evthread_use_windows_threads (); +#endif + + s1 = evutil_socket_error_to_string (WSAEINTR); + + for (i = 0; i < 3; i++) { + printf ("\niteration %d:\n\n", i); + E(WSAEINTR); + E(WSAEACCES); + E(WSAEFAULT); + E(WSAEINVAL); + E(WSAEMFILE); + E(WSAEWOULDBLOCK); + E(WSAEINPROGRESS); + E(WSAEALREADY); + E(WSAENOTSOCK); + E(WSAEDESTADDRREQ); + E(WSAEMSGSIZE); + E(WSAEPROTOTYPE); + E(WSAENOPROTOOPT); + E(WSAEPROTONOSUPPORT); + E(WSAESOCKTNOSUPPORT); + E(WSAEOPNOTSUPP); + E(WSAEPFNOSUPPORT); + E(WSAEAFNOSUPPORT); + E(WSAEADDRINUSE); + E(WSAEADDRNOTAVAIL); + E(WSAENETDOWN); + E(WSAENETUNREACH); + E(WSAENETRESET); + E(WSAECONNABORTED); + E(WSAECONNRESET); + E(WSAENOBUFS); + E(WSAEISCONN); + E(WSAENOTCONN); + E(WSAESHUTDOWN); + E(WSAETIMEDOUT); + E(WSAECONNREFUSED); + E(WSAEHOSTDOWN); + E(WSAEHOSTUNREACH); + E(WSAEPROCLIM); + E(WSASYSNOTREADY); + E(WSAVERNOTSUPPORTED); + E(WSANOTINITIALISED); + E(WSAEDISCON); + E(WSATYPE_NOT_FOUND); + E(WSAHOST_NOT_FOUND); + E(WSATRY_AGAIN); + E(WSANO_RECOVERY); + E(WSANO_DATA); + E(0xdeadbeef); /* test the case where no message is available */ + + /* fill up the hash table a bit to make sure it grows properly */ + for (j = 0; j < 50; j++) { + int err; + evutil_secure_rng_get_bytes(&err, sizeof(err)); + evutil_socket_error_to_string(err); + } + } + + s2 = evutil_socket_error_to_string (WSAEINTR); + if (s1 != s2) + printf ("caching 
failed!\n"); + + libevent_global_shutdown (); + + return EXIT_SUCCESS; +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress.c b/probe-busybox/libevent-2.1.11-stable/test/regress.c new file mode 100644 index 00000000..0ebadcb9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress.c @@ -0,0 +1,3541 @@ +/* + * Copyright (c) 2003-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "util-internal.h" + +#ifdef _WIN32 +#include +#include +#endif + +#ifdef EVENT__HAVE_PTHREADS +#include +#endif + +#include "event2/event-config.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#ifndef _WIN32 +#include +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/event_compat.h" +#include "event2/tag.h" +#include "event2/buffer.h" +#include "event2/buffer_compat.h" +#include "event2/util.h" +#include "event-internal.h" +#include "evthread-internal.h" +#include "log-internal.h" +#include "time-internal.h" + +#include "regress.h" + +#ifndef _WIN32 +#include "regress.gen.h" +#endif + +evutil_socket_t pair[2]; +int test_ok; +int called; +struct event_base *global_base; + +static char wbuf[4096]; +static char rbuf[4096]; +static int woff; +static int roff; +static int usepersist; +static struct timeval tset; +static struct timeval tcalled; + + +#define TEST1 "this is a test" + +#ifdef _WIN32 +#define write(fd,buf,len) send((fd),(buf),(int)(len),0) +#define read(fd,buf,len) recv((fd),(buf),(int)(len),0) +#endif + +struct basic_cb_args +{ + struct event_base *eb; + struct event *ev; + unsigned int callcount; +}; + +static void +simple_read_cb(evutil_socket_t fd, short event, void *arg) +{ + char buf[256]; + int len; + + len = read(fd, buf, sizeof(buf)); + + if (len) { + if (!called) { + if (event_add(arg, NULL) == -1) + exit(1); + } + } else if (called == 1) + test_ok = 1; + + called++; +} + +static void +basic_read_cb(evutil_socket_t fd, short event, void *data) +{ + char buf[256]; + int len; + struct basic_cb_args *arg = data; + + len = read(fd, buf, sizeof(buf)); + + if (len < 0) { + tt_fail_perror("read (callback)"); + } else { + switch (arg->callcount++) { + case 0: /* first call: expect to read data; cycle */ + if (len > 0) + return; + + tt_fail_msg("EOF before data read"); + break; + + case 1: /* second call: expect EOF; stop */ + if (len > 0) + tt_fail_msg("not all data read on first cycle"); + break; + + default: /* third call: should not happen */ + tt_fail_msg("too many cycles"); + } + } + + event_del(arg->ev); + event_base_loopexit(arg->eb, NULL); +} + +static void +dummy_read_cb(evutil_socket_t fd, short event, void *arg) +{ +} + +static void +simple_write_cb(evutil_socket_t fd, short event, void *arg) +{ + int len; + + len = write(fd, TEST1, strlen(TEST1) + 1); + if (len == -1) + test_ok = 0; + else + test_ok = 1; +} + +static void +multiple_write_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *ev = arg; + int len; + + len = 128; + if (woff + len >= (int)sizeof(wbuf)) + len = sizeof(wbuf) - woff; + + len = write(fd, wbuf + woff, len); + if (len == -1) { + fprintf(stderr, "%s: write\n", __func__); + if (usepersist) + event_del(ev); + return; + } + + woff += len; + + if (woff >= (int)sizeof(wbuf)) { + shutdown(fd, EVUTIL_SHUT_WR); + if (usepersist) + event_del(ev); + return; + } + + if (!usepersist) { + if (event_add(ev, NULL) == -1) + exit(1); + } +} + +static void +multiple_read_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *ev = arg; + int len; + + len = read(fd, rbuf + roff, sizeof(rbuf) - roff); + if (len == -1) + fprintf(stderr, "%s: read\n", __func__); + if (len <= 0) { + if (usepersist) + event_del(ev); + return; + } + + roff += len; + if (!usepersist) { + if (event_add(ev, NULL) == -1) + exit(1); + } +} + +static void 
+timeout_cb(evutil_socket_t fd, short event, void *arg) +{ + evutil_gettimeofday(&tcalled, NULL); +} + +struct both { + struct event ev; + int nread; +}; + +static void +combined_read_cb(evutil_socket_t fd, short event, void *arg) +{ + struct both *both = arg; + char buf[128]; + int len; + + len = read(fd, buf, sizeof(buf)); + if (len == -1) + fprintf(stderr, "%s: read\n", __func__); + if (len <= 0) + return; + + both->nread += len; + if (event_add(&both->ev, NULL) == -1) + exit(1); +} + +static void +combined_write_cb(evutil_socket_t fd, short event, void *arg) +{ + struct both *both = arg; + char buf[128]; + int len; + + len = sizeof(buf); + if (len > both->nread) + len = both->nread; + + memset(buf, 'q', len); + + len = write(fd, buf, len); + if (len == -1) + fprintf(stderr, "%s: write\n", __func__); + if (len <= 0) { + shutdown(fd, EVUTIL_SHUT_WR); + return; + } + + both->nread -= len; + if (event_add(&both->ev, NULL) == -1) + exit(1); +} + +/* These macros used to replicate the work of the legacy test wrapper code */ +#define setup_test(x) do { \ + if (!in_legacy_test_wrapper) { \ + TT_FAIL(("Legacy test %s not wrapped properly", x)); \ + return; \ + } \ + } while (0) +#define cleanup_test() setup_test("cleanup") + +static void +test_simpleread(void) +{ + struct event ev; + + /* Very simple read test */ + setup_test("Simple read: "); + + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + shutdown(pair[0], EVUTIL_SHUT_WR); + + event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + event_dispatch(); + + cleanup_test(); +} + +static void +test_simplewrite(void) +{ + struct event ev; + + /* Very simple write test */ + setup_test("Simple write: "); + + event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + event_dispatch(); + + cleanup_test(); +} + +static void +simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg) +{ + if (++called == 2) + test_ok = 1; +} + +static void +test_simpleread_multiple(void) +{ + struct event one, two; + + /* Very simple read test */ + setup_test("Simple read to multiple evens: "); + + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + shutdown(pair[0], EVUTIL_SHUT_WR); + + event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL); + if (event_add(&one, NULL) == -1) + exit(1); + event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL); + if (event_add(&two, NULL) == -1) + exit(1); + event_dispatch(); + + cleanup_test(); +} + +static int have_closed = 0; +static int premature_event = 0; +static void +simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr) +{ + evutil_socket_t **fds = ptr; + TT_BLATHER(("Closing")); + evutil_closesocket(*fds[0]); + evutil_closesocket(*fds[1]); + *fds[0] = -1; + *fds[1] = -1; + have_closed = 1; +} + +static void +record_event_cb(evutil_socket_t s, short what, void *ptr) +{ + short *whatp = ptr; + if (!have_closed) + premature_event = 1; + *whatp = what; + TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s)); +} + +static void +test_simpleclose(void *ptr) +{ + /* Test that a close of FD is detected as a read and as a write. 
*/ + struct event_base *base = event_base_new(); + evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1}; + evutil_socket_t *to_close[2]; + struct event *rev=NULL, *wev=NULL, *closeev=NULL; + struct timeval tv; + short got_read_on_close = 0, got_write_on_close = 0; + char buf[1024]; + memset(buf, 99, sizeof(buf)); +#ifdef _WIN32 +#define LOCAL_SOCKETPAIR_AF AF_INET +#else +#define LOCAL_SOCKETPAIR_AF AF_UNIX +#endif + if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0) + TT_DIE(("socketpair: %s", strerror(errno))); + if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0) + TT_DIE(("socketpair: %s", strerror(errno))); + if (evutil_make_socket_nonblocking(pair1[1]) < 0) + TT_DIE(("make_socket_nonblocking")); + if (evutil_make_socket_nonblocking(pair2[1]) < 0) + TT_DIE(("make_socket_nonblocking")); + + /** Stuff pair2[1] full of data, until write fails */ + while (1) { + int r = write(pair2[1], buf, sizeof(buf)); + if (r<0) { + int err = evutil_socket_geterror(pair2[1]); + if (! EVUTIL_ERR_RW_RETRIABLE(err)) + TT_DIE(("write failed strangely: %s", + evutil_socket_error_to_string(err))); + break; + } + } + to_close[0] = &pair1[0]; + to_close[1] = &pair2[0]; + + closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb, + to_close); + rev = event_new(base, pair1[1], EV_READ, record_event_cb, + &got_read_on_close); + TT_BLATHER(("Waiting for read on %d", (int)pair1[1])); + wev = event_new(base, pair2[1], EV_WRITE, record_event_cb, + &got_write_on_close); + TT_BLATHER(("Waiting for write on %d", (int)pair2[1])); + tv.tv_sec = 0; + tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make + * sure we get a read event. */ + event_add(closeev, &tv); + event_add(rev, NULL); + event_add(wev, NULL); + /* Don't let the test go on too long. 
*/ + tv.tv_sec = 0; + tv.tv_usec = 200*1000; + event_base_loopexit(base, &tv); + event_base_loop(base, 0); + + tt_int_op(got_read_on_close, ==, EV_READ); + tt_int_op(got_write_on_close, ==, EV_WRITE); + tt_int_op(premature_event, ==, 0); + +end: + if (pair1[0] >= 0) + evutil_closesocket(pair1[0]); + if (pair1[1] >= 0) + evutil_closesocket(pair1[1]); + if (pair2[0] >= 0) + evutil_closesocket(pair2[0]); + if (pair2[1] >= 0) + evutil_closesocket(pair2[1]); + if (rev) + event_free(rev); + if (wev) + event_free(wev); + if (closeev) + event_free(closeev); + if (base) + event_base_free(base); +} + + +static void +test_multiple(void) +{ + struct event ev, ev2; + int i; + + /* Multiple read and write test */ + setup_test("Multiple read/write: "); + memset(rbuf, 0, sizeof(rbuf)); + for (i = 0; i < (int)sizeof(wbuf); i++) + wbuf[i] = i; + + roff = woff = 0; + usepersist = 0; + + event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2); + if (event_add(&ev2, NULL) == -1) + exit(1); + event_dispatch(); + + if (roff == woff) + test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; + + cleanup_test(); +} + +static void +test_persistent(void) +{ + struct event ev, ev2; + int i; + + /* Multiple read and write test with persist */ + setup_test("Persist read/write: "); + memset(rbuf, 0, sizeof(rbuf)); + for (i = 0; i < (int)sizeof(wbuf); i++) + wbuf[i] = i; + + roff = woff = 0; + usepersist = 1; + + event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2); + if (event_add(&ev2, NULL) == -1) + exit(1); + event_dispatch(); + + if (roff == woff) + test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; + + cleanup_test(); +} + +static void +test_combined(void) +{ + struct both r1, r2, w1, w2; + + setup_test("Combined read/write: "); + memset(&r1, 0, sizeof(r1)); + memset(&r2, 0, sizeof(r2)); + memset(&w1, 0, sizeof(w1)); + memset(&w2, 0, sizeof(w2)); + + w1.nread = 4096; + w2.nread = 8192; + + event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1); + event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1); + event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2); + event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2); + tt_assert(event_add(&r1.ev, NULL) != -1); + tt_assert(!event_add(&w1.ev, NULL)); + tt_assert(!event_add(&r2.ev, NULL)); + tt_assert(!event_add(&w2.ev, NULL)); + event_dispatch(); + + if (r1.nread == 8192 && r2.nread == 4096) + test_ok = 1; + +end: + cleanup_test(); +} + +static void +test_simpletimeout(void) +{ + struct timeval tv; + struct event ev; + + setup_test("Simple timeout: "); + + tv.tv_usec = 200*1000; + tv.tv_sec = 0; + evutil_timerclear(&tcalled); + evtimer_set(&ev, timeout_cb, NULL); + evtimer_add(&ev, &tv); + + evutil_gettimeofday(&tset, NULL); + event_dispatch(); + test_timeval_diff_eq(&tset, &tcalled, 200); + + test_ok = 1; +end: + cleanup_test(); +} + +static void +periodic_timeout_cb(evutil_socket_t fd, short event, void *arg) +{ + int *count = arg; + + (*count)++; + if (*count == 6) { + /* call loopexit only once - on slow machines(?), it is + * apparently possible for this to get called twice. 
*/ + test_ok = 1; + event_base_loopexit(global_base, NULL); + } +} + +static void +test_persistent_timeout(void) +{ + struct timeval tv; + struct event ev; + int count = 0; + + evutil_timerclear(&tv); + tv.tv_usec = 10000; + + event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST, + periodic_timeout_cb, &count); + event_add(&ev, &tv); + + event_dispatch(); + + event_del(&ev); +} + +static void +test_persistent_timeout_jump(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event ev; + int count = 0; + struct timeval msec100 = { 0, 100 * 1000 }; + struct timeval msec50 = { 0, 50 * 1000 }; + struct timeval msec300 = { 0, 300 * 1000 }; + + event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count); + event_add(&ev, &msec100); + /* Wait for a bit */ + evutil_usleep_(&msec300); + event_base_loopexit(data->base, &msec50); + event_base_dispatch(data->base); + tt_int_op(count, ==, 1); + +end: + event_del(&ev); +} + +struct persist_active_timeout_called { + int n; + short events[16]; + struct timeval tvs[16]; +}; + +static void +activate_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *ev = arg; + event_active(ev, EV_READ, 1); +} + +static void +persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg) +{ + struct persist_active_timeout_called *c = arg; + if (c->n < 15) { + c->events[c->n] = event; + evutil_gettimeofday(&c->tvs[c->n], NULL); + ++c->n; + } +} + +static void +test_persistent_active_timeout(void *ptr) +{ + struct timeval tv, tv2, tv_exit, start; + struct event ev; + struct persist_active_timeout_called res; + + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + + memset(&res, 0, sizeof(res)); + + tv.tv_sec = 0; + tv.tv_usec = 200 * 1000; + event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST, + persist_active_timeout_cb, &res); + event_add(&ev, &tv); + + tv2.tv_sec = 0; + tv2.tv_usec = 100 * 1000; + event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2); + + tv_exit.tv_sec = 0; + tv_exit.tv_usec = 600 * 1000; + event_base_loopexit(base, &tv_exit); + + event_base_assert_ok_(base); + evutil_gettimeofday(&start, NULL); + + event_base_dispatch(base); + event_base_assert_ok_(base); + + tt_int_op(res.n, ==, 3); + tt_int_op(res.events[0], ==, EV_READ); + tt_int_op(res.events[1], ==, EV_TIMEOUT); + tt_int_op(res.events[2], ==, EV_TIMEOUT); + test_timeval_diff_eq(&start, &res.tvs[0], 100); + test_timeval_diff_eq(&start, &res.tvs[1], 300); + test_timeval_diff_eq(&start, &res.tvs[2], 500); +end: + event_del(&ev); +} + +struct common_timeout_info { + struct event ev; + struct timeval called_at; + int which; + int count; +}; + +static void +common_timeout_cb(evutil_socket_t fd, short event, void *arg) +{ + struct common_timeout_info *ti = arg; + ++ti->count; + evutil_gettimeofday(&ti->called_at, NULL); + if (ti->count >= 4) + event_del(&ti->ev); +} + +static void +test_common_timeout(void *ptr) +{ + struct basic_test_data *data = ptr; + + struct event_base *base = data->base; + int i; + struct common_timeout_info info[100]; + + struct timeval start; + struct timeval tmp_100_ms = { 0, 100*1000 }; + struct timeval tmp_200_ms = { 0, 200*1000 }; + struct timeval tmp_5_sec = { 5, 0 }; + struct timeval tmp_5M_usec = { 0, 5*1000*1000 }; + + const struct timeval *ms_100, *ms_200, *sec_5; + + ms_100 = event_base_init_common_timeout(base, &tmp_100_ms); + ms_200 = event_base_init_common_timeout(base, &tmp_200_ms); + sec_5 = event_base_init_common_timeout(base, &tmp_5_sec); + tt_assert(ms_100); + tt_assert(ms_200); + 
tt_assert(sec_5); + tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms), + ==, ms_200); + tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200); + tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5); + tt_int_op(ms_100->tv_sec, ==, 0); + tt_int_op(ms_200->tv_sec, ==, 0); + tt_int_op(sec_5->tv_sec, ==, 5); + tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000); + tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000); + tt_int_op(sec_5->tv_usec, ==, 0|0x50200000); + + memset(info, 0, sizeof(info)); + + for (i=0; i<100; ++i) { + info[i].which = i; + event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST, + common_timeout_cb, &info[i]); + if (i % 2) { + if ((i%20)==1) { + /* Glass-box test: Make sure we survive the + * transition to non-common timeouts. It's + * a little tricky. */ + event_add(&info[i].ev, ms_200); + event_add(&info[i].ev, &tmp_100_ms); + } else if ((i%20)==3) { + /* Check heap-to-common too. */ + event_add(&info[i].ev, &tmp_200_ms); + event_add(&info[i].ev, ms_100); + } else if ((i%20)==5) { + /* Also check common-to-common. */ + event_add(&info[i].ev, ms_200); + event_add(&info[i].ev, ms_100); + } else { + event_add(&info[i].ev, ms_100); + } + } else { + event_add(&info[i].ev, ms_200); + } + } + + event_base_assert_ok_(base); + evutil_gettimeofday(&start, NULL); + event_base_dispatch(base); + + event_base_assert_ok_(base); + + for (i=0; i<10; ++i) { + tt_int_op(info[i].count, ==, 4); + if (i % 2) { + test_timeval_diff_eq(&start, &info[i].called_at, 400); + } else { + test_timeval_diff_eq(&start, &info[i].called_at, 800); + } + } + + /* Make sure we can free the base with some events in. */ + for (i=0; i<100; ++i) { + if (i % 2) { + event_add(&info[i].ev, ms_100); + } else { + event_add(&info[i].ev, ms_200); + } + } + +end: + event_base_free(data->base); /* need to do this here before info is + * out-of-scope */ + data->base = NULL; +} + +#ifndef _WIN32 + +#define current_base event_global_current_base_ +extern struct event_base *current_base; + +static void +fork_signal_cb(evutil_socket_t fd, short events, void *arg) +{ + event_del(arg); +} + +int child_pair[2] = { -1, -1 }; +static void +simple_child_read_cb(evutil_socket_t fd, short event, void *arg) +{ + char buf[256]; + int len; + + len = read(fd, buf, sizeof(buf)); + if (write(child_pair[0], "", 1) < 0) + tt_fail_perror("write"); + + if (len) { + if (!called) { + if (event_add(arg, NULL) == -1) + exit(1); + } + } else if (called == 1) + test_ok = 1; + + called++; +} + +#define TEST_FORK_EXIT_SUCCESS 76 +static void fork_wait_check(int pid) +{ + int status; + + TT_BLATHER(("Before waitpid")); + +#ifdef WNOWAIT + if ((waitpid(pid, &status, WNOWAIT) == -1 && errno == EINVAL) && +#else + if ( +#endif + waitpid(pid, &status, 0) == -1) { + perror("waitpid"); + exit(1); + } + TT_BLATHER(("After waitpid")); + + if (WEXITSTATUS(status) != TEST_FORK_EXIT_SUCCESS) { + fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status)); + exit(1); + } +} +static void +test_fork(void) +{ + char c; + struct event ev, sig_ev, usr_ev, existing_ev; + pid_t pid; + + setup_test("After fork: "); + + { + if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, child_pair) == -1) { + fprintf(stderr, "%s: socketpair\n", __func__); + exit(1); + } + + if (evutil_make_socket_nonblocking(child_pair[0]) == -1) { + fprintf(stderr, "fcntl(O_NONBLOCK)"); + exit(1); + } + } + + tt_assert(current_base); + evthread_make_base_notifiable(current_base); + + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + 
} + + event_set(&ev, pair[1], EV_READ, simple_child_read_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + + evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev); + evsignal_add(&sig_ev, NULL); + + evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev); + evsignal_add(&existing_ev, NULL); + + event_base_assert_ok_(current_base); + TT_BLATHER(("Before fork")); + if ((pid = regress_fork()) == 0) { + /* in the child */ + TT_BLATHER(("In child, before reinit")); + event_base_assert_ok_(current_base); + if (event_reinit(current_base) == -1) { + fprintf(stdout, "FAILED (reinit)\n"); + exit(1); + } + TT_BLATHER(("After reinit")); + event_base_assert_ok_(current_base); + TT_BLATHER(("After assert-ok")); + + evsignal_del(&sig_ev); + + evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev); + evsignal_add(&usr_ev, NULL); + kill(getpid(), SIGUSR1); + kill(getpid(), SIGUSR2); + + called = 0; + + event_dispatch(); + + event_base_free(current_base); + + /* we do not send an EOF; simple_read_cb requires an EOF + * to set test_ok. we just verify that the callback was + * called. */ + exit(test_ok != 0 || called != 2 ? -2 : TEST_FORK_EXIT_SUCCESS); + } + + /** wait until client read first message */ + if (read(child_pair[1], &c, 1) < 0) { + tt_fail_perror("read"); + } + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + fork_wait_check(pid); + + /* test that the current event loop still works */ + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + fprintf(stderr, "%s: write\n", __func__); + } + + shutdown(pair[0], EVUTIL_SHUT_WR); + + evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev); + evsignal_add(&usr_ev, NULL); + kill(getpid(), SIGUSR1); + kill(getpid(), SIGUSR2); + + event_dispatch(); + + evsignal_del(&sig_ev); + tt_int_op(test_ok, ==, 1); + + end: + cleanup_test(); + if (child_pair[0] != -1) + evutil_closesocket(child_pair[0]); + if (child_pair[1] != -1) + evutil_closesocket(child_pair[1]); +} + +#ifdef EVENT__HAVE_PTHREADS +static void* del_wait_thread(void *arg) +{ + struct timeval tv_start, tv_end; + + evutil_gettimeofday(&tv_start, NULL); + event_dispatch(); + evutil_gettimeofday(&tv_end, NULL); + + test_timeval_diff_eq(&tv_start, &tv_end, 300); + + end: + return NULL; +} + +static void +del_wait_cb(evutil_socket_t fd, short event, void *arg) +{ + struct timeval delay = { 0, 300*1000 }; + TT_BLATHER(("Sleeping: %i", test_ok)); + evutil_usleep_(&delay); + ++test_ok; +} + +static void +test_del_wait(void) +{ + struct event ev; + pthread_t thread; + + setup_test("event_del will wait: "); + + event_set(&ev, pair[1], EV_READ|EV_PERSIST, del_wait_cb, &ev); + event_add(&ev, NULL); + + pthread_create(&thread, NULL, del_wait_thread, NULL); + + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + { + struct timeval delay = { 0, 30*1000 }; + evutil_usleep_(&delay); + } + + { + struct timeval tv_start, tv_end; + evutil_gettimeofday(&tv_start, NULL); + event_del(&ev); + evutil_gettimeofday(&tv_end, NULL); + test_timeval_diff_eq(&tv_start, &tv_end, 270); + } + + pthread_join(thread, NULL); + + tt_int_op(test_ok, ==, 1); + + end: + ; +} + +static void null_cb(evutil_socket_t fd, short what, void *arg) {} +static void* test_del_notify_thread(void *arg) +{ + event_dispatch(); + return NULL; +} +static void +test_del_notify(void) +{ + struct event ev; + pthread_t thread; + + test_ok = 1; + + event_set(&ev, -1, EV_READ, null_cb, &ev); + event_add(&ev, NULL); + + pthread_create(&thread, NULL, test_del_notify_thread, NULL); 
+ + { + struct timeval delay = { 0, 1000 }; + evutil_usleep_(&delay); + } + + event_del(&ev); + pthread_join(thread, NULL); +} +#endif + +static void +signal_cb_sa(int sig) +{ + test_ok = 2; +} + +static void +signal_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *ev = arg; + + evsignal_del(ev); + test_ok = 1; +} + +static void +test_simplesignal_impl(int find_reorder) +{ + struct event ev; + struct itimerval itv; + + evsignal_set(&ev, SIGALRM, signal_cb, &ev); + evsignal_add(&ev, NULL); + /* find bugs in which operations are re-ordered */ + if (find_reorder) { + evsignal_del(&ev); + evsignal_add(&ev, NULL); + } + + memset(&itv, 0, sizeof(itv)); + itv.it_value.tv_sec = 0; + itv.it_value.tv_usec = 100000; + if (setitimer(ITIMER_REAL, &itv, NULL) == -1) + goto skip_simplesignal; + + event_dispatch(); + skip_simplesignal: + if (evsignal_del(&ev) == -1) + test_ok = 0; + + cleanup_test(); +} + +static void +test_simplestsignal(void) +{ + setup_test("Simplest one signal: "); + test_simplesignal_impl(0); +} + +static void +test_simplesignal(void) +{ + setup_test("Simple signal: "); + test_simplesignal_impl(1); +} + +static void +test_multiplesignal(void) +{ + struct event ev_one, ev_two; + struct itimerval itv; + + setup_test("Multiple signal: "); + + evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one); + evsignal_add(&ev_one, NULL); + + evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two); + evsignal_add(&ev_two, NULL); + + memset(&itv, 0, sizeof(itv)); + itv.it_value.tv_sec = 0; + itv.it_value.tv_usec = 100000; + if (setitimer(ITIMER_REAL, &itv, NULL) == -1) + goto skip_simplesignal; + + event_dispatch(); + + skip_simplesignal: + if (evsignal_del(&ev_one) == -1) + test_ok = 0; + if (evsignal_del(&ev_two) == -1) + test_ok = 0; + + cleanup_test(); +} + +static void +test_immediatesignal(void) +{ + struct event ev; + + test_ok = 0; + evsignal_set(&ev, SIGUSR1, signal_cb, &ev); + evsignal_add(&ev, NULL); + kill(getpid(), SIGUSR1); + event_loop(EVLOOP_NONBLOCK); + evsignal_del(&ev); + cleanup_test(); +} + +static void +test_signal_dealloc(void) +{ + /* make sure that evsignal_event is event_del'ed and pipe closed */ + struct event ev; + struct event_base *base = event_init(); + evsignal_set(&ev, SIGUSR1, signal_cb, &ev); + evsignal_add(&ev, NULL); + evsignal_del(&ev); + event_base_free(base); + /* If we got here without asserting, we're fine. */ + test_ok = 1; + cleanup_test(); +} + +static void +test_signal_pipeloss(void) +{ + /* make sure that the base1 pipe is closed correctly. */ + struct event_base *base1, *base2; + int pipe1; + test_ok = 0; + base1 = event_init(); + pipe1 = base1->sig.ev_signal_pair[0]; + base2 = event_init(); + event_base_free(base2); + event_base_free(base1); + if (close(pipe1) != -1 || errno!=EBADF) { + /* fd must be closed, so second close gives -1, EBADF */ + printf("signal pipe not closed. "); + test_ok = 0; + } else { + test_ok = 1; + } + cleanup_test(); +} + +/* + * make two bases to catch signals, use both of them. this only works + * for event mechanisms that use our signal pipe trick. kqueue handles + * signals internally, and all interested kqueues get all the signals. 
+ */ +static void +test_signal_switchbase(void) +{ + struct event ev1, ev2; + struct event_base *base1, *base2; + int is_kqueue; + test_ok = 0; + base1 = event_init(); + base2 = event_init(); + is_kqueue = !strcmp(event_get_method(),"kqueue"); + evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1); + evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2); + if (event_base_set(base1, &ev1) || + event_base_set(base2, &ev2) || + event_add(&ev1, NULL) || + event_add(&ev2, NULL)) { + fprintf(stderr, "%s: cannot set base, add\n", __func__); + exit(1); + } + + tt_ptr_op(event_get_base(&ev1), ==, base1); + tt_ptr_op(event_get_base(&ev2), ==, base2); + + test_ok = 0; + /* can handle signal before loop is called */ + kill(getpid(), SIGUSR1); + event_base_loop(base2, EVLOOP_NONBLOCK); + if (is_kqueue) { + if (!test_ok) + goto end; + test_ok = 0; + } + event_base_loop(base1, EVLOOP_NONBLOCK); + if (test_ok && !is_kqueue) { + test_ok = 0; + + /* set base1 to handle signals */ + event_base_loop(base1, EVLOOP_NONBLOCK); + kill(getpid(), SIGUSR1); + event_base_loop(base1, EVLOOP_NONBLOCK); + event_base_loop(base2, EVLOOP_NONBLOCK); + } +end: + event_base_free(base1); + event_base_free(base2); + cleanup_test(); +} + +/* + * assert that a signal event removed from the event queue really is + * removed - with no possibility of it's parent handler being fired. + */ +static void +test_signal_assert(void) +{ + struct event ev; + struct event_base *base = event_init(); + test_ok = 0; + /* use SIGCONT so we don't kill ourselves when we signal to nowhere */ + evsignal_set(&ev, SIGCONT, signal_cb, &ev); + evsignal_add(&ev, NULL); + /* + * if evsignal_del() fails to reset the handler, it's current handler + * will still point to evsig_handler(). + */ + evsignal_del(&ev); + + kill(getpid(), SIGCONT); +#if 0 + /* only way to verify we were in evsig_handler() */ + /* XXXX Now there's no longer a good way. */ + if (base->sig.evsig_caught) + test_ok = 0; + else + test_ok = 1; +#else + test_ok = 1; +#endif + + event_base_free(base); + cleanup_test(); + return; +} + +/* + * assert that we restore our previous signal handler properly. 
+ */ +static void +test_signal_restore(void) +{ + struct event ev; + struct event_base *base = event_init(); +#ifdef EVENT__HAVE_SIGACTION + struct sigaction sa; +#endif + + test_ok = 0; +#ifdef EVENT__HAVE_SIGACTION + sa.sa_handler = signal_cb_sa; + sa.sa_flags = 0x0; + sigemptyset(&sa.sa_mask); + if (sigaction(SIGUSR1, &sa, NULL) == -1) + goto out; +#else + if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR) + goto out; +#endif + evsignal_set(&ev, SIGUSR1, signal_cb, &ev); + evsignal_add(&ev, NULL); + evsignal_del(&ev); + + kill(getpid(), SIGUSR1); + /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */ + if (test_ok != 2) + test_ok = 0; +out: + event_base_free(base); + cleanup_test(); + return; +} + +static void +signal_cb_swp(int sig, short event, void *arg) +{ + called++; + if (called < 5) + kill(getpid(), sig); + else + event_loopexit(NULL); +} +static void +timeout_cb_swp(evutil_socket_t fd, short event, void *arg) +{ + if (called == -1) { + struct timeval tv = {5, 0}; + + called = 0; + evtimer_add((struct event *)arg, &tv); + kill(getpid(), SIGUSR1); + return; + } + test_ok = 0; + event_loopexit(NULL); +} + +static void +test_signal_while_processing(void) +{ + struct event_base *base = event_init(); + struct event ev, ev_timer; + struct timeval tv = {0, 0}; + + setup_test("Receiving a signal while processing other signal: "); + + called = -1; + test_ok = 1; + signal_set(&ev, SIGUSR1, signal_cb_swp, NULL); + signal_add(&ev, NULL); + evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer); + evtimer_add(&ev_timer, &tv); + event_dispatch(); + + event_base_free(base); + cleanup_test(); + return; +} +#endif + +static void +test_free_active_base(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base1; + struct event ev1; + + base1 = event_init(); + tt_assert(base1); + event_assign(&ev1, base1, data->pair[1], EV_READ, dummy_read_cb, NULL); + event_add(&ev1, NULL); + event_base_free(base1); /* should not crash */ + + base1 = event_init(); + tt_assert(base1); + event_assign(&ev1, base1, data->pair[0], 0, dummy_read_cb, NULL); + event_active(&ev1, EV_READ, 1); + event_base_free(base1); +end: + ; +} + +static void +test_manipulate_active_events(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event ev1; + + event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL); + + /* Make sure an active event is pending. */ + event_active(&ev1, EV_READ, 1); + tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), + ==, EV_READ); + + /* Make sure that activating an event twice works. 
*/ + event_active(&ev1, EV_WRITE, 1); + tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), + ==, EV_READ|EV_WRITE); + +end: + event_del(&ev1); +} + +static void +event_selfarg_cb(evutil_socket_t fd, short event, void *arg) +{ + struct event *ev = arg; + struct event_base *base = event_get_base(ev); + event_base_assert_ok_(base); + event_base_loopexit(base, NULL); + tt_want(ev == event_base_get_running_event(base)); +} + +static void +test_event_new_selfarg(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb, + event_self_cbarg()); + + event_active(ev, EV_READ, 1); + event_base_dispatch(base); + + event_free(ev); +} + +static void +test_event_assign_selfarg(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event ev; + + event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, + event_self_cbarg()); + event_active(&ev, EV_READ, 1); + event_base_dispatch(base); +} + +static void +test_event_base_get_num_events(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event ev; + int event_count_active; + int event_count_virtual; + int event_count_added; + int event_count_active_virtual; + int event_count_active_added; + int event_count_virtual_added; + int event_count_active_added_virtual; + + struct timeval qsec = {0, 100000}; + + event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, + event_self_cbarg()); + + event_add(&ev, &qsec); + event_count_active = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE); + event_count_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL); + event_count_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ADDED); + event_count_active_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); + event_count_active_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); + event_count_virtual_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); + event_count_active_added_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE| + EVENT_BASE_COUNT_ADDED| + EVENT_BASE_COUNT_VIRTUAL); + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 0); + /* libevent itself adds a timeout event, so the event_count is 2 here */ + tt_int_op(event_count_added, ==, 2); + tt_int_op(event_count_active_virtual, ==, 0); + tt_int_op(event_count_active_added, ==, 2); + tt_int_op(event_count_virtual_added, ==, 2); + tt_int_op(event_count_active_added_virtual, ==, 2); + + event_active(&ev, EV_READ, 1); + event_count_active = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE); + event_count_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL); + event_count_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ADDED); + event_count_active_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); + event_count_active_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); + event_count_virtual_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); + event_count_active_added_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE| + EVENT_BASE_COUNT_ADDED| + EVENT_BASE_COUNT_VIRTUAL); + tt_int_op(event_count_active, ==, 1); + tt_int_op(event_count_virtual, ==, 
0); + tt_int_op(event_count_added, ==, 3); + tt_int_op(event_count_active_virtual, ==, 1); + tt_int_op(event_count_active_added, ==, 4); + tt_int_op(event_count_virtual_added, ==, 3); + tt_int_op(event_count_active_added_virtual, ==, 4); + + event_base_loop(base, 0); + event_count_active = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE); + event_count_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL); + event_count_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ADDED); + event_count_active_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); + event_count_active_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); + event_count_virtual_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); + event_count_active_added_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE| + EVENT_BASE_COUNT_ADDED| + EVENT_BASE_COUNT_VIRTUAL); + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 0); + tt_int_op(event_count_added, ==, 0); + tt_int_op(event_count_active_virtual, ==, 0); + tt_int_op(event_count_active_added, ==, 0); + tt_int_op(event_count_virtual_added, ==, 0); + tt_int_op(event_count_active_added_virtual, ==, 0); + + event_base_add_virtual_(base); + event_count_active = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE); + event_count_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL); + event_count_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ADDED); + event_count_active_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); + event_count_active_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); + event_count_virtual_added = event_base_get_num_events(base, + EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); + event_count_active_added_virtual = event_base_get_num_events(base, + EVENT_BASE_COUNT_ACTIVE| + EVENT_BASE_COUNT_ADDED| + EVENT_BASE_COUNT_VIRTUAL); + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 1); + tt_int_op(event_count_added, ==, 0); + tt_int_op(event_count_active_virtual, ==, 1); + tt_int_op(event_count_active_added, ==, 0); + tt_int_op(event_count_virtual_added, ==, 1); + tt_int_op(event_count_active_added_virtual, ==, 1); + +end: + ; +} + +static void +test_event_base_get_max_events(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event ev; + struct event ev2; + int event_count_active; + int event_count_virtual; + int event_count_added; + int event_count_active_virtual; + int event_count_active_added; + int event_count_virtual_added; + int event_count_active_added_virtual; + + struct timeval qsec = {0, 100000}; + + event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, + event_self_cbarg()); + event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb, + event_self_cbarg()); + + event_add(&ev, &qsec); + event_add(&ev2, &qsec); + event_del(&ev2); + + event_count_active = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE, 0); + event_count_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ADDED, 0); + event_count_active_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_active_added = event_base_get_max_events(base, + 
EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); + event_count_virtual_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); + event_count_active_added_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | + EVENT_BASE_COUNT_ADDED | + EVENT_BASE_COUNT_VIRTUAL, 0); + + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 0); + /* libevent itself adds a timeout event, so the event_count is 4 here */ + tt_int_op(event_count_added, ==, 4); + tt_int_op(event_count_active_virtual, ==, 0); + tt_int_op(event_count_active_added, ==, 4); + tt_int_op(event_count_virtual_added, ==, 4); + tt_int_op(event_count_active_added_virtual, ==, 4); + + event_active(&ev, EV_READ, 1); + event_count_active = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE, 0); + event_count_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ADDED, 0); + event_count_active_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_active_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); + event_count_virtual_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); + event_count_active_added_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | + EVENT_BASE_COUNT_ADDED | + EVENT_BASE_COUNT_VIRTUAL, 0); + + tt_int_op(event_count_active, ==, 1); + tt_int_op(event_count_virtual, ==, 0); + tt_int_op(event_count_added, ==, 4); + tt_int_op(event_count_active_virtual, ==, 1); + tt_int_op(event_count_active_added, ==, 5); + tt_int_op(event_count_virtual_added, ==, 4); + tt_int_op(event_count_active_added_virtual, ==, 5); + + event_base_loop(base, 0); + event_count_active = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE, 1); + event_count_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL, 1); + event_count_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ADDED, 1); + event_count_active_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_active_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); + event_count_virtual_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); + event_count_active_added_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | + EVENT_BASE_COUNT_ADDED | + EVENT_BASE_COUNT_VIRTUAL, 1); + + tt_int_op(event_count_active, ==, 1); + tt_int_op(event_count_virtual, ==, 0); + tt_int_op(event_count_added, ==, 4); + tt_int_op(event_count_active_virtual, ==, 0); + tt_int_op(event_count_active_added, ==, 0); + tt_int_op(event_count_virtual_added, ==, 0); + tt_int_op(event_count_active_added_virtual, ==, 0); + + event_count_active = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE, 0); + event_count_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ADDED, 0); + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 0); + tt_int_op(event_count_added, ==, 0); + + event_base_add_virtual_(base); + event_count_active = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE, 0); + event_count_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL, 0); + 
event_count_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ADDED, 0); + event_count_active_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); + event_count_active_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); + event_count_virtual_added = event_base_get_max_events(base, + EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); + event_count_active_added_virtual = event_base_get_max_events(base, + EVENT_BASE_COUNT_ACTIVE | + EVENT_BASE_COUNT_ADDED | + EVENT_BASE_COUNT_VIRTUAL, 0); + + tt_int_op(event_count_active, ==, 0); + tt_int_op(event_count_virtual, ==, 1); + tt_int_op(event_count_added, ==, 0); + tt_int_op(event_count_active_virtual, ==, 1); + tt_int_op(event_count_active_added, ==, 0); + tt_int_op(event_count_virtual_added, ==, 1); + tt_int_op(event_count_active_added_virtual, ==, 1); + +end: + ; +} + +static void +test_bad_assign(void *ptr) +{ + struct event ev; + int r; + /* READ|SIGNAL is not allowed */ + r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL); + tt_int_op(r,==,-1); + +end: + ; +} + +static int reentrant_cb_run = 0; + +static void +bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr) +{ + struct event_base *base = ptr; + int r; + reentrant_cb_run = 1; + /* This reentrant call to event_base_loop should be detected and + * should fail */ + r = event_base_loop(base, 0); + tt_int_op(r, ==, -1); +end: + ; +} + +static void +test_bad_reentrant(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event ev; + int r; + event_assign(&ev, base, -1, + 0, bad_reentrant_run_loop_cb, base); + + event_active(&ev, EV_WRITE, 1); + r = event_base_loop(base, 0); + tt_int_op(r, ==, 1); + tt_int_op(reentrant_cb_run, ==, 1); +end: + ; +} + +static int n_write_a_byte_cb=0; +static int n_read_and_drain_cb=0; +static int n_activate_other_event_cb=0; +static void +write_a_byte_cb(evutil_socket_t fd, short what, void *arg) +{ + char buf[] = "x"; + if (write(fd, buf, 1) == 1) + ++n_write_a_byte_cb; +} +static void +read_and_drain_cb(evutil_socket_t fd, short what, void *arg) +{ + char buf[128]; + int n; + ++n_read_and_drain_cb; + while ((n = read(fd, buf, sizeof(buf))) > 0) + ; +} + +static void +activate_other_event_cb(evutil_socket_t fd, short what, void *other_) +{ + struct event *ev_activate = other_; + ++n_activate_other_event_cb; + event_active_later_(ev_activate, EV_READ); +} + +static void +test_active_later(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event *ev1 = NULL, *ev2 = NULL; + struct event ev3, ev4; + struct timeval qsec = {0, 100000}; + ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL); + ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL); + event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4); + event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3); + event_add(ev1, NULL); + event_add(ev2, NULL); + event_active_later_(&ev3, EV_READ); + + event_base_loopexit(data->base, &qsec); + + event_base_loop(data->base, 0); + + TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.", + n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb)); + event_del(&ev3); + event_del(&ev4); + + tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb); + tt_int_op(n_write_a_byte_cb, >, 100); + tt_int_op(n_read_and_drain_cb, >, 100); + 
tt_int_op(n_activate_other_event_cb, >, 100); + + event_active_later_(&ev4, EV_READ); + event_active(&ev4, EV_READ, 1); /* This should make the event + active immediately. */ + tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0); + tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0); + + /* Now leave this one around, so that event_free sees it and removes + * it. */ + event_active_later_(&ev3, EV_READ); + event_base_assert_ok_(data->base); + +end: + if (ev1) + event_free(ev1); + if (ev2) + event_free(ev2); + + event_base_free(data->base); + data->base = NULL; +} + + +static void incr_arg_cb(evutil_socket_t fd, short what, void *arg) +{ + int *intptr = arg; + (void) fd; (void) what; + ++*intptr; +} +static void remove_timers_cb(evutil_socket_t fd, short what, void *arg) +{ + struct event **ep = arg; + (void) fd; (void) what; + event_remove_timer(ep[0]); + event_remove_timer(ep[1]); +} +static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg) +{ + evutil_socket_t *sockp = arg; + (void) fd; (void) what; + if (write(*sockp, "A", 1) < 0) + tt_fail_perror("write"); +} +struct read_not_timeout_param +{ + struct event **ev; + int events; + int count; +}; +static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg) +{ + struct read_not_timeout_param *rntp = arg; + char c; + ev_ssize_t n; + (void) fd; (void) what; + n = read(fd, &c, 1); + tt_int_op(n, ==, 1); + rntp->events |= what; + ++rntp->count; + if(2 == rntp->count) event_del(rntp->ev[0]); +end: + ; +} + +static void +test_event_remove_timeout(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = data->base; + struct event *ev[5]; + int ev1_fired=0; + struct timeval ms25 = { 0, 25*1000 }, + ms40 = { 0, 40*1000 }, + ms75 = { 0, 75*1000 }, + ms125 = { 0, 125*1000 }; + struct read_not_timeout_param rntp = { ev, 0, 0 }; + + event_base_assert_ok_(base); + + ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST, + read_not_timeout_cb, &rntp); + ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired); + ev[2] = evtimer_new(base, remove_timers_cb, ev); + ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); + ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); + tt_assert(base); + event_add(ev[2], &ms25); /* remove timers */ + event_add(ev[4], &ms40); /* write to test if timer re-activates */ + event_add(ev[0], &ms75); /* read */ + event_add(ev[1], &ms75); /* timer */ + event_add(ev[3], &ms125); /* timeout. 
*/ + event_base_assert_ok_(base); + + event_base_dispatch(base); + + tt_int_op(ev1_fired, ==, 0); + tt_int_op(rntp.events, ==, EV_READ); + + event_base_assert_ok_(base); +end: + event_free(ev[0]); + event_free(ev[1]); + event_free(ev[2]); + event_free(ev[3]); + event_free(ev[4]); +} + +static void +test_event_base_new(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event_base *base = 0; + struct event ev1; + struct basic_cb_args args; + + int towrite = (int)strlen(TEST1)+1; + int len = write(data->pair[0], TEST1, towrite); + + if (len < 0) + tt_abort_perror("initial write"); + else if (len != towrite) + tt_abort_printf(("initial write fell short (%d of %d bytes)", + len, towrite)); + + if (shutdown(data->pair[0], EVUTIL_SHUT_WR)) + tt_abort_perror("initial write shutdown"); + + base = event_base_new(); + if (!base) + tt_abort_msg("failed to create event base"); + + args.eb = base; + args.ev = &ev1; + args.callcount = 0; + event_assign(&ev1, base, data->pair[1], + EV_READ|EV_PERSIST, basic_read_cb, &args); + + if (event_add(&ev1, NULL)) + tt_abort_perror("initial event_add"); + + if (event_base_loop(base, 0)) + tt_abort_msg("unsuccessful exit from event loop"); + +end: + if (base) + event_base_free(base); +} + +static void +test_loopexit(void) +{ + struct timeval tv, tv_start, tv_end; + struct event ev; + + setup_test("Loop exit: "); + + tv.tv_usec = 0; + tv.tv_sec = 60*60*24; + evtimer_set(&ev, timeout_cb, NULL); + evtimer_add(&ev, &tv); + + tv.tv_usec = 300*1000; + tv.tv_sec = 0; + event_loopexit(&tv); + + evutil_gettimeofday(&tv_start, NULL); + event_dispatch(); + evutil_gettimeofday(&tv_end, NULL); + + evtimer_del(&ev); + + tt_assert(event_base_got_exit(global_base)); + tt_assert(!event_base_got_break(global_base)); + + test_timeval_diff_eq(&tv_start, &tv_end, 300); + + test_ok = 1; +end: + cleanup_test(); +} + +static void +test_loopexit_multiple(void) +{ + struct timeval tv, tv_start, tv_end; + struct event_base *base; + + setup_test("Loop Multiple exit: "); + + base = event_base_new(); + + tv.tv_usec = 200*1000; + tv.tv_sec = 0; + event_base_loopexit(base, &tv); + + tv.tv_usec = 0; + tv.tv_sec = 3; + event_base_loopexit(base, &tv); + + evutil_gettimeofday(&tv_start, NULL); + event_base_dispatch(base); + evutil_gettimeofday(&tv_end, NULL); + + tt_assert(event_base_got_exit(base)); + tt_assert(!event_base_got_break(base)); + + event_base_free(base); + + test_timeval_diff_eq(&tv_start, &tv_end, 200); + + test_ok = 1; + +end: + cleanup_test(); +} + +static void +break_cb(evutil_socket_t fd, short events, void *arg) +{ + test_ok = 1; + event_loopbreak(); +} + +static void +fail_cb(evutil_socket_t fd, short events, void *arg) +{ + test_ok = 0; +} + +static void +test_loopbreak(void) +{ + struct event ev1, ev2; + struct timeval tv; + + setup_test("Loop break: "); + + tv.tv_sec = 0; + tv.tv_usec = 0; + evtimer_set(&ev1, break_cb, NULL); + evtimer_add(&ev1, &tv); + evtimer_set(&ev2, fail_cb, NULL); + evtimer_add(&ev2, &tv); + + event_dispatch(); + + tt_assert(!event_base_got_exit(global_base)); + tt_assert(event_base_got_break(global_base)); + + evtimer_del(&ev1); + evtimer_del(&ev2); + +end: + cleanup_test(); +} + +static struct event *readd_test_event_last_added = NULL; +static void +re_add_read_cb(evutil_socket_t fd, short event, void *arg) +{ + char buf[256]; + struct event *ev_other = arg; + ev_ssize_t n_read; + + readd_test_event_last_added = ev_other; + + n_read = read(fd, buf, sizeof(buf)); + + if (n_read < 0) { + tt_fail_perror("read"); + 
event_base_loopbreak(event_get_base(ev_other)); + } else { + event_add(ev_other, NULL); + ++test_ok; + } +} +static void +test_nonpersist_readd(void *_data) +{ + struct event ev1, ev2; + struct basic_test_data *data = _data; + + memset(&ev1, 0, sizeof(ev1)); + memset(&ev2, 0, sizeof(ev2)); + + tt_assert(!event_assign(&ev1, data->base, data->pair[0], EV_READ, re_add_read_cb, &ev2)); + tt_assert(!event_assign(&ev2, data->base, data->pair[1], EV_READ, re_add_read_cb, &ev1)); + + tt_int_op(write(data->pair[0], "Hello", 5), ==, 5); + tt_int_op(write(data->pair[1], "Hello", 5), ==, 5); + + tt_int_op(event_add(&ev1, NULL), ==, 0); + tt_int_op(event_add(&ev2, NULL), ==, 0); + tt_int_op(event_base_loop(data->base, EVLOOP_ONCE), ==, 0); + tt_int_op(test_ok, ==, 2); + + /* At this point, we executed both callbacks. Whichever one got + * called first added the second, but the second then immediately got + * deleted before its callback was called. At this point, though, it + * re-added the first. + */ + tt_assert(readd_test_event_last_added); + if (readd_test_event_last_added == &ev1) { + tt_assert(event_pending(&ev1, EV_READ, NULL) && !event_pending(&ev2, EV_READ, NULL)); + } else { + tt_assert(event_pending(&ev2, EV_READ, NULL) && !event_pending(&ev1, EV_READ, NULL)); + } + +end: + if (event_initialized(&ev1)) + event_del(&ev1); + if (event_initialized(&ev2)) + event_del(&ev2); +} + +struct test_pri_event { + struct event ev; + int count; +}; + +static void +test_priorities_cb(evutil_socket_t fd, short what, void *arg) +{ + struct test_pri_event *pri = arg; + struct timeval tv; + + if (pri->count == 3) { + event_loopexit(NULL); + return; + } + + pri->count++; + + evutil_timerclear(&tv); + event_add(&pri->ev, &tv); +} + +static void +test_priorities_impl(int npriorities) +{ + struct test_pri_event one, two; + struct timeval tv; + + TT_BLATHER(("Testing Priorities %d: ", npriorities)); + + event_base_priority_init(global_base, npriorities); + + memset(&one, 0, sizeof(one)); + memset(&two, 0, sizeof(two)); + + timeout_set(&one.ev, test_priorities_cb, &one); + if (event_priority_set(&one.ev, 0) == -1) { + fprintf(stderr, "%s: failed to set priority", __func__); + exit(1); + } + + timeout_set(&two.ev, test_priorities_cb, &two); + if (event_priority_set(&two.ev, npriorities - 1) == -1) { + fprintf(stderr, "%s: failed to set priority", __func__); + exit(1); + } + + evutil_timerclear(&tv); + + if (event_add(&one.ev, &tv) == -1) + exit(1); + if (event_add(&two.ev, &tv) == -1) + exit(1); + + event_dispatch(); + + event_del(&one.ev); + event_del(&two.ev); + + if (npriorities == 1) { + if (one.count == 3 && two.count == 3) + test_ok = 1; + } else if (npriorities == 2) { + /* Two is called once because event_loopexit is priority 1 */ + if (one.count == 3 && two.count == 1) + test_ok = 1; + } else { + if (one.count == 3 && two.count == 0) + test_ok = 1; + } +} + +static void +test_priorities(void) +{ + test_priorities_impl(1); + if (test_ok) + test_priorities_impl(2); + if (test_ok) + test_priorities_impl(3); +} + +/* priority-active-inversion: activate a higher-priority event, and make sure + * it keeps us from running a lower-priority event first. */ +static int n_pai_calls = 0; +static struct event pai_events[3]; + +static void +prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg) +{ + int *call_order = arg; + *call_order = n_pai_calls++; + if (n_pai_calls == 1) { + /* This should activate later, even though it shares a + priority with us. 
*/ + event_active(&pai_events[1], EV_READ, 1); + /* This should activate next, since its priority is higher, + even though we activated it second. */ + event_active(&pai_events[2], EV_TIMEOUT, 1); + } +} + +static void +test_priority_active_inversion(void *data_) +{ + struct basic_test_data *data = data_; + struct event_base *base = data->base; + int call_order[3]; + int i; + tt_int_op(event_base_priority_init(base, 8), ==, 0); + + n_pai_calls = 0; + memset(call_order, 0, sizeof(call_order)); + + for (i=0;i<3;++i) { + event_assign(&pai_events[i], data->base, -1, 0, + prio_active_inversion_cb, &call_order[i]); + } + + event_priority_set(&pai_events[0], 4); + event_priority_set(&pai_events[1], 4); + event_priority_set(&pai_events[2], 0); + + event_active(&pai_events[0], EV_WRITE, 1); + + event_base_dispatch(base); + tt_int_op(n_pai_calls, ==, 3); + tt_int_op(call_order[0], ==, 0); + tt_int_op(call_order[1], ==, 2); + tt_int_op(call_order[2], ==, 1); +end: + ; +} + + +static void +test_multiple_cb(evutil_socket_t fd, short event, void *arg) +{ + if (event & EV_READ) + test_ok |= 1; + else if (event & EV_WRITE) + test_ok |= 2; +} + +static void +test_multiple_events_for_same_fd(void) +{ + struct event e1, e2; + + setup_test("Multiple events for same fd: "); + + event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL); + event_add(&e1, NULL); + event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL); + event_add(&e2, NULL); + event_loop(EVLOOP_ONCE); + event_del(&e2); + + if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + event_loop(EVLOOP_ONCE); + event_del(&e1); + + if (test_ok != 3) + test_ok = 0; + + cleanup_test(); +} + +int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf); +int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf); +int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number); +int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf); + +static void +read_once_cb(evutil_socket_t fd, short event, void *arg) +{ + char buf[256]; + int len; + + len = read(fd, buf, sizeof(buf)); + + if (called) { + test_ok = 0; + } else if (len) { + /* Assumes global pair[0] can be used for writing */ + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + test_ok = 0; + } else { + test_ok = 1; + } + } + + called++; +} + +static void +test_want_only_once(void) +{ + struct event ev; + struct timeval tv; + + /* Very simple read test */ + setup_test("Want read only once: "); + + if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + /* Setup the loop termination */ + evutil_timerclear(&tv); + tv.tv_usec = 300*1000; + event_loopexit(&tv); + + event_set(&ev, pair[1], EV_READ, read_once_cb, &ev); + if (event_add(&ev, NULL) == -1) + exit(1); + event_dispatch(); + + cleanup_test(); +} + +#define TEST_MAX_INT 6 + +static void +evtag_int_test(void *ptr) +{ + struct evbuffer *tmp = evbuffer_new(); + ev_uint32_t integers[TEST_MAX_INT] = { + 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 + }; + ev_uint32_t integer; + ev_uint64_t big_int; + int i; + + evtag_init(); + + for (i = 0; i < TEST_MAX_INT; i++) { + int oldlen, newlen; + oldlen = (int)EVBUFFER_LENGTH(tmp); + evtag_encode_int(tmp, integers[i]); + newlen = (int)EVBUFFER_LENGTH(tmp); + TT_BLATHER(("encoded 0x%08x with %d bytes", + (unsigned)integers[i], newlen - oldlen)); + big_int = integers[i]; + big_int *= 1000000000; /* 1 billion */ + evtag_encode_int64(tmp, big_int); + } + + for (i = 0; i < TEST_MAX_INT; i++) { + 
tt_int_op(evtag_decode_int(&integer, tmp), !=, -1); + tt_uint_op(integer, ==, integers[i]); + tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1); + tt_assert((big_int / 1000000000) == integers[i]); + } + + tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); +end: + evbuffer_free(tmp); +} + +static void +evtag_fuzz(void *ptr) +{ + unsigned char buffer[4096]; + struct evbuffer *tmp = evbuffer_new(); + struct timeval tv; + int i, j; + + int not_failed = 0; + + evtag_init(); + + for (j = 0; j < 100; j++) { + for (i = 0; i < (int)sizeof(buffer); i++) + buffer[i] = test_weakrand(); + evbuffer_drain(tmp, -1); + evbuffer_add(tmp, buffer, sizeof(buffer)); + + if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) + not_failed++; + } + + /* The majority of decodes should fail */ + tt_int_op(not_failed, <, 10); + + /* Now insert some corruption into the tag length field */ + evbuffer_drain(tmp, -1); + evutil_timerclear(&tv); + tv.tv_sec = 1; + evtag_marshal_timeval(tmp, 0, &tv); + evbuffer_add(tmp, buffer, sizeof(buffer)); + + ((char *)EVBUFFER_DATA(tmp))[1] = '\xff'; + if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) { + tt_abort_msg("evtag_unmarshal_timeval should have failed"); + } + +end: + evbuffer_free(tmp); +} + +static void +evtag_tag_encoding(void *ptr) +{ + struct evbuffer *tmp = evbuffer_new(); + ev_uint32_t integers[TEST_MAX_INT] = { + 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 + }; + ev_uint32_t integer; + int i; + + evtag_init(); + + for (i = 0; i < TEST_MAX_INT; i++) { + int oldlen, newlen; + oldlen = (int)EVBUFFER_LENGTH(tmp); + evtag_encode_tag(tmp, integers[i]); + newlen = (int)EVBUFFER_LENGTH(tmp); + TT_BLATHER(("encoded 0x%08x with %d bytes", + (unsigned)integers[i], newlen - oldlen)); + } + + for (i = 0; i < TEST_MAX_INT; i++) { + tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1); + tt_uint_op(integer, ==, integers[i]); + } + + tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); + +end: + evbuffer_free(tmp); +} + +static void +evtag_test_peek(void *ptr) +{ + struct evbuffer *tmp = evbuffer_new(); + ev_uint32_t u32; + + evtag_marshal_int(tmp, 30, 0); + evtag_marshal_string(tmp, 40, "Hello world"); + + tt_int_op(evtag_peek(tmp, &u32), ==, 1); + tt_int_op(u32, ==, 30); + tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); + tt_int_op(u32, ==, 1+1+1); + tt_int_op(evtag_consume(tmp), ==, 0); + + tt_int_op(evtag_peek(tmp, &u32), ==, 1); + tt_int_op(u32, ==, 40); + tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); + tt_int_op(u32, ==, 1+1+11); + tt_int_op(evtag_payload_length(tmp, &u32), ==, 0); + tt_int_op(u32, ==, 11); + +end: + evbuffer_free(tmp); +} + + +static void +test_methods(void *ptr) +{ + const char **methods = event_get_supported_methods(); + struct event_config *cfg = NULL; + struct event_base *base = NULL; + const char *backend; + int n_methods = 0; + + tt_assert(methods); + + backend = methods[0]; + while (*methods != NULL) { + TT_BLATHER(("Support method: %s", *methods)); + ++methods; + ++n_methods; + } + + cfg = event_config_new(); + assert(cfg != NULL); + + tt_int_op(event_config_avoid_method(cfg, backend), ==, 0); + event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); + + base = event_base_new_with_config(cfg); + if (n_methods > 1) { + tt_assert(base); + tt_str_op(backend, !=, event_base_get_method(base)); + } else { + tt_assert(base == NULL); + } + +end: + if (base) + event_base_free(base); + if (cfg) + event_config_free(cfg); +} + +static void +test_version(void *arg) +{ + const char *vstr; + ev_uint32_t vint; + int major, minor, patch, n; + + vstr = event_get_version(); + vint = 
event_get_version_number(); + + tt_assert(vstr); + tt_assert(vint); + + tt_str_op(vstr, ==, LIBEVENT_VERSION); + tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER); + + n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch); + tt_assert(3 == n); + tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8))); +end: + ; +} + +static void +test_base_features(void *arg) +{ + struct event_base *base = NULL; + struct event_config *cfg = NULL; + + cfg = event_config_new(); + + tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET)); + + base = event_base_new_with_config(cfg); + if (base) { + tt_int_op(EV_FEATURE_ET, ==, + event_base_get_features(base) & EV_FEATURE_ET); + } else { + base = event_base_new(); + tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET); + } + +end: + if (base) + event_base_free(base); + if (cfg) + event_config_free(cfg); +} + +#ifdef EVENT__HAVE_SETENV +#define SETENV_OK +#elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV) +static void setenv(const char *k, const char *v, int o_) +{ + char b[256]; + evutil_snprintf(b, sizeof(b), "%s=%s",k,v); + putenv(b); +} +#define SETENV_OK +#endif + +#ifdef EVENT__HAVE_UNSETENV +#define UNSETENV_OK +#elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV) +static void unsetenv(const char *k) +{ + char b[256]; + evutil_snprintf(b, sizeof(b), "%s=",k); + putenv(b); +} +#define UNSETENV_OK +#endif + +#if defined(SETENV_OK) && defined(UNSETENV_OK) +static void +methodname_to_envvar(const char *mname, char *buf, size_t buflen) +{ + char *cp; + evutil_snprintf(buf, buflen, "EVENT_NO%s", mname); + for (cp = buf; *cp; ++cp) { + *cp = EVUTIL_TOUPPER_(*cp); + } +} +#endif + +static void +test_base_environ(void *arg) +{ + struct event_base *base = NULL; + struct event_config *cfg = NULL; + +#if defined(SETENV_OK) && defined(UNSETENV_OK) + const char **basenames; + int i, n_methods=0; + char varbuf[128]; + const char *defaultname, *ignoreenvname; + + /* See if unsetenv works before we rely on it. */ + setenv("EVENT_NOWAFFLES", "1", 1); + unsetenv("EVENT_NOWAFFLES"); + if (getenv("EVENT_NOWAFFLES") != NULL) { +#ifndef EVENT__HAVE_UNSETENV + TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test")); +#else + TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test")); +#endif + tt_skip(); + } + + basenames = event_get_supported_methods(); + for (i = 0; basenames[i]; ++i) { + methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf)); + unsetenv(varbuf); + ++n_methods; + } + + base = event_base_new(); + tt_assert(base); + + defaultname = event_base_get_method(base); + TT_BLATHER(("default is <%s>", defaultname)); + event_base_free(base); + base = NULL; + + /* Can we disable the method with EVENT_NOfoo ? */ + if (!strcmp(defaultname, "epoll (with changelist)")) { + setenv("EVENT_NOEPOLL", "1", 1); + ignoreenvname = "epoll"; + } else { + methodname_to_envvar(defaultname, varbuf, sizeof(varbuf)); + setenv(varbuf, "1", 1); + ignoreenvname = defaultname; + } + + /* Use an empty cfg rather than NULL so a failure doesn't exit() */ + cfg = event_config_new(); + base = event_base_new_with_config(cfg); + event_config_free(cfg); + cfg = NULL; + if (n_methods == 1) { + tt_assert(!base); + } else { + tt_assert(base); + tt_str_op(defaultname, !=, event_base_get_method(base)); + event_base_free(base); + base = NULL; + } + + /* Can we disable looking at the environment with IGNORE_ENV ? 
*/ + cfg = event_config_new(); + event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); + base = event_base_new_with_config(cfg); + tt_assert(base); + tt_str_op(ignoreenvname, ==, event_base_get_method(base)); +#else + tt_skip(); +#endif + +end: + if (base) + event_base_free(base); + if (cfg) + event_config_free(cfg); +} + +static void +read_called_once_cb(evutil_socket_t fd, short event, void *arg) +{ + tt_int_op(event, ==, EV_READ); + called += 1; +end: + ; +} + +static void +timeout_called_once_cb(evutil_socket_t fd, short event, void *arg) +{ + tt_int_op(event, ==, EV_TIMEOUT); + called += 100; +end: + ; +} + +static void +immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg) +{ + tt_int_op(event, ==, EV_TIMEOUT); + called += 1000; +end: + ; +} + +static void +test_event_once(void *ptr) +{ + struct basic_test_data *data = ptr; + struct timeval tv; + int r; + + tv.tv_sec = 0; + tv.tv_usec = 50*1000; + called = 0; + r = event_base_once(data->base, data->pair[0], EV_READ, + read_called_once_cb, NULL, NULL); + tt_int_op(r, ==, 0); + r = event_base_once(data->base, -1, EV_TIMEOUT, + timeout_called_once_cb, NULL, &tv); + tt_int_op(r, ==, 0); + r = event_base_once(data->base, -1, 0, NULL, NULL, NULL); + tt_int_op(r, <, 0); + r = event_base_once(data->base, -1, EV_TIMEOUT, + immediate_called_twice_cb, NULL, NULL); + tt_int_op(r, ==, 0); + tv.tv_sec = 0; + tv.tv_usec = 0; + r = event_base_once(data->base, -1, EV_TIMEOUT, + immediate_called_twice_cb, NULL, &tv); + tt_int_op(r, ==, 0); + + if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) { + tt_fail_perror("write"); + } + + shutdown(data->pair[1], EVUTIL_SHUT_WR); + + event_base_dispatch(data->base); + + tt_int_op(called, ==, 2101); +end: + ; +} + +static void +test_event_once_never(void *ptr) +{ + struct basic_test_data *data = ptr; + struct timeval tv; + + /* Have one trigger in 10 seconds (don't worry, because) */ + tv.tv_sec = 10; + tv.tv_usec = 0; + called = 0; + event_base_once(data->base, -1, EV_TIMEOUT, + timeout_called_once_cb, NULL, &tv); + + /* But shut down the base in 75 msec. 
*/ + tv.tv_sec = 0; + tv.tv_usec = 75*1000; + event_base_loopexit(data->base, &tv); + + event_base_dispatch(data->base); + + tt_int_op(called, ==, 0); +end: + ; +} + +static void +test_event_pending(void *ptr) +{ + struct basic_test_data *data = ptr; + struct event *r=NULL, *w=NULL, *t=NULL; + struct timeval tv, now, tv2; + + tv.tv_sec = 0; + tv.tv_usec = 500 * 1000; + r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb, + NULL); + w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb, + NULL); + t = evtimer_new(data->base, timeout_cb, NULL); + + tt_assert(r); + tt_assert(w); + tt_assert(t); + + evutil_gettimeofday(&now, NULL); + event_add(r, NULL); + event_add(t, &tv); + + tt_assert( event_pending(r, EV_READ, NULL)); + tt_assert(!event_pending(w, EV_WRITE, NULL)); + tt_assert(!event_pending(r, EV_WRITE, NULL)); + tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL)); + tt_assert(!event_pending(r, EV_TIMEOUT, NULL)); + tt_assert( event_pending(t, EV_TIMEOUT, NULL)); + tt_assert( event_pending(t, EV_TIMEOUT, &tv2)); + + tt_assert(evutil_timercmp(&tv2, &now, >)); + + test_timeval_diff_eq(&now, &tv2, 500); + +end: + if (r) { + event_del(r); + event_free(r); + } + if (w) { + event_del(w); + event_free(w); + } + if (t) { + event_del(t); + event_free(t); + } +} + +static void +dfd_cb(evutil_socket_t fd, short e, void *data) +{ + *(int*)data = (int)e; +} + +static void +test_event_closed_fd_poll(void *arg) +{ + struct timeval tv; + struct event *e; + struct basic_test_data *data = (struct basic_test_data *)arg; + int i = 0; + + if (strcmp(event_base_get_method(data->base), "poll")) { + tinytest_set_test_skipped_(); + return; + } + + e = event_new(data->base, data->pair[0], EV_READ, dfd_cb, &i); + tt_assert(e); + + tv.tv_sec = 0; + tv.tv_usec = 500 * 1000; + event_add(e, &tv); + tt_assert(event_pending(e, EV_READ, NULL)); + close(data->pair[0]); + data->pair[0] = -1; /** avoids double-close */ + event_base_loop(data->base, EVLOOP_ONCE); + tt_int_op(i, ==, EV_READ); + +end: + if (e) { + event_del(e); + event_free(e); + } +} + +#ifndef _WIN32 +/* You can't do this test on windows, since dup2 doesn't work on sockets */ + +/* Regression test for our workaround for a fun epoll/linux related bug + * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2) + * will get you an EEXIST */ +static void +test_dup_fd(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct event *ev1=NULL, *ev2=NULL; + int fd, dfd=-1; + int ev1_got, ev2_got; + + tt_int_op(write(data->pair[0], "Hello world", + strlen("Hello world")), >, 0); + fd = data->pair[1]; + + dfd = dup(fd); + tt_int_op(dfd, >=, 0); + + ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got); + ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got); + ev1_got = ev2_got = 0; + event_add(ev1, NULL); + event_add(ev2, NULL); + event_base_loop(base, EVLOOP_ONCE); + tt_int_op(ev1_got, ==, EV_READ); + tt_int_op(ev2_got, ==, EV_READ); + + /* Now close and delete dfd then dispatch. We need to do the + * dispatch here so that when we add it later, we think there + * was an intermediate delete. */ + close(dfd); + event_del(ev2); + ev1_got = ev2_got = 0; + event_base_loop(base, EVLOOP_ONCE); + tt_want_int_op(ev1_got, ==, EV_READ); + tt_int_op(ev2_got, ==, 0); + + /* Re-duplicate the fd. We need to get the same duplicated + * value that we closed to provoke the epoll quirk. 
Also, we + * need to change the events to write, or else the old lingering + * read event will make the test pass whether the change was + * successful or not. */ + tt_int_op(dup2(fd, dfd), ==, dfd); + event_free(ev2); + ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got); + event_add(ev2, NULL); + ev1_got = ev2_got = 0; + event_base_loop(base, EVLOOP_ONCE); + tt_want_int_op(ev1_got, ==, EV_READ); + tt_int_op(ev2_got, ==, EV_WRITE); + +end: + if (ev1) + event_free(ev1); + if (ev2) + event_free(ev2); + if (dfd >= 0) + close(dfd); +} +#endif + +#ifdef EVENT__DISABLE_MM_REPLACEMENT +static void +test_mm_functions(void *arg) +{ + tinytest_set_test_skipped_(); +} +#else +static int +check_dummy_mem_ok(void *mem_) +{ + char *mem = mem_; + mem -= 16; + return !memcmp(mem, "{[]}", 16); +} + +static void * +dummy_malloc(size_t len) +{ + char *mem = malloc(len+16); + memcpy(mem, "{[]}", 16); + return mem+16; +} + +static void * +dummy_realloc(void *mem_, size_t len) +{ + char *mem = mem_; + if (!mem) + return dummy_malloc(len); + tt_want(check_dummy_mem_ok(mem_)); + mem -= 16; + mem = realloc(mem, len+16); + return mem+16; +} + +static void +dummy_free(void *mem_) +{ + char *mem = mem_; + tt_want(check_dummy_mem_ok(mem_)); + mem -= 16; + free(mem); +} + +static void +test_mm_functions(void *arg) +{ + struct event_base *b = NULL; + struct event_config *cfg = NULL; + event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free); + cfg = event_config_new(); + event_config_avoid_method(cfg, "Nonesuch"); + b = event_base_new_with_config(cfg); + tt_assert(b); + tt_assert(check_dummy_mem_ok(b)); +end: + if (cfg) + event_config_free(cfg); + if (b) + event_base_free(b); +} +#endif + +static void +many_event_cb(evutil_socket_t fd, short event, void *arg) +{ + int *calledp = arg; + *calledp += 1; +} + +static void +test_many_events(void *arg) +{ + /* Try 70 events that should all be ready at once. This will + * exercise the "resize" code on most of the backends, and will make + * sure that we can get past the 64-handle limit of some windows + * functions. */ +#define MANY 70 + + struct basic_test_data *data = arg; + struct event_base *base = data->base; + int one_at_a_time = data->setup_data != NULL; + evutil_socket_t sock[MANY]; + struct event *ev[MANY]; + int called[MANY]; + int i; + int loopflags = EVLOOP_NONBLOCK, evflags=0; + if (one_at_a_time) { + loopflags |= EVLOOP_ONCE; + evflags = EV_PERSIST; + } + + memset(sock, 0xff, sizeof(sock)); + memset(ev, 0, sizeof(ev)); + memset(called, 0, sizeof(called)); + + for (i = 0; i < MANY; ++i) { + /* We need an event that will hit the backend, and that will + * be ready immediately. "Send a datagram" is an easy + * instance of that. 
*/ + sock[i] = socket(AF_INET, SOCK_DGRAM, 0); + tt_assert(sock[i] >= 0); + tt_assert(!evutil_make_socket_nonblocking(sock[i])); + called[i] = 0; + ev[i] = event_new(base, sock[i], EV_WRITE|evflags, + many_event_cb, &called[i]); + event_add(ev[i], NULL); + if (one_at_a_time) + event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); + } + + event_base_loop(base, loopflags); + + for (i = 0; i < MANY; ++i) { + if (one_at_a_time) + tt_int_op(called[i], ==, MANY - i + 1); + else + tt_int_op(called[i], ==, 1); + } + +end: + for (i = 0; i < MANY; ++i) { + if (ev[i]) + event_free(ev[i]); + if (sock[i] >= 0) + evutil_closesocket(sock[i]); + } +#undef MANY +} + +static void +test_struct_event_size(void *arg) +{ + tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event)); +end: + ; +} + +static void +test_get_assignment(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct event *ev1 = NULL; + const char *str = "foo"; + + struct event_base *b; + evutil_socket_t s; + short what; + event_callback_fn cb; + void *cb_arg; + + ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str); + event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg); + + tt_ptr_op(b, ==, base); + tt_fd_op(s, ==, data->pair[1]); + tt_int_op(what, ==, EV_READ); + tt_ptr_op(cb, ==, dummy_read_cb); + tt_ptr_op(cb_arg, ==, str); + + /* Now make sure this doesn't crash. */ + event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL); + +end: + if (ev1) + event_free(ev1); +} + +struct foreach_helper { + int count; + const struct event *ev; +}; + +static int +foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg) +{ + struct foreach_helper *h = event_get_callback_arg(ev); + struct timeval *tv = arg; + if (event_get_callback(ev) != timeout_cb) + return 0; + tt_ptr_op(event_get_base(ev), ==, base); + tt_int_op(tv->tv_sec, ==, 10); + h->ev = ev; + h->count++; + return 0; +end: + return -1; +} + +static int +foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg) +{ + const struct event **ev_out = arg; + struct foreach_helper *h = event_get_callback_arg(ev); + if (event_get_callback(ev) != timeout_cb) + return 0; + if (h->count == 99) { + *ev_out = ev; + return 101; + } + return 0; +} + +static void +test_event_foreach(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct event *ev[5]; + struct foreach_helper visited[5]; + int i; + struct timeval ten_sec = {10,0}; + const struct event *ev_found = NULL; + + for (i = 0; i < 5; ++i) { + visited[i].count = 0; + visited[i].ev = NULL; + ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]); + } + + tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL)); + tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL)); + + event_add(ev[0], &ten_sec); + event_add(ev[1], &ten_sec); + event_active(ev[1], EV_TIMEOUT, 1); + event_active(ev[2], EV_TIMEOUT, 1); + event_add(ev[3], &ten_sec); + /* Don't touch ev[4]. 
*/ + + tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb, + &ten_sec)); + tt_int_op(1, ==, visited[0].count); + tt_int_op(1, ==, visited[1].count); + tt_int_op(1, ==, visited[2].count); + tt_int_op(1, ==, visited[3].count); + tt_ptr_op(ev[0], ==, visited[0].ev); + tt_ptr_op(ev[1], ==, visited[1].ev); + tt_ptr_op(ev[2], ==, visited[2].ev); + tt_ptr_op(ev[3], ==, visited[3].ev); + + visited[2].count = 99; + tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb, + &ev_found)); + tt_ptr_op(ev_found, ==, ev[2]); + +end: + for (i=0; i<5; ++i) { + event_free(ev[i]); + } +} + +static struct event_base *cached_time_base = NULL; +static int cached_time_reset = 0; +static int cached_time_sleep = 0; +static void +cache_time_cb(evutil_socket_t fd, short what, void *arg) +{ + struct timeval *tv = arg; + tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv)); + if (cached_time_sleep) { + struct timeval delay = { 0, 30*1000 }; + evutil_usleep_(&delay); + } + if (cached_time_reset) { + event_base_update_cache_time(cached_time_base); + } +end: + ; +} + +static void +test_gettimeofday_cached(void *arg) +{ + struct basic_test_data *data = arg; + struct event_config *cfg = NULL; + struct event_base *base = NULL; + struct timeval tv1, tv2, tv3, now; + struct event *ev1=NULL, *ev2=NULL, *ev3=NULL; + int cached_time_disable = strstr(data->setup_data, "disable") != NULL; + + cfg = event_config_new(); + if (cached_time_disable) { + event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME); + } + cached_time_base = base = event_base_new_with_config(cfg); + tt_assert(base); + + /* Try gettimeofday_cached outside of an event loop. */ + evutil_gettimeofday(&now, NULL); + tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1)); + tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2)); + tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10); + tt_int_op(timeval_msec_diff(&tv1, &now), <, 10); + + cached_time_reset = strstr(data->setup_data, "reset") != NULL; + cached_time_sleep = strstr(data->setup_data, "sleep") != NULL; + + ev1 = event_new(base, -1, 0, cache_time_cb, &tv1); + ev2 = event_new(base, -1, 0, cache_time_cb, &tv2); + ev3 = event_new(base, -1, 0, cache_time_cb, &tv3); + + event_active(ev1, EV_TIMEOUT, 1); + event_active(ev2, EV_TIMEOUT, 1); + event_active(ev3, EV_TIMEOUT, 1); + + event_base_dispatch(base); + + if (cached_time_reset && cached_time_sleep) { + tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); + tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); + } else if (cached_time_disable && cached_time_sleep) { + tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); + tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); + } else if (! 
cached_time_disable) { + tt_assert(evutil_timercmp(&tv1, &tv2, ==)); + tt_assert(evutil_timercmp(&tv2, &tv3, ==)); + } + +end: + if (ev1) + event_free(ev1); + if (ev2) + event_free(ev2); + if (ev3) + event_free(ev3); + if (base) + event_base_free(base); + if (cfg) + event_config_free(cfg); +} + +static void +tabf_cb(evutil_socket_t fd, short what, void *arg) +{ + int *ptr = arg; + *ptr = what; + *ptr += 0x10000; +} + +static void +test_evmap_invalid_slots(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct event *ev1 = NULL, *ev2 = NULL; + int e1, e2; +#ifndef _WIN32 + struct event *ev3 = NULL, *ev4 = NULL; + int e3, e4; +#endif + + ev1 = evsignal_new(base, -1, dummy_read_cb, (void *)base); + ev2 = evsignal_new(base, NSIG, dummy_read_cb, (void *)base); + tt_assert(ev1); + tt_assert(ev2); + e1 = event_add(ev1, NULL); + e2 = event_add(ev2, NULL); + tt_int_op(e1, !=, 0); + tt_int_op(e2, !=, 0); +#ifndef _WIN32 + ev3 = event_new(base, INT_MAX, EV_READ, dummy_read_cb, (void *)base); + ev4 = event_new(base, INT_MAX / 2, EV_READ, dummy_read_cb, (void *)base); + tt_assert(ev3); + tt_assert(ev4); + e3 = event_add(ev3, NULL); + e4 = event_add(ev4, NULL); + tt_int_op(e3, !=, 0); + tt_int_op(e4, !=, 0); +#endif + +end: + event_free(ev1); + event_free(ev2); +#ifndef _WIN32 + event_free(ev3); + event_free(ev4); +#endif +} + +static void +test_active_by_fd(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL; + int e1,e2,e3,e4; +#ifndef _WIN32 + struct event *evsig = NULL; + int es; +#endif + struct timeval tenmin = { 600, 0 }; + + /* Ensure no crash on nonexistent FD. */ + event_base_active_by_fd(base, 1000, EV_READ); + + /* Ensure no crash on bogus FD. */ + event_base_active_by_fd(base, -1, EV_READ); + + /* Ensure no crash on nonexistent/bogus signal. */ + event_base_active_by_signal(base, 1000); + event_base_active_by_signal(base, -1); + + event_base_assert_ok_(base); + + e1 = e2 = e3 = e4 = 0; + ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1); + ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2); + ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3); + ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4); + tt_assert(ev1); + tt_assert(ev2); + tt_assert(ev3); + tt_assert(ev4); +#ifndef _WIN32 + evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es); + tt_assert(evsig); + event_add(evsig, &tenmin); +#endif + + event_add(ev1, &tenmin); + event_add(ev2, NULL); + event_add(ev3, NULL); + event_add(ev4, &tenmin); + + + event_base_assert_ok_(base); + + /* Trigger 2, 3, 4 */ + event_base_active_by_fd(base, data->pair[0], EV_WRITE); + event_base_active_by_fd(base, data->pair[1], EV_READ); + event_base_active_by_fd(base, data->pair[1], EV_TIMEOUT); +#ifndef _WIN32 + event_base_active_by_signal(base, SIGHUP); +#endif + + event_base_assert_ok_(base); + + event_base_loop(base, EVLOOP_ONCE); + + tt_int_op(e1, ==, 0); + tt_int_op(e2, ==, EV_WRITE | 0x10000); + tt_int_op(e3, ==, EV_READ | 0x10000); + /* Mask out EV_WRITE here, since it could be genuinely writeable. 
*/ + tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | EV_TIMEOUT | 0x10000); +#ifndef _WIN32 + tt_int_op(es, ==, EV_SIGNAL | 0x10000); +#endif + +end: + if (ev1) + event_free(ev1); + if (ev2) + event_free(ev2); + if (ev3) + event_free(ev3); + if (ev4) + event_free(ev4); +#ifndef _WIN32 + if (evsig) + event_free(evsig); +#endif +} + +struct testcase_t main_testcases[] = { + /* Some converted-over tests */ + { "methods", test_methods, TT_FORK, NULL, NULL }, + { "version", test_version, 0, NULL, NULL }, + BASIC(base_features, TT_FORK|TT_NO_LOGS), + { "base_environ", test_base_environ, TT_FORK, NULL, NULL }, + + BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR), + BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR), + + BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE), + BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE), + BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE), + BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE), + BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE), + BASIC(evmap_invalid_slots, TT_FORK|TT_NEED_BASE), + + BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), + BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), + BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR|TT_RETRIABLE), + BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), + + /* These are still using the old API */ + LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE), + { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "persistent_active_timeout", test_persistent_active_timeout, + TT_FORK|TT_NEED_BASE|TT_RETRIABLE, &basic_setup, NULL }, + LEGACY(priorities, TT_FORK|TT_NEED_BASE), + BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE), + { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE, + &basic_setup, NULL }, + + /* These legacy tests may not all need all of these flags. 
*/ + LEGACY(simpleread, TT_ISOLATED), + LEGACY(simpleread_multiple, TT_ISOLATED), + LEGACY(simplewrite, TT_ISOLATED), + { "simpleclose", test_simpleclose, TT_FORK, &basic_setup, + NULL }, + LEGACY(multiple, TT_ISOLATED), + LEGACY(persistent, TT_ISOLATED), + LEGACY(combined, TT_ISOLATED), + LEGACY(simpletimeout, TT_ISOLATED), + LEGACY(loopbreak, TT_ISOLATED), + LEGACY(loopexit, TT_ISOLATED), + LEGACY(loopexit_multiple, TT_ISOLATED), + { "nonpersist_readd", test_nonpersist_readd, TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE, &basic_setup, NULL }, + LEGACY(multiple_events_for_same_fd, TT_ISOLATED), + LEGACY(want_only_once, TT_ISOLATED), + { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL }, + { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL }, + { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup, + NULL }, + { "event_closed_fd_poll", test_event_closed_fd_poll, TT_ISOLATED, &basic_setup, + NULL }, + +#ifndef _WIN32 + { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL }, +#endif + { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL }, + { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL }, + { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 }, + + { "struct_event_size", test_struct_event_size, 0, NULL, NULL }, + BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), + + BASIC(event_foreach, TT_FORK|TT_NEED_BASE), + { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" }, + { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" }, + { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" }, + { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" }, + { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" }, + + BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), + +#ifndef _WIN32 + LEGACY(fork, TT_ISOLATED), +#endif +#ifdef EVENT__HAVE_PTHREADS + /** TODO: support win32 */ + LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS|TT_RETRIABLE), + LEGACY(del_notify, TT_ISOLATED|TT_NEED_THREADS), +#endif + + END_OF_TESTCASES +}; + +struct testcase_t evtag_testcases[] = { + { "int", evtag_int_test, TT_FORK, NULL, NULL }, + { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL }, + { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL }, + { "peek", evtag_test_peek, 0, NULL, NULL }, + + END_OF_TESTCASES +}; + +struct testcase_t signal_testcases[] = { +#ifndef _WIN32 + LEGACY(simplestsignal, TT_ISOLATED), + LEGACY(simplesignal, TT_ISOLATED), + LEGACY(multiplesignal, TT_ISOLATED), + LEGACY(immediatesignal, TT_ISOLATED), + LEGACY(signal_dealloc, TT_ISOLATED), + LEGACY(signal_pipeloss, TT_ISOLATED), + LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS), + LEGACY(signal_restore, TT_ISOLATED), + LEGACY(signal_assert, TT_ISOLATED), + LEGACY(signal_while_processing, TT_ISOLATED), +#endif + END_OF_TESTCASES +}; + diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress.h b/probe-busybox/libevent-2.1.11-stable/test/regress.h new file mode 100644 index 00000000..643b82ba --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2000-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the 
following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef REGRESS_H_INCLUDED_ +#define REGRESS_H_INCLUDED_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tinytest.h" +#include "tinytest_macros.h" + +extern struct testcase_t main_testcases[]; +extern struct testcase_t evtag_testcases[]; +extern struct testcase_t evbuffer_testcases[]; +extern struct testcase_t finalize_testcases[]; +extern struct testcase_t bufferevent_testcases[]; +extern struct testcase_t bufferevent_iocp_testcases[]; +extern struct testcase_t util_testcases[]; +extern struct testcase_t signal_testcases[]; +extern struct testcase_t http_testcases[]; +extern struct testcase_t http_iocp_testcases[]; +extern struct testcase_t dns_testcases[]; +extern struct testcase_t rpc_testcases[]; +extern struct testcase_t edgetriggered_testcases[]; +extern struct testcase_t minheap_testcases[]; +extern struct testcase_t iocp_testcases[]; +extern struct testcase_t ssl_testcases[]; +extern struct testcase_t listener_testcases[]; +extern struct testcase_t listener_iocp_testcases[]; +extern struct testcase_t thread_testcases[]; + +extern struct evutil_weakrand_state test_weakrand_state; + +#define test_weakrand() (evutil_weakrand_(&test_weakrand_state)) + +void regress_threads(void *); +void test_bufferevent_zlib(void *); + +/* Helpers to wrap old testcases */ +extern evutil_socket_t pair[2]; +extern int test_ok; +extern int called; +extern struct event_base *global_base; +extern int in_legacy_test_wrapper; + +int regress_make_tmpfile(const void *data, size_t datalen, char **filename_out); + +struct basic_test_data { + struct event_base *base; + evutil_socket_t pair[2]; + + void (*legacy_test_fn)(void); + + void *setup_data; +}; +extern const struct testcase_setup_t basic_setup; + + +extern const struct testcase_setup_t legacy_setup; +void run_legacy_test_fn(void *ptr); + +extern int libevent_tests_running_in_debug_mode; + +/* A couple of flags that basic/legacy_setup can support. 
*/ +#define TT_NEED_SOCKETPAIR TT_FIRST_USER_FLAG +#define TT_NEED_BASE (TT_FIRST_USER_FLAG<<1) +#define TT_NEED_DNS (TT_FIRST_USER_FLAG<<2) +#define TT_LEGACY (TT_FIRST_USER_FLAG<<3) +#define TT_NEED_THREADS (TT_FIRST_USER_FLAG<<4) +#define TT_NO_LOGS (TT_FIRST_USER_FLAG<<5) +#define TT_ENABLE_IOCP_FLAG (TT_FIRST_USER_FLAG<<6) +#define TT_ENABLE_IOCP (TT_ENABLE_IOCP_FLAG|TT_NEED_THREADS) + +/* All the flags that a legacy test needs. */ +#define TT_ISOLATED TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE + + +#define BASIC(name,flags) \ + { #name, test_## name, flags, &basic_setup, NULL } + +#define LEGACY(name,flags) \ + { #name, run_legacy_test_fn, flags|TT_LEGACY, &legacy_setup, \ + test_## name } + +struct evutil_addrinfo; +struct evutil_addrinfo *ai_find_by_family(struct evutil_addrinfo *ai, int f); +struct evutil_addrinfo *ai_find_by_protocol(struct evutil_addrinfo *ai, int p); +int test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port, + int socktype, int protocol, int line); + +#define test_ai_eq(ai, str, s, p) do { \ + if (test_ai_eq_((ai), (str), (s), (p), __LINE__)<0) \ + goto end; \ + } while (0) + +#define test_timeval_diff_leq(tv1, tv2, diff, tolerance) \ + tt_int_op(labs(timeval_msec_diff((tv1), (tv2)) - diff), <=, tolerance) + +#define test_timeval_diff_eq(tv1, tv2, diff) \ + test_timeval_diff_leq((tv1), (tv2), (diff), 50) + +long timeval_msec_diff(const struct timeval *start, const struct timeval *end); + +#ifndef _WIN32 +pid_t regress_fork(void); +#endif + +#ifdef EVENT__HAVE_OPENSSL +#include +EVP_PKEY *ssl_getkey(void); +X509 *ssl_getcert(EVP_PKEY *key); +SSL_CTX *get_ssl_ctx(void); +void init_ssl(void); +#endif + +void * basic_test_setup(const struct testcase_t *testcase); +int basic_test_cleanup(const struct testcase_t *testcase, void *ptr); + +#ifdef __cplusplus +} +#endif + +#endif /* REGRESS_H_INCLUDED_ */ diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress.rpc b/probe-busybox/libevent-2.1.11-stable/test/regress.rpc new file mode 100644 index 00000000..0ee904e9 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress.rpc @@ -0,0 +1,25 @@ +/* tests data packing and unpacking */ + +struct msg { + string /* sender */ from_name = 1; /* be verbose */ + string to_name = 2; + optional struct[kill] attack = 3; + array struct[run] run = 4; +} + +struct kill { + string weapon = 0x10121; + string action = 2; + array int how_often = 3; +} + +struct run { + string how = 1; + optional bytes some_bytes = 2; + + bytes fixed_bytes[24] = 3; + array string notes = 4; + + optional int64 large_number = 5; + array int other_numbers = 6; +} diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress_buffer.c b/probe-busybox/libevent-2.1.11-stable/test/regress_buffer.c new file mode 100644 index 00000000..8ac4b6e0 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_buffer.c @@ -0,0 +1,2790 @@ +/* + * Copyright (c) 2003-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "util-internal.h" + +#ifdef _WIN32 +#include +#include +#endif + +#include "event2/event-config.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#ifndef _WIN32 +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include + +#include "event2/event.h" +#include "event2/buffer.h" +#include "event2/buffer_compat.h" +#include "event2/util.h" + +#include "defer-internal.h" +#include "evbuffer-internal.h" +#include "log-internal.h" + +#include "regress.h" + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) + +/* Validates that an evbuffer is good. Returns false if it isn't, true if it + * is*/ +static int +evbuffer_validate_(struct evbuffer *buf) +{ + struct evbuffer_chain *chain; + size_t sum = 0; + int found_last_with_datap = 0; + + if (buf->first == NULL) { + tt_assert(buf->last == NULL); + tt_assert(buf->total_len == 0); + } + + chain = buf->first; + + tt_assert(buf->last_with_datap); + if (buf->last_with_datap == &buf->first) + found_last_with_datap = 1; + + while (chain != NULL) { + if (&chain->next == buf->last_with_datap) + found_last_with_datap = 1; + sum += chain->off; + if (chain->next == NULL) { + tt_assert(buf->last == chain); + } + tt_assert(chain->buffer_len >= chain->misalign + chain->off); + chain = chain->next; + } + + if (buf->first) + tt_assert(*buf->last_with_datap); + + if (*buf->last_with_datap) { + chain = *buf->last_with_datap; + if (chain->off == 0 || buf->total_len == 0) { + tt_assert(chain->off == 0) + tt_assert(chain == buf->first); + tt_assert(buf->total_len == 0); + } + chain = chain->next; + while (chain != NULL) { + tt_assert(chain->off == 0); + chain = chain->next; + } + } else { + tt_assert(buf->last_with_datap == &buf->first); + } + tt_assert(found_last_with_datap); + + tt_assert(sum == buf->total_len); + return 1; + end: + return 0; +} + +static void +evbuffer_get_waste(struct evbuffer *buf, size_t *allocatedp, size_t *wastedp, size_t *usedp) +{ + struct evbuffer_chain *chain; + size_t a, w, u; + int n = 0; + u = a = w = 0; + + chain = buf->first; + /* skip empty at start */ + while (chain && chain->off==0) { + ++n; + a += chain->buffer_len; + chain = chain->next; + } + /* first nonempty chain: stuff at the end only is wasted. 
*/ + if (chain) { + ++n; + a += chain->buffer_len; + u += chain->off; + if (chain->next && chain->next->off) + w += (size_t)(chain->buffer_len - (chain->misalign + chain->off)); + chain = chain->next; + } + /* subsequent nonempty chains */ + while (chain && chain->off) { + ++n; + a += chain->buffer_len; + w += (size_t)chain->misalign; + u += chain->off; + if (chain->next && chain->next->off) + w += (size_t) (chain->buffer_len - (chain->misalign + chain->off)); + chain = chain->next; + } + /* subsequent empty chains */ + while (chain) { + ++n; + a += chain->buffer_len; + } + *allocatedp = a; + *wastedp = w; + *usedp = u; +} + +#define evbuffer_validate(buf) \ + TT_STMT_BEGIN if (!evbuffer_validate_(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END + +static void +test_evbuffer(void *ptr) +{ + static char buffer[512], *tmp; + struct evbuffer *evb = evbuffer_new(); + struct evbuffer *evb_two = evbuffer_new(); + size_t sz_tmp; + int i; + + evbuffer_validate(evb); + evbuffer_add_printf(evb, "%s/%d", "hello", 1); + evbuffer_validate(evb); + + tt_assert(evbuffer_get_length(evb) == 7); + tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "hello/1", 1)); + + evbuffer_add_buffer(evb, evb_two); + evbuffer_validate(evb); + + evbuffer_drain(evb, strlen("hello/")); + evbuffer_validate(evb); + tt_assert(evbuffer_get_length(evb) == 1); + tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "1", 1)); + + evbuffer_add_printf(evb_two, "%s", "/hello"); + evbuffer_validate(evb); + evbuffer_add_buffer(evb, evb_two); + evbuffer_validate(evb); + + tt_assert(evbuffer_get_length(evb_two) == 0); + tt_assert(evbuffer_get_length(evb) == 7); + tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "1/hello", 7)); + + memset(buffer, 0, sizeof(buffer)); + evbuffer_add(evb, buffer, sizeof(buffer)); + evbuffer_validate(evb); + tt_assert(evbuffer_get_length(evb) == 7 + 512); + + tmp = (char *)evbuffer_pullup(evb, 7 + 512); + tt_assert(tmp); + tt_assert(!strncmp(tmp, "1/hello", 7)); + tt_assert(!memcmp(tmp + 7, buffer, sizeof(buffer))); + evbuffer_validate(evb); + + evbuffer_prepend(evb, "something", 9); + evbuffer_validate(evb); + evbuffer_prepend(evb, "else", 4); + evbuffer_validate(evb); + + tmp = (char *)evbuffer_pullup(evb, 4 + 9 + 7); + tt_assert(!strncmp(tmp, "elsesomething1/hello", 4 + 9 + 7)); + evbuffer_validate(evb); + + evbuffer_drain(evb, -1); + evbuffer_validate(evb); + evbuffer_drain(evb_two, -1); + evbuffer_validate(evb); + + for (i = 0; i < 3; ++i) { + evbuffer_add(evb_two, buffer, sizeof(buffer)); + evbuffer_validate(evb_two); + evbuffer_add_buffer(evb, evb_two); + evbuffer_validate(evb); + evbuffer_validate(evb_two); + } + + tt_assert(evbuffer_get_length(evb_two) == 0); + tt_assert(evbuffer_get_length(evb) == i * sizeof(buffer)); + + /* test remove buffer */ + sz_tmp = (size_t)(sizeof(buffer)*2.5); + evbuffer_remove_buffer(evb, evb_two, sz_tmp); + tt_assert(evbuffer_get_length(evb_two) == sz_tmp); + tt_assert(evbuffer_get_length(evb) == sizeof(buffer) / 2); + evbuffer_validate(evb); + + if (memcmp(evbuffer_pullup( + evb, -1), buffer, sizeof(buffer) / 2) != 0 || + memcmp(evbuffer_pullup( + evb_two, -1), buffer, sizeof(buffer)) != 0) + tt_abort_msg("Pullup did not preserve content"); + + evbuffer_validate(evb); + + + /* testing one-vector reserve and commit */ + { + struct evbuffer_iovec v[1]; + char *buf; + int i, j, r; + + for (i = 0; i < 3; ++i) { + r = evbuffer_reserve_space(evb, 10000, v, 1); + tt_int_op(r, ==, 1); + tt_assert(v[0].iov_len >= 10000); + tt_assert(v[0].iov_base != NULL); + + evbuffer_validate(evb); + buf = 
v[0].iov_base; + for (j = 0; j < 10000; ++j) { + buf[j] = j; + } + evbuffer_validate(evb); + + tt_int_op(evbuffer_commit_space(evb, v, 1), ==, 0); + evbuffer_validate(evb); + + tt_assert(evbuffer_get_length(evb) >= 10000); + + evbuffer_drain(evb, j * 5000); + evbuffer_validate(evb); + } + } + + end: + evbuffer_free(evb); + evbuffer_free(evb_two); +} + +static void +no_cleanup(const void *data, size_t datalen, void *extra) +{ +} + +static void +test_evbuffer_remove_buffer_with_empty(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + char buf[2] = { 'A', 'A' }; + + evbuffer_validate(src); + evbuffer_validate(dst); + + /* setup the buffers */ + /* we need more data in src than we will move later */ + evbuffer_add_reference(src, buf, sizeof(buf), no_cleanup, NULL); + evbuffer_add_reference(src, buf, sizeof(buf), no_cleanup, NULL); + /* we need one buffer in dst and one empty buffer at the end */ + evbuffer_add(dst, buf, sizeof(buf)); + evbuffer_add_reference(dst, buf, 0, no_cleanup, NULL); + + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "AAAA", 4); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "AA", 2); + + /* move three bytes over */ + evbuffer_remove_buffer(src, dst, 3); + + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "A", 1); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "AAAAA", 5); + + end: + evbuffer_free(src); + evbuffer_free(dst); +} + +static void +test_evbuffer_remove_buffer_with_empty2(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + struct evbuffer *buf = evbuffer_new(); + + evbuffer_add(buf, "foo", 3); + evbuffer_add_reference(buf, "foo", 3, NULL, NULL); + + evbuffer_add_reference(src, "foo", 3, NULL, NULL); + evbuffer_add_reference(src, NULL, 0, NULL, NULL); + evbuffer_add_buffer(src, buf); + + evbuffer_add(buf, "foo", 3); + evbuffer_add_reference(buf, "foo", 3, NULL, NULL); + + evbuffer_add_reference(dst, "foo", 3, NULL, NULL); + evbuffer_add_reference(dst, NULL, 0, NULL, NULL); + evbuffer_add_buffer(dst, buf); + + tt_int_op(evbuffer_get_length(src), ==, 9); + tt_int_op(evbuffer_get_length(dst), ==, 9); + + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "foofoofoo", 9); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "foofoofoo", 9); + + evbuffer_remove_buffer(src, dst, 8); + + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_int_op(evbuffer_get_length(src), ==, 1); + tt_int_op(evbuffer_get_length(dst), ==, 17); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "o", 1); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "foofoofoofoofoofo", 17); + + end: + evbuffer_free(src); + evbuffer_free(dst); + evbuffer_free(buf); +} + +static void +test_evbuffer_remove_buffer_with_empty3(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + struct evbuffer *buf = evbuffer_new(); + + evbuffer_add(buf, "foo", 3); + evbuffer_add_reference(buf, NULL, 0, NULL, NULL); + + evbuffer_add_reference(src, "foo", 3, NULL, NULL); + evbuffer_add_reference(src, NULL, 0, NULL, NULL); + evbuffer_prepend_buffer(src, buf); + + evbuffer_add(buf, "foo", 3); + evbuffer_add_reference(buf, NULL, 0, NULL, NULL); + + evbuffer_add_reference(dst, "foo", 3, NULL, NULL); + evbuffer_add_reference(dst, NULL, 0, NULL, NULL); + evbuffer_prepend_buffer(dst, buf); + + tt_int_op(evbuffer_get_length(src), ==, 6); + tt_int_op(evbuffer_get_length(dst), ==, 6); + + 
evbuffer_validate(src); + evbuffer_validate(dst); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "foofoo", 6); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "foofoo", 6); + + evbuffer_remove_buffer(src, dst, 5); + + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_int_op(evbuffer_get_length(src), ==, 1); + tt_int_op(evbuffer_get_length(dst), ==, 11); + + tt_mem_op(evbuffer_pullup(src, -1), ==, "o", 1); + tt_mem_op(evbuffer_pullup(dst, -1), ==, "foofoofoofo", 11); + + end: + evbuffer_free(src); + evbuffer_free(dst); + evbuffer_free(buf); +} + +static void +test_evbuffer_remove_buffer_with_empty_front(void *ptr) +{ + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + buf2 = evbuffer_new(); + tt_assert(buf2); + + tt_int_op(evbuffer_add_reference(buf1, "foo", 3, NULL, NULL), ==, 0); + tt_int_op(evbuffer_prepend(buf1, "", 0), ==, 0); + tt_int_op(evbuffer_remove_buffer(buf1, buf2, 1), ==, 1); + tt_int_op(evbuffer_add(buf1, "bar", 3), ==, 0); + tt_mem_op(evbuffer_pullup(buf1, -1), ==, "oobar", 5); + + evbuffer_validate(buf1); + evbuffer_validate(buf2); + + end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +test_evbuffer_remove_buffer_adjust_last_with_datap_with_empty(void *ptr) +{ + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + buf2 = evbuffer_new(); + tt_assert(buf2); + + tt_int_op(evbuffer_add(buf1, "aaaaaa", 6), ==, 0); + + // buf1: aaaaaab + // buf2: + { + struct evbuffer_iovec iovecs[2]; + /** we want two chains, to leave one chain empty */ + tt_int_op(evbuffer_reserve_space(buf1, 971, iovecs, 2), ==, 2); + tt_int_op(iovecs[0].iov_len, >=, 1); + tt_int_op(iovecs[1].iov_len, >=, 1); + tt_assert(*(char *)(iovecs[0].iov_base) = 'b'); + tt_assert(iovecs[0].iov_len = 1); + tt_int_op(evbuffer_commit_space(buf1, iovecs, 1), ==, 0); + } + + // buf1: aaaaaab + // buf2: dddcc + tt_int_op(evbuffer_add(buf2, "cc", 2), ==, 0); + tt_int_op(evbuffer_prepend(buf2, "ddd", 3), ==, 0); + + // buf1: + // buf2: aaaaaabdddcc + tt_int_op(evbuffer_prepend_buffer(buf2, buf1), ==, 0); + + // buf1: aaaaaabdddcc + // buf2: + tt_int_op(evbuffer_add_buffer(buf1, buf2), ==, 0); + + // buf1: c + // buf2: aaaaaabdddc + tt_int_op(evbuffer_remove_buffer(buf1, buf2, 11), ==, 11); + + // This fails today, we observe "aaaaaabcddd" instead! 
+ tt_mem_op(evbuffer_pullup(buf2, -1), ==, "aaaaaabdddc", 11); + + evbuffer_validate(buf1); + evbuffer_validate(buf2); + + end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +test_evbuffer_add_buffer_with_empty(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + struct evbuffer *buf = evbuffer_new(); + + evbuffer_add(buf, "foo", 3); + + evbuffer_add_reference(src, "foo", 3, NULL, NULL); + evbuffer_add_reference(src, NULL, 0, NULL, NULL); + evbuffer_add_buffer(src, buf); + + evbuffer_add(buf, "foo", 3); + + evbuffer_add_reference(dst, "foo", 3, NULL, NULL); + evbuffer_add_reference(dst, NULL, 0, NULL, NULL); + evbuffer_add_buffer(dst, buf); + + tt_int_op(evbuffer_get_length(src), ==, 6); + tt_int_op(evbuffer_get_length(dst), ==, 6); + + evbuffer_validate(src); + evbuffer_validate(dst); + + end: + evbuffer_free(src); + evbuffer_free(dst); + evbuffer_free(buf); +} + +static void +test_evbuffer_add_buffer_with_empty2(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + struct evbuffer *buf = evbuffer_new(); + + evbuffer_add(buf, "foo", 3); + + evbuffer_add_reference(src, NULL, 0, NULL, NULL); + evbuffer_add_buffer(src, buf); + + evbuffer_add(buf, "foo", 3); + + evbuffer_add_reference(dst, NULL, 0, NULL, NULL); + evbuffer_add_buffer(dst, buf); + + tt_int_op(evbuffer_get_length(src), ==, 3); + tt_int_op(evbuffer_get_length(dst), ==, 3); + + evbuffer_validate(src); + evbuffer_validate(dst); + + end: + evbuffer_free(src); + evbuffer_free(dst); + evbuffer_free(buf); +} + +static void +test_evbuffer_reserve2(void *ptr) +{ + /* Test the two-vector cases of reserve/commit. */ + struct evbuffer *buf = evbuffer_new(); + int n, i; + struct evbuffer_iovec v[2]; + size_t remaining; + char *cp, *cp2; + + /* First chunk will necessarily be one chunk. Use 512 bytes of it.*/ + n = evbuffer_reserve_space(buf, 1024, v, 2); + tt_int_op(n, ==, 1); + tt_int_op(evbuffer_get_length(buf), ==, 0); + tt_assert(v[0].iov_base != NULL); + tt_int_op(v[0].iov_len, >=, 1024); + memset(v[0].iov_base, 'X', 512); + cp = v[0].iov_base; + remaining = v[0].iov_len - 512; + v[0].iov_len = 512; + evbuffer_validate(buf); + tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1)); + tt_int_op(evbuffer_get_length(buf), ==, 512); + evbuffer_validate(buf); + + /* Ask for another same-chunk request, in an existing chunk. Use 8 + * bytes of it. */ + n = evbuffer_reserve_space(buf, 32, v, 2); + tt_int_op(n, ==, 1); + tt_assert(cp + 512 == v[0].iov_base); + tt_int_op(remaining, ==, v[0].iov_len); + memset(v[0].iov_base, 'Y', 8); + v[0].iov_len = 8; + tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1)); + tt_int_op(evbuffer_get_length(buf), ==, 520); + remaining -= 8; + evbuffer_validate(buf); + + /* Now ask for a request that will be split. Use only one byte of it, + though. */ + n = evbuffer_reserve_space(buf, remaining+64, v, 2); + tt_int_op(n, ==, 2); + tt_assert(cp + 520 == v[0].iov_base); + tt_int_op(remaining, ==, v[0].iov_len); + tt_assert(v[1].iov_base); + tt_assert(v[1].iov_len >= 64); + cp2 = v[1].iov_base; + memset(v[0].iov_base, 'Z', 1); + v[0].iov_len = 1; + tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1)); + tt_int_op(evbuffer_get_length(buf), ==, 521); + remaining -= 1; + evbuffer_validate(buf); + + /* Now ask for a request that will be split. Use some of the first + * part and some of the second. 
*/ + n = evbuffer_reserve_space(buf, remaining+64, v, 2); + evbuffer_validate(buf); + tt_int_op(n, ==, 2); + tt_assert(cp + 521 == v[0].iov_base); + tt_int_op(remaining, ==, v[0].iov_len); + tt_assert(v[1].iov_base == cp2); + tt_assert(v[1].iov_len >= 64); + memset(v[0].iov_base, 'W', 400); + v[0].iov_len = 400; + memset(v[1].iov_base, 'x', 60); + v[1].iov_len = 60; + tt_int_op(0, ==, evbuffer_commit_space(buf, v, 2)); + tt_int_op(evbuffer_get_length(buf), ==, 981); + evbuffer_validate(buf); + + /* Now peek to make sure stuff got made how we like. */ + memset(v,0,sizeof(v)); + n = evbuffer_peek(buf, -1, NULL, v, 2); + tt_int_op(n, ==, 2); + tt_int_op(v[0].iov_len, ==, 921); + tt_int_op(v[1].iov_len, ==, 60); + + cp = v[0].iov_base; + for (i=0; i<512; ++i) + tt_int_op(cp[i], ==, 'X'); + for (i=512; i<520; ++i) + tt_int_op(cp[i], ==, 'Y'); + for (i=520; i<521; ++i) + tt_int_op(cp[i], ==, 'Z'); + for (i=521; i<921; ++i) + tt_int_op(cp[i], ==, 'W'); + + cp = v[1].iov_base; + for (i=0; i<60; ++i) + tt_int_op(cp[i], ==, 'x'); + +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_reserve_many(void *ptr) +{ + /* This is a glass-box test to handle expanding a buffer with more + * chunks and reallocating chunks as needed */ + struct evbuffer *buf = evbuffer_new(); + struct evbuffer_iovec v[8]; + int n; + size_t sz; + int add_data = ptr && !strcmp(ptr, "add"); + int fill_first = ptr && !strcmp(ptr, "fill"); + char *cp1, *cp2; + + /* When reserving the the first chunk, we just allocate it */ + n = evbuffer_reserve_space(buf, 128, v, 2); + evbuffer_validate(buf); + tt_int_op(n, ==, 1); + tt_assert(v[0].iov_len >= 128); + sz = v[0].iov_len; + cp1 = v[0].iov_base; + if (add_data) { + *(char*)v[0].iov_base = 'X'; + v[0].iov_len = 1; + n = evbuffer_commit_space(buf, v, 1); + tt_int_op(n, ==, 0); + } else if (fill_first) { + memset(v[0].iov_base, 'X', v[0].iov_len); + n = evbuffer_commit_space(buf, v, 1); + tt_int_op(n, ==, 0); + n = evbuffer_reserve_space(buf, 128, v, 2); + tt_int_op(n, ==, 1); + sz = v[0].iov_len; + tt_assert(v[0].iov_base != cp1); + cp1 = v[0].iov_base; + } + + /* Make another chunk get added. */ + n = evbuffer_reserve_space(buf, sz+128, v, 2); + evbuffer_validate(buf); + tt_int_op(n, ==, 2); + sz = v[0].iov_len + v[1].iov_len; + tt_int_op(sz, >=, v[0].iov_len+128); + if (add_data) { + tt_assert(v[0].iov_base == cp1 + 1); + } else { + tt_assert(v[0].iov_base == cp1); + } + cp1 = v[0].iov_base; + cp2 = v[1].iov_base; + + /* And a third chunk. */ + n = evbuffer_reserve_space(buf, sz+128, v, 3); + evbuffer_validate(buf); + tt_int_op(n, ==, 3); + tt_assert(cp1 == v[0].iov_base); + tt_assert(cp2 == v[1].iov_base); + sz = v[0].iov_len + v[1].iov_len + v[2].iov_len; + + /* Now force a reallocation by asking for more space in only 2 + * buffers. 
*/ + n = evbuffer_reserve_space(buf, sz+128, v, 2); + evbuffer_validate(buf); + if (add_data) { + tt_int_op(n, ==, 2); + tt_assert(cp1 == v[0].iov_base); + } else { + tt_int_op(n, ==, 1); + } + +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_reserve_with_empty(void *ptr) +{ + struct evbuffer *buf; + struct evbuffer_iovec v[2]; + + tt_assert(buf = evbuffer_new()); + evbuffer_add(buf, "a", 1); + tt_int_op(evbuffer_reserve_space(buf, 1<<12, v, 2), ==, 2); + v[0].iov_len = 1; + *(char *)v[0].iov_base = 'b'; + tt_int_op(evbuffer_commit_space(buf, v, 1), ==, 0); + evbuffer_add(buf, "c", 1); + tt_mem_op(evbuffer_pullup(buf, -1), ==, "abc", 2); + + evbuffer_validate(buf); + + end: + if (buf) + evbuffer_free(buf); +} + +/* regression for evbuffer_expand_fast_() with invalid last_with_datap that has + * been left after evbuffer_prepend() with empty chain in it */ +static void +test_evbuffer_reserve_invalid_last_with_datap(void *ptr) +{ + struct evbuffer *buf = NULL; + struct evbuffer_iovec vec[2]; + const int nvec = ARRAY_SIZE(vec); + int i, avec; + + buf = evbuffer_new(); + tt_assert(buf); + + /* prepend with an empty chain */ + evbuffer_add_reference(buf, "", 0, NULL, NULL); + evbuffer_prepend(buf, "foo", 3); + /* after invalid last_with_datap will create new chain */ + evbuffer_add(buf, "", 0); + /* we need to create at least 2 "used" (in evbuffer_expand_fast_()) chains */ + tt_int_op(avec = evbuffer_reserve_space(buf, 1<<12, vec, nvec), >=, 1); + for (i = 0; i < avec; ++i) + vec[i].iov_len = 0; + tt_int_op(evbuffer_commit_space(buf, vec, avec), ==, 0); + + /* and an actual problem, that triggers an assert(chain == buf->first) in + * evbuffer_expand_fast_() */ + tt_int_op(evbuffer_reserve_space(buf, 1<<13, vec, nvec), >=, 1); + + evbuffer_validate(buf); + +end: + if (buf) + evbuffer_free(buf); +} + +static void +test_evbuffer_expand(void *ptr) +{ + char data[4096]; + struct evbuffer *buf; + size_t a,w,u; + void *buffer; + + memset(data, 'X', sizeof(data)); + + /* Make sure that expand() works on an empty buffer */ + buf = evbuffer_new(); + tt_int_op(evbuffer_expand(buf, 20000), ==, 0); + evbuffer_validate(buf); + a=w=u=0; + evbuffer_get_waste(buf, &a,&w,&u); + tt_assert(w == 0); + tt_assert(u == 0); + tt_assert(a >= 20000); + tt_assert(buf->first); + tt_assert(buf->first == buf->last); + tt_assert(buf->first->off == 0); + tt_assert(buf->first->buffer_len >= 20000); + + /* Make sure that expand() works as a no-op when there's enough + * contiguous space already. */ + buffer = buf->first->buffer; + evbuffer_add(buf, data, 1024); + tt_int_op(evbuffer_expand(buf, 1024), ==, 0); + tt_assert(buf->first->buffer == buffer); + evbuffer_validate(buf); + evbuffer_free(buf); + + /* Make sure that expand() can work by moving misaligned data + * when it makes sense to do so. 
*/ + buf = evbuffer_new(); + evbuffer_add(buf, data, 400); + { + int n = (int)(buf->first->buffer_len - buf->first->off - 1); + tt_assert(n < (int)sizeof(data)); + evbuffer_add(buf, data, n); + } + tt_assert(buf->first == buf->last); + tt_assert(buf->first->off == buf->first->buffer_len - 1); + evbuffer_drain(buf, buf->first->off - 1); + tt_assert(1 == evbuffer_get_length(buf)); + tt_assert(buf->first->misalign > 0); + tt_assert(buf->first->off == 1); + buffer = buf->first->buffer; + tt_assert(evbuffer_expand(buf, 40) == 0); + tt_assert(buf->first == buf->last); + tt_assert(buf->first->off == 1); + tt_assert(buf->first->buffer == buffer); + tt_assert(buf->first->misalign == 0); + evbuffer_validate(buf); + evbuffer_free(buf); + + /* add, expand, pull-up: This used to crash libevent. */ + buf = evbuffer_new(); + + evbuffer_add(buf, data, sizeof(data)); + evbuffer_add(buf, data, sizeof(data)); + evbuffer_add(buf, data, sizeof(data)); + + evbuffer_validate(buf); + evbuffer_expand(buf, 1024); + evbuffer_validate(buf); + evbuffer_pullup(buf, -1); + evbuffer_validate(buf); + +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_expand_overflow(void *ptr) +{ + struct evbuffer *buf; + + buf = evbuffer_new(); + evbuffer_add(buf, "1", 1); + evbuffer_expand(buf, EVBUFFER_CHAIN_MAX); + evbuffer_validate(buf); + + evbuffer_expand(buf, EV_SIZE_MAX); + evbuffer_validate(buf); + +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_add1(void *ptr) +{ + struct evbuffer *buf; + char *str; + + buf = evbuffer_new(); + evbuffer_add(buf, "1", 1); + evbuffer_validate(buf); + evbuffer_expand(buf, 2048); + evbuffer_validate(buf); + evbuffer_add(buf, "2", 1); + evbuffer_validate(buf); + evbuffer_add_printf(buf, "3"); + evbuffer_validate(buf); + + tt_assert(evbuffer_get_length(buf) == 3); + str = (char *)evbuffer_pullup(buf, -1); + tt_assert(str[0] == '1'); + tt_assert(str[1] == '2'); + tt_assert(str[2] == '3'); +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_add2(void *ptr) +{ + struct evbuffer *buf; + static char data[4096]; + int data_len = MIN_BUFFER_SIZE-EVBUFFER_CHAIN_SIZE-10; + char *str; + int len; + + memset(data, 'P', sizeof(data)); + buf = evbuffer_new(); + evbuffer_add(buf, data, data_len); + evbuffer_validate(buf); + evbuffer_expand(buf, 100); + evbuffer_validate(buf); + evbuffer_add(buf, "2", 1); + evbuffer_validate(buf); + evbuffer_add_printf(buf, "3"); + evbuffer_validate(buf); + + len = evbuffer_get_length(buf); + tt_assert(len == data_len+2); + str = (char *)evbuffer_pullup(buf, -1); + tt_assert(str[len-3] == 'P'); + tt_assert(str[len-2] == '2'); + tt_assert(str[len-1] == '3'); +end: + evbuffer_free(buf); +} + +static int reference_cb_called; +static void +reference_cb(const void *data, size_t len, void *extra) +{ + tt_str_op(data, ==, "this is what we add as read-only memory."); + tt_int_op(len, ==, strlen(data)); + tt_want(extra == (void *)0xdeadaffe); + ++reference_cb_called; +end: + ; +} + +static void +test_evbuffer_reference(void *ptr) +{ + struct evbuffer *src = evbuffer_new(); + struct evbuffer *dst = evbuffer_new(); + struct evbuffer_iovec v[1]; + const char *data = "this is what we add as read-only memory."; + reference_cb_called = 0; + + tt_assert(evbuffer_add_reference(src, data, strlen(data), + reference_cb, (void *)0xdeadaffe) != -1); + + evbuffer_reserve_space(dst, strlen(data), v, 1); + tt_assert(evbuffer_remove(src, v[0].iov_base, 10) != -1); + + evbuffer_validate(src); + evbuffer_validate(dst); + + /* make sure that we don't write data at the beginning */ 
+ evbuffer_prepend(src, "aaaaa", 5); + evbuffer_validate(src); + evbuffer_drain(src, 5); + + tt_assert(evbuffer_remove(src, ((char*)(v[0].iov_base)) + 10, + strlen(data) - 10) != -1); + + v[0].iov_len = strlen(data); + + evbuffer_commit_space(dst, v, 1); + evbuffer_validate(src); + evbuffer_validate(dst); + + tt_int_op(reference_cb_called, ==, 1); + + tt_assert(!memcmp(evbuffer_pullup(dst, strlen(data)), + data, strlen(data))); + evbuffer_validate(dst); + + end: + evbuffer_free(dst); + evbuffer_free(src); +} + +static void +test_evbuffer_reference2(void *ptr) +{ + struct evbuffer *buf; + static char data[4096]; + int data_len = MIN_BUFFER_SIZE-EVBUFFER_CHAIN_SIZE-10; + char *str; + int len; + + memset(data, 'P', sizeof(data)); + buf = evbuffer_new(); + evbuffer_add(buf, data, data_len); + evbuffer_validate(buf); + evbuffer_expand(buf, 100); + evbuffer_validate(buf); + evbuffer_add_reference(buf, "2", 1, no_cleanup, NULL); + evbuffer_validate(buf); + evbuffer_add_printf(buf, "3"); + evbuffer_validate(buf); + + len = evbuffer_get_length(buf); + tt_assert(len == data_len+2); + str = (char *)evbuffer_pullup(buf, -1); + tt_assert(str[len-3] == 'P'); + tt_assert(str[len-2] == '2'); + tt_assert(str[len-1] == '3'); +end: + evbuffer_free(buf); +} + +static struct event_base *addfile_test_event_base; +static int addfile_test_done_writing; +static int addfile_test_total_written; +static int addfile_test_total_read; + +static void +addfile_test_writecb(evutil_socket_t fd, short what, void *arg) +{ + struct evbuffer *b = arg; + int r; + evbuffer_validate(b); + while (evbuffer_get_length(b)) { + r = evbuffer_write(b, fd); + if (r > 0) { + addfile_test_total_written += r; + TT_BLATHER(("Wrote %d/%d bytes", r, addfile_test_total_written)); + } else { + int e = evutil_socket_geterror(fd); + if (EVUTIL_ERR_RW_RETRIABLE(e)) + return; + tt_fail_perror("write"); + event_base_loopexit(addfile_test_event_base,NULL); + } + evbuffer_validate(b); + } + addfile_test_done_writing = 1; + return; +end: + event_base_loopexit(addfile_test_event_base,NULL); +} + +static void +addfile_test_readcb(evutil_socket_t fd, short what, void *arg) +{ + struct evbuffer *b = arg; + int e, r = 0; + do { + r = evbuffer_read(b, fd, 1024); + if (r > 0) { + addfile_test_total_read += r; + TT_BLATHER(("Read %d/%d bytes", r, addfile_test_total_read)); + } + } while (r > 0); + if (r < 0) { + e = evutil_socket_geterror(fd); + if (! 
EVUTIL_ERR_RW_RETRIABLE(e)) { + tt_fail_perror("read"); + event_base_loopexit(addfile_test_event_base,NULL); + } + } + if (addfile_test_done_writing && + addfile_test_total_read >= addfile_test_total_written) { + event_base_loopexit(addfile_test_event_base,NULL); + } +} + +static void +test_evbuffer_add_file(void *ptr) +{ + struct basic_test_data *testdata = ptr; + const char *impl = testdata->setup_data; + struct evbuffer *src = evbuffer_new(), *dest = evbuffer_new(); + char *tmpfilename = NULL; + char *data = NULL; + const char *expect_data; + size_t datalen, expect_len; + const char *compare; + int fd = -1; + int want_ismapping = -1, want_cansendfile = -1; + unsigned flags = 0; + int use_segment = 1, use_bigfile = 0, map_from_offset = 0, + view_from_offset = 0; + struct evbuffer_file_segment *seg = NULL; + ev_off_t starting_offset = 0, mapping_len = -1; + ev_off_t segment_offset = 0, segment_len = -1; + struct event *rev=NULL, *wev=NULL; + struct event_base *base = testdata->base; + evutil_socket_t pair[2] = {-1, -1}; + struct evutil_weakrand_state seed = { 123456789U }; + + /* This test is highly parameterized based on substrings of its + * argument. The strings are: */ + tt_assert(impl); + if (strstr(impl, "nosegment")) { + /* If nosegment is set, use the older evbuffer_add_file + * interface */ + use_segment = 0; + } + if (strstr(impl, "bigfile")) { + /* If bigfile is set, use a 512K file. Else use a smaller + * one. */ + use_bigfile = 1; + } + if (strstr(impl, "map_offset")) { + /* If map_offset is set, we build the file segment starting + * from a point other than byte 0 and ending somewhere other + * than the last byte. Otherwise we map the whole thing */ + map_from_offset = 1; + } + if (strstr(impl, "offset_in_segment")) { + /* If offset_in_segment is set, we add a subsection of the + * file semgment starting from a point other than byte 0 of + * the segment. */ + view_from_offset = 1; + } + if (strstr(impl, "sendfile")) { + /* If sendfile is set, we try to use a sendfile/splice style + * backend. */ + flags = EVBUF_FS_DISABLE_MMAP; + want_cansendfile = 1; + want_ismapping = 0; + } else if (strstr(impl, "mmap")) { + /* If sendfile is set, we try to use a mmap/CreateFileMapping + * style backend. */ + flags = EVBUF_FS_DISABLE_SENDFILE; + want_ismapping = 1; + want_cansendfile = 0; + } else if (strstr(impl, "linear")) { + /* If linear is set, we try to use a read-the-whole-thing + * backend. */ + flags = EVBUF_FS_DISABLE_SENDFILE|EVBUF_FS_DISABLE_MMAP; + want_ismapping = 0; + want_cansendfile = 0; + } else if (strstr(impl, "default")) { + /* The caller doesn't care which backend we use. */ + ; + } else { + /* The caller must choose a backend. 
*/ + TT_DIE(("Didn't recognize the implementation")); + } + + if (use_bigfile) { + unsigned int i; + datalen = 1024*512; + data = malloc(1024*512); + tt_assert(data); + for (i = 0; i < datalen; ++i) + data[i] = (char)evutil_weakrand_(&seed); + } else { + data = strdup("here is a relatively small string."); + tt_assert(data); + datalen = strlen(data); + } + + fd = regress_make_tmpfile(data, datalen, &tmpfilename); + + if (map_from_offset) { + starting_offset = datalen/4 + 1; + mapping_len = datalen / 2 - 1; + expect_data = data + starting_offset; + expect_len = mapping_len; + } else { + expect_data = data; + expect_len = datalen; + } + if (view_from_offset) { + tt_assert(use_segment); /* Can't do this with add_file*/ + segment_offset = expect_len / 3; + segment_len = expect_len / 2; + expect_data = expect_data + segment_offset; + expect_len = segment_len; + } + + if (use_segment) { + seg = evbuffer_file_segment_new(fd, starting_offset, + mapping_len, flags); + tt_assert(seg); + if (want_ismapping >= 0) { + if (seg->is_mapping != (unsigned)want_ismapping) + tt_skip(); + } + if (want_cansendfile >= 0) { + if (seg->can_sendfile != (unsigned)want_cansendfile) + tt_skip(); + } + } + + /* Say that it drains to a fd so that we can use sendfile. */ + evbuffer_set_flags(src, EVBUFFER_FLAG_DRAINS_TO_FD); + +#if defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) + /* We need to use a pair of AF_INET sockets, since Solaris + doesn't support sendfile() over AF_UNIX. */ + if (evutil_ersatz_socketpair_(AF_INET, SOCK_STREAM, 0, pair) == -1) + tt_abort_msg("ersatz_socketpair failed"); +#else + if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) + tt_abort_msg("socketpair failed"); +#endif + evutil_make_socket_nonblocking(pair[0]); + evutil_make_socket_nonblocking(pair[1]); + + tt_assert(fd != -1); + + if (use_segment) { + tt_assert(evbuffer_add_file_segment(src, seg, + segment_offset, segment_len)!=-1); + } else { + tt_assert(evbuffer_add_file(src, fd, starting_offset, + mapping_len) != -1); + } + + evbuffer_validate(src); + + addfile_test_event_base = base; + addfile_test_done_writing = 0; + addfile_test_total_written = 0; + addfile_test_total_read = 0; + + wev = event_new(base, pair[0], EV_WRITE|EV_PERSIST, + addfile_test_writecb, src); + rev = event_new(base, pair[1], EV_READ|EV_PERSIST, + addfile_test_readcb, dest); + + event_add(wev, NULL); + event_add(rev, NULL); + event_base_dispatch(base); + + evbuffer_validate(src); + evbuffer_validate(dest); + + tt_assert(addfile_test_done_writing); + tt_int_op(addfile_test_total_written, ==, expect_len); + tt_int_op(addfile_test_total_read, ==, expect_len); + + compare = (char *)evbuffer_pullup(dest, expect_len); + tt_assert(compare != NULL); + if (memcmp(compare, expect_data, expect_len)) { + tt_abort_msg("Data from add_file differs."); + } + + evbuffer_validate(dest); + end: + if (data) + free(data); + if (seg) + evbuffer_file_segment_free(seg); + if (src) + evbuffer_free(src); + if (dest) + evbuffer_free(dest); + if (pair[0] >= 0) + evutil_closesocket(pair[0]); + if (pair[1] >= 0) + evutil_closesocket(pair[1]); + if (wev) + event_free(wev); + if (rev) + event_free(rev); + if (tmpfilename) { + unlink(tmpfilename); + free(tmpfilename); + } +} + +static int file_segment_cleanup_cb_called_count = 0; +static struct evbuffer_file_segment const* file_segment_cleanup_cb_called_with = NULL; +static int file_segment_cleanup_cb_called_with_flags = 0; +static void* file_segment_cleanup_cb_called_with_arg = NULL; +static void 
+file_segment_cleanup_cp(struct evbuffer_file_segment const* seg, int flags, void* arg) +{ + ++file_segment_cleanup_cb_called_count; + file_segment_cleanup_cb_called_with = seg; + file_segment_cleanup_cb_called_with_flags = flags; + file_segment_cleanup_cb_called_with_arg = arg; +} + +static void +test_evbuffer_file_segment_add_cleanup_cb(void* ptr) +{ + char *tmpfilename = NULL; + int fd = -1; + struct evbuffer *evb = NULL; + struct evbuffer_file_segment *seg = NULL, *segptr; + char const* arg = "token"; + + fd = regress_make_tmpfile("file_segment_test_file", 22, &tmpfilename); + tt_int_op(fd, >=, 0); + + evb = evbuffer_new(); + tt_assert(evb); + + segptr = seg = evbuffer_file_segment_new(fd, 0, -1, 0); + tt_assert(seg); + + evbuffer_file_segment_add_cleanup_cb( + seg, &file_segment_cleanup_cp, (void*)arg); + + tt_assert(fd != -1); + + tt_assert(evbuffer_add_file_segment(evb, seg, 0, -1)!=-1); + + evbuffer_validate(evb); + + tt_int_op(file_segment_cleanup_cb_called_count, ==, 0); + evbuffer_file_segment_free(seg); + seg = NULL; /* Prevent double-free. */ + + tt_int_op(file_segment_cleanup_cb_called_count, ==, 0); + evbuffer_free(evb); + evb = NULL; /* pevent double-free */ + + tt_int_op(file_segment_cleanup_cb_called_count, ==, 1); + tt_assert(file_segment_cleanup_cb_called_with == segptr); + tt_assert(file_segment_cleanup_cb_called_with_flags == 0); + tt_assert(file_segment_cleanup_cb_called_with_arg == (void*)arg); + +end: + if (evb) + evbuffer_free(evb); + if (seg) + evbuffer_file_segment_free(seg); + if (tmpfilename) { + unlink(tmpfilename); + free(tmpfilename); + } +} + +#ifndef EVENT__DISABLE_MM_REPLACEMENT +static void * +failing_malloc(size_t how_much) +{ + errno = ENOMEM; + return NULL; +} +#endif + +static void +test_evbuffer_readln(void *ptr) +{ + struct evbuffer *evb = evbuffer_new(); + struct evbuffer *evb_tmp = evbuffer_new(); + const char *s; + char *cp = NULL; + size_t sz; + +#define tt_line_eq(content) \ + TT_STMT_BEGIN \ + if (!cp || sz != strlen(content) || strcmp(cp, content)) { \ + TT_DIE(("Wanted %s; got %s [%d]", content, cp, (int)sz)); \ + } \ + TT_STMT_END + + /* Test EOL_ANY. 
*/ + s = "complex silly newline\r\n\n\r\n\n\rmore\0\n"; + evbuffer_add(evb, s, strlen(s)+2); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY); + tt_line_eq("complex silly newline"); + free(cp); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY); + if (!cp || sz != 5 || memcmp(cp, "more\0\0", 6)) + tt_abort_msg("Not as expected"); + tt_uint_op(evbuffer_get_length(evb), ==, 0); + evbuffer_validate(evb); + s = "\nno newline"; + evbuffer_add(evb, s, strlen(s)); + free(cp); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY); + tt_line_eq(""); + free(cp); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY); + tt_assert(!cp); + evbuffer_validate(evb); + evbuffer_drain(evb, evbuffer_get_length(evb)); + tt_assert(evbuffer_get_length(evb) == 0); + evbuffer_validate(evb); + + /* Test EOL_CRLF */ + s = "Line with\rin the middle\nLine with good crlf\r\n\nfinal\n"; + evbuffer_add(evb, s, strlen(s)); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF); + tt_line_eq("Line with\rin the middle"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF); + tt_line_eq("Line with good crlf"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF); + tt_line_eq(""); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF); + tt_line_eq("final"); + s = "x"; + evbuffer_validate(evb); + evbuffer_add(evb, s, 1); + evbuffer_validate(evb); + free(cp); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF); + tt_assert(!cp); + evbuffer_validate(evb); + + /* Test CRLF_STRICT */ + s = " and a bad crlf\nand a good one\r\n\r\nMore\r"; + evbuffer_add(evb, s, strlen(s)); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq("x and a bad crlf\nand a good one"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq(""); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_assert(!cp); + evbuffer_validate(evb); + evbuffer_add(evb, "\n", 1); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq("More"); + free(cp); + tt_assert(evbuffer_get_length(evb) == 0); + evbuffer_validate(evb); + + s = "An internal CR\r is not an eol\r\nNor is a lack of one"; + evbuffer_add(evb, s, strlen(s)); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq("An internal CR\r is not an eol"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_assert(!cp); + evbuffer_validate(evb); + + evbuffer_add(evb, "\r\n", 2); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq("Nor is a lack of one"); + free(cp); + tt_assert(evbuffer_get_length(evb) == 0); + evbuffer_validate(evb); + + /* Test LF */ + s = "An\rand a nl\n\nText"; + evbuffer_add(evb, s, strlen(s)); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_line_eq("An\rand a nl"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_line_eq(""); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_assert(!cp); + free(cp); + evbuffer_add(evb, "\n", 1); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + 
tt_line_eq("Text"); + free(cp); + evbuffer_validate(evb); + + /* Test NUL */ + tt_int_op(evbuffer_get_length(evb), ==, 0); + { + char x[] = + "NUL\n\0\0" + "The all-zeros character which may serve\0" + "to accomplish time fill\0and media fill"; + /* Add all but the final NUL of x. */ + evbuffer_add(evb, x, sizeof(x)-1); + } + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL); + tt_line_eq("NUL\n"); + free(cp); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL); + tt_line_eq(""); + free(cp); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL); + tt_line_eq("The all-zeros character which may serve"); + free(cp); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL); + tt_line_eq("to accomplish time fill"); + free(cp); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL); + tt_ptr_op(cp, ==, NULL); + evbuffer_drain(evb, -1); + + /* Test CRLF_STRICT - across boundaries*/ + s = " and a bad crlf\nand a good one\r"; + evbuffer_add(evb_tmp, s, strlen(s)); + evbuffer_validate(evb); + evbuffer_add_buffer(evb, evb_tmp); + evbuffer_validate(evb); + s = "\n\r"; + evbuffer_add(evb_tmp, s, strlen(s)); + evbuffer_validate(evb); + evbuffer_add_buffer(evb, evb_tmp); + evbuffer_validate(evb); + s = "\nMore\r"; + evbuffer_add(evb_tmp, s, strlen(s)); + evbuffer_validate(evb); + evbuffer_add_buffer(evb, evb_tmp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq(" and a bad crlf\nand a good one"); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq(""); + free(cp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_assert(!cp); + free(cp); + evbuffer_validate(evb); + evbuffer_add(evb, "\n", 1); + evbuffer_validate(evb); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT); + tt_line_eq("More"); + free(cp); cp = NULL; + evbuffer_validate(evb); + tt_assert(evbuffer_get_length(evb) == 0); + + /* Test memory problem*/ + s = "one line\ntwo line\nblue line"; + evbuffer_add(evb_tmp, s, strlen(s)); + evbuffer_validate(evb); + evbuffer_add_buffer(evb, evb_tmp); + evbuffer_validate(evb); + + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_line_eq("one line"); + free(cp); cp = NULL; + evbuffer_validate(evb); + + /* the next call to readline should fail */ +#ifndef EVENT__DISABLE_MM_REPLACEMENT + event_set_mem_functions(failing_malloc, realloc, free); + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_assert(cp == NULL); + evbuffer_validate(evb); + + /* now we should get the next line back */ + event_set_mem_functions(malloc, realloc, free); +#endif + cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF); + tt_line_eq("two line"); + free(cp); cp = NULL; + evbuffer_validate(evb); + + end: + evbuffer_free(evb); + evbuffer_free(evb_tmp); + if (cp) free(cp); +} + +static void +test_evbuffer_search_eol(void *ptr) +{ + struct evbuffer *buf = evbuffer_new(); + struct evbuffer_ptr ptr1, ptr2; + const char *s; + size_t eol_len; + + s = "string! 
\r\n\r\nx\n"; + evbuffer_add(buf, s, strlen(s)); + eol_len = -1; + ptr1 = evbuffer_search_eol(buf, NULL, &eol_len, EVBUFFER_EOL_CRLF); + tt_int_op(ptr1.pos, ==, 8); + tt_int_op(eol_len, ==, 2); + + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF); + tt_int_op(ptr2.pos, ==, 8); + tt_int_op(eol_len, ==, 2); + + evbuffer_ptr_set(buf, &ptr1, 1, EVBUFFER_PTR_ADD); + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF); + tt_int_op(ptr2.pos, ==, 9); + tt_int_op(eol_len, ==, 1); + + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF_STRICT); + tt_int_op(ptr2.pos, ==, 10); + tt_int_op(eol_len, ==, 2); + + eol_len = -1; + ptr1 = evbuffer_search_eol(buf, NULL, &eol_len, EVBUFFER_EOL_LF); + tt_int_op(ptr1.pos, ==, 9); + tt_int_op(eol_len, ==, 1); + + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF); + tt_int_op(ptr2.pos, ==, 9); + tt_int_op(eol_len, ==, 1); + + evbuffer_ptr_set(buf, &ptr1, 1, EVBUFFER_PTR_ADD); + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF); + tt_int_op(ptr2.pos, ==, 11); + tt_int_op(eol_len, ==, 1); + + tt_assert(evbuffer_ptr_set(buf, &ptr1, evbuffer_get_length(buf), EVBUFFER_PTR_SET) == 0); + eol_len = -1; + ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF); + tt_int_op(ptr2.pos, ==, -1); + tt_int_op(eol_len, ==, 0); + +end: + evbuffer_free(buf); +} + +static void +test_evbuffer_iterative(void *ptr) +{ + struct evbuffer *buf = evbuffer_new(); + const char *abc = "abcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyz"; + unsigned i, j, sum, n; + + sum = 0; + n = 0; + for (i = 0; i < 1000; ++i) { + for (j = 1; j < strlen(abc); ++j) { + char format[32]; + evutil_snprintf(format, sizeof(format), "%%%u.%us", j, j); + evbuffer_add_printf(buf, format, abc); + + /* Only check for rep violations every so often. + Walking over the whole list of chains can get + pretty expensive as it gets long. + */ + if ((n % 337) == 0) + evbuffer_validate(buf); + + sum += j; + n++; + } + } + evbuffer_validate(buf); + + tt_uint_op(sum, ==, evbuffer_get_length(buf)); + + { + size_t a,w,u; + a=w=u=0; + evbuffer_get_waste(buf, &a, &w, &u); + if (0) + printf("Allocated: %u.\nWasted: %u.\nUsed: %u.", + (unsigned)a, (unsigned)w, (unsigned)u); + tt_assert( ((double)w)/a < .125); + } + end: + evbuffer_free(buf); + +} + +static void +test_evbuffer_find(void *ptr) +{ + unsigned char* p; + const char* test1 = "1234567890\r\n"; + const char* test2 = "1234567890\r"; +#define EVBUFFER_INITIAL_LENGTH 256 + char test3[EVBUFFER_INITIAL_LENGTH]; + unsigned int i; + struct evbuffer * buf = evbuffer_new(); + + tt_assert(buf); + + /* make sure evbuffer_find doesn't match past the end of the buffer */ + evbuffer_add(buf, (unsigned char*)test1, strlen(test1)); + evbuffer_validate(buf); + evbuffer_drain(buf, strlen(test1)); + evbuffer_validate(buf); + evbuffer_add(buf, (unsigned char*)test2, strlen(test2)); + evbuffer_validate(buf); + p = evbuffer_find(buf, (unsigned char*)"\r\n", 2); + tt_want(p == NULL); + + /* + * drain the buffer and do another find; in r309 this would + * read past the allocated buffer causing a valgrind error. 
+ */ + evbuffer_drain(buf, strlen(test2)); + evbuffer_validate(buf); + for (i = 0; i < EVBUFFER_INITIAL_LENGTH; ++i) + test3[i] = 'a'; + test3[EVBUFFER_INITIAL_LENGTH - 1] = 'x'; + evbuffer_add(buf, (unsigned char *)test3, EVBUFFER_INITIAL_LENGTH); + evbuffer_validate(buf); + p = evbuffer_find(buf, (unsigned char *)"xy", 2); + tt_want(p == NULL); + + /* simple test for match at end of allocated buffer */ + p = evbuffer_find(buf, (unsigned char *)"ax", 2); + tt_assert(p != NULL); + tt_want(strncmp((char*)p, "ax", 2) == 0); + +end: + if (buf) + evbuffer_free(buf); +} + +static void +test_evbuffer_ptr_set(void *ptr) +{ + struct evbuffer *buf = evbuffer_new(); + struct evbuffer_ptr pos; + struct evbuffer_iovec v[1]; + + tt_assert(buf); + + tt_int_op(evbuffer_get_length(buf), ==, 0); + + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + tt_assert(pos.pos == 0); + tt_assert(evbuffer_ptr_set(buf, &pos, 1, EVBUFFER_PTR_ADD) == -1); + tt_assert(pos.pos == -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 1, EVBUFFER_PTR_SET) == -1); + tt_assert(pos.pos == -1); + + /* create some chains */ + evbuffer_reserve_space(buf, 5000, v, 1); + v[0].iov_len = 5000; + memset(v[0].iov_base, 1, v[0].iov_len); + evbuffer_commit_space(buf, v, 1); + evbuffer_validate(buf); + + evbuffer_reserve_space(buf, 4000, v, 1); + v[0].iov_len = 4000; + memset(v[0].iov_base, 2, v[0].iov_len); + evbuffer_commit_space(buf, v, 1); + + evbuffer_reserve_space(buf, 3000, v, 1); + v[0].iov_len = 3000; + memset(v[0].iov_base, 3, v[0].iov_len); + evbuffer_commit_space(buf, v, 1); + evbuffer_validate(buf); + + tt_int_op(evbuffer_get_length(buf), ==, 12000); + + tt_assert(evbuffer_ptr_set(buf, &pos, 13000, EVBUFFER_PTR_SET) == -1); + tt_assert(pos.pos == -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + tt_assert(pos.pos == 0); + tt_assert(evbuffer_ptr_set(buf, &pos, 13000, EVBUFFER_PTR_ADD) == -1); + + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + tt_assert(pos.pos == 0); + tt_assert(evbuffer_ptr_set(buf, &pos, 10000, EVBUFFER_PTR_ADD) == 0); + tt_assert(pos.pos == 10000); + tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == 0); + tt_assert(pos.pos == 11000); + tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == 0); + tt_assert(pos.pos == 12000); + tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == -1); + tt_assert(pos.pos == -1); + +end: + if (buf) + evbuffer_free(buf); +} + +static void +test_evbuffer_search(void *ptr) +{ + struct evbuffer *buf = evbuffer_new(); + struct evbuffer *tmp = evbuffer_new(); + struct evbuffer_ptr pos, end; + + tt_assert(buf); + tt_assert(tmp); + + pos = evbuffer_search(buf, "x", 1, NULL); + tt_int_op(pos.pos, ==, -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search(buf, "x", 1, &pos); + tt_int_op(pos.pos, ==, -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search_range(buf, "x", 1, &pos, &pos); + tt_int_op(pos.pos, ==, -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search_range(buf, "x", 1, &pos, NULL); + tt_int_op(pos.pos, ==, -1); + + /* set up our chains */ + evbuffer_add_printf(tmp, "hello"); /* 5 chars */ + evbuffer_add_buffer(buf, tmp); + evbuffer_add_printf(tmp, "foo"); /* 3 chars */ + evbuffer_add_buffer(buf, tmp); + evbuffer_add_printf(tmp, "cat"); /* 3 chars */ + evbuffer_add_buffer(buf, tmp); + evbuffer_add_printf(tmp, "attack"); + evbuffer_add_buffer(buf, tmp); + + pos = 
evbuffer_search(buf, "attack", 6, NULL); + tt_int_op(pos.pos, ==, 11); + pos = evbuffer_search(buf, "attacker", 8, NULL); + tt_int_op(pos.pos, ==, -1); + + /* test continuing search */ + pos = evbuffer_search(buf, "oc", 2, NULL); + tt_int_op(pos.pos, ==, 7); + pos = evbuffer_search(buf, "cat", 3, &pos); + tt_int_op(pos.pos, ==, 8); + pos = evbuffer_search(buf, "tacking", 7, &pos); + tt_int_op(pos.pos, ==, -1); + + evbuffer_ptr_set(buf, &pos, 5, EVBUFFER_PTR_SET); + pos = evbuffer_search(buf, "foo", 3, &pos); + tt_int_op(pos.pos, ==, 5); + + evbuffer_ptr_set(buf, &pos, 2, EVBUFFER_PTR_ADD); + pos = evbuffer_search(buf, "tat", 3, &pos); + tt_int_op(pos.pos, ==, 10); + + /* test bounded search. */ + /* Set "end" to the first t in "attack". */ + evbuffer_ptr_set(buf, &end, 12, EVBUFFER_PTR_SET); + pos = evbuffer_search_range(buf, "foo", 3, NULL, &end); + tt_int_op(pos.pos, ==, 5); + pos = evbuffer_search_range(buf, "foocata", 7, NULL, &end); + tt_int_op(pos.pos, ==, 5); + pos = evbuffer_search_range(buf, "foocatat", 8, NULL, &end); + tt_int_op(pos.pos, ==, -1); + pos = evbuffer_search_range(buf, "ack", 3, NULL, &end); + tt_int_op(pos.pos, ==, -1); + + /* Set "end" after the last byte in the buffer. */ + tt_assert(evbuffer_ptr_set(buf, &end, 17, EVBUFFER_PTR_SET) == 0); + + pos = evbuffer_search_range(buf, "attack", 6, NULL, &end); + tt_int_op(pos.pos, ==, 11); + tt_assert(evbuffer_ptr_set(buf, &pos, 11, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search_range(buf, "attack", 6, &pos, &end); + tt_int_op(pos.pos, ==, 11); + tt_assert(evbuffer_ptr_set(buf, &pos, 17, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search_range(buf, "attack", 6, &pos, &end); + tt_int_op(pos.pos, ==, -1); + tt_assert(evbuffer_ptr_set(buf, &pos, 17, EVBUFFER_PTR_SET) == 0); + pos = evbuffer_search_range(buf, "attack", 6, &pos, NULL); + tt_int_op(pos.pos, ==, -1); + +end: + if (buf) + evbuffer_free(buf); + if (tmp) + evbuffer_free(tmp); +} + +static void +log_change_callback(struct evbuffer *buffer, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + + size_t old_len = cbinfo->orig_size; + size_t new_len = old_len + cbinfo->n_added - cbinfo->n_deleted; + struct evbuffer *out = arg; + evbuffer_add_printf(out, "%lu->%lu; ", (unsigned long)old_len, + (unsigned long)new_len); +} +static void +self_draining_callback(struct evbuffer *evbuffer, size_t old_len, + size_t new_len, void *arg) +{ + if (new_len > old_len) + evbuffer_drain(evbuffer, new_len); +} + +static void +test_evbuffer_callbacks(void *ptr) +{ + struct evbuffer *buf = evbuffer_new(); + struct evbuffer *buf_out1 = evbuffer_new(); + struct evbuffer *buf_out2 = evbuffer_new(); + struct evbuffer_cb_entry *cb1, *cb2; + + tt_assert(buf); + tt_assert(buf_out1); + tt_assert(buf_out2); + + cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1); + cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2); + + /* Let's run through adding and deleting some stuff from the buffer + * and turning the callbacks on and off and removing them. The callback + * adds a summary of length changes to buf_out1/buf_out2 when called. */ + /* size: 0-> 36. 
*/ + evbuffer_add_printf(buf, "The %d magic words are spotty pudding", 2); + evbuffer_validate(buf); + evbuffer_cb_clear_flags(buf, cb2, EVBUFFER_CB_ENABLED); + evbuffer_drain(buf, 10); /*36->26*/ + evbuffer_validate(buf); + evbuffer_prepend(buf, "Hello", 5);/*26->31*/ + evbuffer_cb_set_flags(buf, cb2, EVBUFFER_CB_ENABLED); + evbuffer_add_reference(buf, "Goodbye", 7, NULL, NULL); /*31->38*/ + evbuffer_remove_cb_entry(buf, cb1); + evbuffer_validate(buf); + evbuffer_drain(buf, evbuffer_get_length(buf)); /*38->0*/; + tt_assert(-1 == evbuffer_remove_cb(buf, log_change_callback, NULL)); + evbuffer_add(buf, "X", 1); /* 0->1 */ + tt_assert(!evbuffer_remove_cb(buf, log_change_callback, buf_out2)); + evbuffer_validate(buf); + + tt_str_op((const char *) evbuffer_pullup(buf_out1, -1), ==, + "0->36; 36->26; 26->31; 31->38; "); + tt_str_op((const char *) evbuffer_pullup(buf_out2, -1), ==, + "0->36; 31->38; 38->0; 0->1; "); + evbuffer_drain(buf_out1, evbuffer_get_length(buf_out1)); + evbuffer_drain(buf_out2, evbuffer_get_length(buf_out2)); + /* Let's test the obsolete buffer_setcb function too. */ + cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1); + tt_assert(cb1 != NULL); + cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2); + tt_assert(cb2 != NULL); + tt_int_op(evbuffer_setcb(buf, self_draining_callback, NULL), ==, 0); + evbuffer_add_printf(buf, "This should get drained right away."); + tt_uint_op(evbuffer_get_length(buf), ==, 0); + tt_uint_op(evbuffer_get_length(buf_out1), ==, 0); + tt_uint_op(evbuffer_get_length(buf_out2), ==, 0); + tt_int_op(evbuffer_setcb(buf, NULL, NULL), ==, 0); + evbuffer_add_printf(buf, "This will not."); + tt_str_op((const char *) evbuffer_pullup(buf, -1), ==, "This will not."); + evbuffer_validate(buf); + evbuffer_drain(buf, evbuffer_get_length(buf)); + evbuffer_validate(buf); +#if 0 + /* Now let's try a suspended callback. 
*/ + cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1); + cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2); + evbuffer_cb_suspend(buf,cb2); + evbuffer_prepend(buf,"Hello world",11); /*0->11*/ + evbuffer_validate(buf); + evbuffer_cb_suspend(buf,cb1); + evbuffer_add(buf,"more",4); /* 11->15 */ + evbuffer_cb_unsuspend(buf,cb2); + evbuffer_drain(buf, 4); /* 15->11 */ + evbuffer_cb_unsuspend(buf,cb1); + evbuffer_drain(buf, evbuffer_get_length(buf)); /* 11->0 */ + + tt_str_op(evbuffer_pullup(buf_out1, -1), ==, + "0->11; 11->11; 11->0; "); + tt_str_op(evbuffer_pullup(buf_out2, -1), ==, + "0->15; 15->11; 11->0; "); +#endif + + /* the next call to readline should fail */ +#ifndef EVENT__DISABLE_MM_REPLACEMENT + event_set_mem_functions(failing_malloc, realloc, free); + tt_int_op(evbuffer_setcb(buf, self_draining_callback, NULL), ==, -1); + evbuffer_validate(buf); + event_set_mem_functions(malloc, realloc, free); +#endif + + end: + if (buf) + evbuffer_free(buf); + if (buf_out1) + evbuffer_free(buf_out1); + if (buf_out2) + evbuffer_free(buf_out2); +} + +static int ref_done_cb_called_count = 0; +static void *ref_done_cb_called_with = NULL; +static const void *ref_done_cb_called_with_data = NULL; +static size_t ref_done_cb_called_with_len = 0; +static void ref_done_cb(const void *data, size_t len, void *info) +{ + ++ref_done_cb_called_count; + ref_done_cb_called_with = info; + ref_done_cb_called_with_data = data; + ref_done_cb_called_with_len = len; +} + +static void +test_evbuffer_add_reference(void *ptr) +{ + const char chunk1[] = "If you have found the answer to such a problem"; + const char chunk2[] = "you ought to write it up for publication"; + /* -- Knuth's "Notes on the Exercises" from TAOCP */ + char tmp[16]; + size_t len1 = strlen(chunk1), len2=strlen(chunk2); + + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + evbuffer_add_reference(buf1, chunk1, len1, ref_done_cb, (void*)111); + evbuffer_add(buf1, ", ", 2); + evbuffer_add_reference(buf1, chunk2, len2, ref_done_cb, (void*)222); + tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2); + + /* Make sure we can drain a little from a reference. */ + tt_int_op(evbuffer_remove(buf1, tmp, 6), ==, 6); + tt_int_op(memcmp(tmp, "If you", 6), ==, 0); + tt_int_op(evbuffer_remove(buf1, tmp, 5), ==, 5); + tt_int_op(memcmp(tmp, " have", 5), ==, 0); + + /* Make sure that prepending does not meddle with immutable data */ + tt_int_op(evbuffer_prepend(buf1, "I have ", 7), ==, 0); + tt_int_op(memcmp(chunk1, "If you", 6), ==, 0); + evbuffer_validate(buf1); + + /* Make sure that when the chunk is over, the callback is invoked. */ + evbuffer_drain(buf1, 7); /* Remove prepended stuff. */ + evbuffer_drain(buf1, len1-11-1); /* remove all but one byte of chunk1 */ + tt_int_op(ref_done_cb_called_count, ==, 0); + evbuffer_remove(buf1, tmp, 1); + tt_int_op(tmp[0], ==, 'm'); + tt_assert(ref_done_cb_called_with == (void*)111); + tt_assert(ref_done_cb_called_with_data == chunk1); + tt_assert(ref_done_cb_called_with_len == len1); + tt_int_op(ref_done_cb_called_count, ==, 1); + evbuffer_validate(buf1); + + /* Drain some of the remaining chunk, then add it to another buffer */ + evbuffer_drain(buf1, 6); /* Remove the ", you ". 
*/ + buf2 = evbuffer_new(); + tt_assert(buf2); + tt_int_op(ref_done_cb_called_count, ==, 1); + evbuffer_add(buf2, "I ", 2); + + evbuffer_add_buffer(buf2, buf1); + tt_int_op(ref_done_cb_called_count, ==, 1); + evbuffer_remove(buf2, tmp, 16); + tt_int_op(memcmp("I ought to write", tmp, 16), ==, 0); + evbuffer_drain(buf2, evbuffer_get_length(buf2)); + tt_int_op(ref_done_cb_called_count, ==, 2); + tt_assert(ref_done_cb_called_with == (void*)222); + evbuffer_validate(buf2); + + /* Now add more stuff to buf1 and make sure that it gets removed on + * free. */ + evbuffer_add(buf1, "You shake and shake the ", 24); + evbuffer_add_reference(buf1, "ketchup bottle", 14, ref_done_cb, + (void*)3333); + evbuffer_add(buf1, ". Nothing comes and then a lot'll.", 35); + evbuffer_free(buf1); + buf1 = NULL; + tt_int_op(ref_done_cb_called_count, ==, 3); + tt_assert(ref_done_cb_called_with == (void*)3333); + +end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +test_evbuffer_multicast(void *ptr) +{ + const char chunk1[] = "If you have found the answer to such a problem"; + const char chunk2[] = "you ought to write it up for publication"; + /* -- Knuth's "Notes on the Exercises" from TAOCP */ + char tmp[16]; + size_t len1 = strlen(chunk1), len2=strlen(chunk2); + + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + evbuffer_add(buf1, chunk1, len1); + evbuffer_add(buf1, ", ", 2); + evbuffer_add(buf1, chunk2, len2); + tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2); + + buf2 = evbuffer_new(); + tt_assert(buf2); + + tt_int_op(evbuffer_add_buffer_reference(buf2, buf1), ==, 0); + /* nested references are not allowed */ + tt_int_op(evbuffer_add_buffer_reference(buf2, buf2), ==, -1); + tt_int_op(evbuffer_add_buffer_reference(buf1, buf2), ==, -1); + + /* both buffers contain the same amount of data */ + tt_int_op(evbuffer_get_length(buf1), ==, evbuffer_get_length(buf2)); + + /* Make sure we can drain a little from the first buffer. */ + tt_int_op(evbuffer_remove(buf1, tmp, 6), ==, 6); + tt_int_op(memcmp(tmp, "If you", 6), ==, 0); + tt_int_op(evbuffer_remove(buf1, tmp, 5), ==, 5); + tt_int_op(memcmp(tmp, " have", 5), ==, 0); + + /* Make sure that prepending does not meddle with immutable data */ + tt_int_op(evbuffer_prepend(buf1, "I have ", 7), ==, 0); + tt_int_op(memcmp(chunk1, "If you", 6), ==, 0); + evbuffer_validate(buf1); + + /* Make sure we can drain a little from the second buffer. 
*/ + tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6); + tt_int_op(memcmp(tmp, "If you", 6), ==, 0); + tt_int_op(evbuffer_remove(buf2, tmp, 5), ==, 5); + tt_int_op(memcmp(tmp, " have", 5), ==, 0); + + /* Make sure that prepending does not meddle with immutable data */ + tt_int_op(evbuffer_prepend(buf2, "I have ", 7), ==, 0); + tt_int_op(memcmp(chunk1, "If you", 6), ==, 0); + evbuffer_validate(buf2); + + /* Make sure the data can be read from the second buffer when the first is freed */ + evbuffer_free(buf1); + buf1 = NULL; + + tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6); + tt_int_op(memcmp(tmp, "I have", 6), ==, 0); + + tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6); + tt_int_op(memcmp(tmp, " foun", 6), ==, 0); + +end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +test_evbuffer_multicast_drain(void *ptr) +{ + const char chunk1[] = "If you have found the answer to such a problem"; + const char chunk2[] = "you ought to write it up for publication"; + /* -- Knuth's "Notes on the Exercises" from TAOCP */ + size_t len1 = strlen(chunk1), len2=strlen(chunk2); + + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + evbuffer_add(buf1, chunk1, len1); + evbuffer_add(buf1, ", ", 2); + evbuffer_add(buf1, chunk2, len2); + tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2); + + buf2 = evbuffer_new(); + tt_assert(buf2); + + tt_int_op(evbuffer_add_buffer_reference(buf2, buf1), ==, 0); + tt_int_op(evbuffer_get_length(buf2), ==, len1+len2+2); + tt_int_op(evbuffer_drain(buf1, evbuffer_get_length(buf1)), ==, 0); + tt_int_op(evbuffer_get_length(buf2), ==, len1+len2+2); + tt_int_op(evbuffer_drain(buf2, evbuffer_get_length(buf2)), ==, 0); + evbuffer_validate(buf1); + evbuffer_validate(buf2); + +end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +check_prepend(struct evbuffer *buffer, + const struct evbuffer_cb_info *cbinfo, + void *arg) +{ + tt_int_op(cbinfo->orig_size, ==, 3); + tt_int_op(cbinfo->n_added, ==, 8096); + tt_int_op(cbinfo->n_deleted, ==, 0); +end: + ; +} +/* Some cases that we didn't get in test_evbuffer() above, for more coverage. */ +static void +test_evbuffer_prepend(void *ptr) +{ + struct evbuffer *buf1 = NULL, *buf2 = NULL; + char tmp[128], *buffer = malloc(8096); + int n; + + buf1 = evbuffer_new(); + tt_assert(buf1); + + /* Case 0: The evbuffer is entirely empty. */ + evbuffer_prepend(buf1, "This string has 29 characters", 29); + evbuffer_validate(buf1); + + /* Case 1: Prepend goes entirely in new chunk. */ + evbuffer_prepend(buf1, "Short.", 6); + evbuffer_validate(buf1); + + /* Case 2: prepend goes entirely in first chunk. */ + evbuffer_drain(buf1, 6+11); + evbuffer_prepend(buf1, "it", 2); + evbuffer_validate(buf1); + tt_assert(!memcmp(buf1->first->buffer+buf1->first->misalign, + "it has", 6)); + + /* Case 3: prepend is split over multiple chunks. */ + evbuffer_prepend(buf1, "It is no longer true to say ", 28); + evbuffer_validate(buf1); + n = evbuffer_remove(buf1, tmp, sizeof(tmp)-1); + tt_int_op(n, >=, 0); + tmp[n]='\0'; + tt_str_op(tmp,==,"It is no longer true to say it has 29 characters"); + + buf2 = evbuffer_new(); + tt_assert(buf2); + + /* Case 4: prepend a buffer to an empty buffer. */ + n = 999; + evbuffer_add_printf(buf1, "Here is string %d. ", n++); + evbuffer_prepend_buffer(buf2, buf1); + evbuffer_validate(buf2); + + /* Case 5: prepend a buffer to a nonempty buffer. */ + evbuffer_add_printf(buf1, "Here is string %d. 
", n++); + evbuffer_prepend_buffer(buf2, buf1); + evbuffer_validate(buf2); + evbuffer_validate(buf1); + n = evbuffer_remove(buf2, tmp, sizeof(tmp)-1); + tt_int_op(n, >=, 0); + tmp[n]='\0'; + tt_str_op(tmp,==,"Here is string 1000. Here is string 999. "); + + /* Case 5: evbuffer_prepend() will need a new buffer, with callbacks */ + memset(buffer, 'A', 8096); + evbuffer_free(buf2); + buf2 = evbuffer_new(); + tt_assert(buf2); + evbuffer_prepend(buf2, "foo", 3); + evbuffer_add_cb(buf2, check_prepend, NULL); + evbuffer_prepend(buf2, buffer, 8096); + evbuffer_remove_cb(buf2, check_prepend, NULL); + evbuffer_validate(buf2); + tt_nstr_op(8096,(char *)evbuffer_pullup(buf2, 8096),==,buffer); + evbuffer_drain(buf2, 8096); + tt_nstr_op(3,(char *)evbuffer_pullup(buf2, 3),==,"foo"); + evbuffer_drain(buf2, 3); + +end: + free(buffer); + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); + +} + +static void +test_evbuffer_empty_reference_prepend(void *ptr) +{ + struct evbuffer *buf = NULL; + + buf = evbuffer_new(); + tt_assert(buf); + + /** empty chain could leave invalid last_with_datap */ + evbuffer_add_reference(buf, "", 0, NULL, NULL); + evbuffer_validate(buf); + evbuffer_prepend(buf, "foo", 3); + + evbuffer_validate(buf); + tt_assert(!strncmp((char *)evbuffer_pullup(buf, -1), "foo", 3)); + evbuffer_validate(buf); + +end: + if (buf) + evbuffer_free(buf); +} +static void +test_evbuffer_empty_reference_prepend_buffer(void *ptr) +{ + struct evbuffer *buf1 = NULL, *buf2 = NULL; + + buf1 = evbuffer_new(); + tt_assert(buf1); + buf2 = evbuffer_new(); + tt_assert(buf2); + + /** empty chain could leave invalid last_with_datap */ + evbuffer_add_reference(buf1, "", 0, NULL, NULL); + evbuffer_validate(buf1); + evbuffer_add(buf2, "foo", 3); + evbuffer_validate(buf2); + evbuffer_prepend_buffer(buf2, buf1); + evbuffer_validate(buf2); + + tt_assert(!strncmp((char *)evbuffer_pullup(buf2, -1), "foo", 3)); + evbuffer_validate(buf2); + + tt_assert(!strncmp((char *)evbuffer_pullup(buf1, -1), "", 0)); + evbuffer_validate(buf2); + +end: + if (buf1) + evbuffer_free(buf1); + if (buf2) + evbuffer_free(buf2); +} + +static void +test_evbuffer_peek_first_gt(void *info) +{ + struct evbuffer *buf = NULL, *tmp_buf = NULL; + struct evbuffer_ptr ptr; + struct evbuffer_iovec v[2]; + + buf = evbuffer_new(); + tmp_buf = evbuffer_new(); + evbuffer_add_printf(tmp_buf, "Contents of chunk 100\n"); + evbuffer_add_buffer(buf, tmp_buf); + evbuffer_add_printf(tmp_buf, "Contents of chunk 1\n"); + evbuffer_add_buffer(buf, tmp_buf); + + evbuffer_ptr_set(buf, &ptr, 0, EVBUFFER_PTR_SET); + + /** The only case that matters*/ + tt_int_op(evbuffer_peek(buf, -1, &ptr, NULL, 0), ==, 2); + /** Just in case */ + tt_int_op(evbuffer_peek(buf, -1, &ptr, v, 2), ==, 2); + + evbuffer_ptr_set(buf, &ptr, 20, EVBUFFER_PTR_ADD); + tt_int_op(evbuffer_peek(buf, -1, &ptr, NULL, 0), ==, 2); + tt_int_op(evbuffer_peek(buf, -1, &ptr, v, 2), ==, 2); + tt_int_op(evbuffer_peek(buf, 2, &ptr, NULL, 0), ==, 1); + tt_int_op(evbuffer_peek(buf, 2, &ptr, v, 2), ==, 1); + tt_int_op(evbuffer_peek(buf, 3, &ptr, NULL, 0), ==, 2); + tt_int_op(evbuffer_peek(buf, 3, &ptr, v, 2), ==, 2); + +end: + if (buf) + evbuffer_free(buf); + if (tmp_buf) + evbuffer_free(tmp_buf); +} + +static void +test_evbuffer_peek(void *info) +{ + struct evbuffer *buf = NULL, *tmp_buf = NULL; + int i; + struct evbuffer_iovec v[20]; + struct evbuffer_ptr ptr; + +#define tt_iov_eq(v, s) \ + tt_int_op((v)->iov_len, ==, strlen(s)); \ + tt_assert(!memcmp((v)->iov_base, (s), strlen(s))) + + /* Let's make a 
very fragmented buffer. */ + buf = evbuffer_new(); + tmp_buf = evbuffer_new(); + for (i = 0; i < 16; ++i) { + evbuffer_add_printf(tmp_buf, "Contents of chunk [%d]\n", i); + evbuffer_add_buffer(buf, tmp_buf); + } + + /* How many chunks do we need for everything? */ + i = evbuffer_peek(buf, -1, NULL, NULL, 0); + tt_int_op(i, ==, 16); + + /* Simple peek: get everything. */ + i = evbuffer_peek(buf, -1, NULL, v, 20); + tt_int_op(i, ==, 16); /* we used only 16 chunks. */ + tt_iov_eq(&v[0], "Contents of chunk [0]\n"); + tt_iov_eq(&v[3], "Contents of chunk [3]\n"); + tt_iov_eq(&v[12], "Contents of chunk [12]\n"); + tt_iov_eq(&v[15], "Contents of chunk [15]\n"); + + /* Just get one chunk worth. */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(buf, -1, NULL, v, 1); + tt_int_op(i, ==, 1); + tt_iov_eq(&v[0], "Contents of chunk [0]\n"); + tt_assert(v[1].iov_base == NULL); + + /* Suppose we want at least the first 40 bytes. */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(buf, 40, NULL, v, 16); + tt_int_op(i, ==, 2); + tt_iov_eq(&v[0], "Contents of chunk [0]\n"); + tt_iov_eq(&v[1], "Contents of chunk [1]\n"); + tt_assert(v[2].iov_base == NULL); + + /* How many chunks do we need for 100 bytes? */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(buf, 100, NULL, NULL, 0); + tt_int_op(i, ==, 5); + tt_assert(v[0].iov_base == NULL); + + /* Now we ask for more bytes than we provide chunks for */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(buf, 60, NULL, v, 1); + tt_int_op(i, ==, 3); + tt_iov_eq(&v[0], "Contents of chunk [0]\n"); + tt_assert(v[1].iov_base == NULL); + + /* Now we ask for more bytes than the buffer has. */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(buf, 65536, NULL, v, 20); + tt_int_op(i, ==, 16); /* we used only 16 chunks. */ + tt_iov_eq(&v[0], "Contents of chunk [0]\n"); + tt_iov_eq(&v[3], "Contents of chunk [3]\n"); + tt_iov_eq(&v[12], "Contents of chunk [12]\n"); + tt_iov_eq(&v[15], "Contents of chunk [15]\n"); + tt_assert(v[16].iov_base == NULL); + + /* What happens if we try an empty buffer? */ + memset(v, 0, sizeof(v)); + i = evbuffer_peek(tmp_buf, -1, NULL, v, 20); + tt_int_op(i, ==, 0); + tt_assert(v[0].iov_base == NULL); + memset(v, 0, sizeof(v)); + i = evbuffer_peek(tmp_buf, 50, NULL, v, 20); + tt_int_op(i, ==, 0); + tt_assert(v[0].iov_base == NULL); + + /* Okay, now time to have fun with pointers. */ + memset(v, 0, sizeof(v)); + evbuffer_ptr_set(buf, &ptr, 30, EVBUFFER_PTR_SET); + i = evbuffer_peek(buf, 50, &ptr, v, 20); + tt_int_op(i, ==, 3); + tt_iov_eq(&v[0], " of chunk [1]\n"); + tt_iov_eq(&v[1], "Contents of chunk [2]\n"); + tt_iov_eq(&v[2], "Contents of chunk [3]\n"); /*more than we asked for*/ + + /* advance to the start of another chain. */ + memset(v, 0, sizeof(v)); + evbuffer_ptr_set(buf, &ptr, 14, EVBUFFER_PTR_ADD); + i = evbuffer_peek(buf, 44, &ptr, v, 20); + tt_int_op(i, ==, 2); + tt_iov_eq(&v[0], "Contents of chunk [2]\n"); + tt_iov_eq(&v[1], "Contents of chunk [3]\n"); /*more than we asked for*/ + + /* peek at the end of the buffer */ + memset(v, 0, sizeof(v)); + tt_assert(evbuffer_ptr_set(buf, &ptr, evbuffer_get_length(buf), EVBUFFER_PTR_SET) == 0); + i = evbuffer_peek(buf, 44, &ptr, v, 20); + tt_int_op(i, ==, 0); + tt_assert(v[0].iov_base == NULL); + +end: + if (buf) + evbuffer_free(buf); + if (tmp_buf) + evbuffer_free(tmp_buf); +} + +/* Check whether evbuffer freezing works right. This is called twice, + once with the argument "start" and once with the argument "end". 
+ When we test "start", we freeze the start of an evbuffer and make sure + that modifying the start of the buffer doesn't work. When we test + "end", we freeze the end of an evbuffer and make sure that modifying + the end of the buffer doesn't work. + */ +static void +test_evbuffer_freeze(void *ptr) +{ + struct evbuffer *buf = NULL, *tmp_buf=NULL; + const char string[] = /* Year's End, Richard Wilbur */ + "I've known the wind by water banks to shake\n" + "The late leaves down, which frozen where they fell\n" + "And held in ice as dancers in a spell\n" + "Fluttered all winter long into a lake..."; + const int start = !strcmp(ptr, "start"); + char *cp; + char charbuf[128]; + int r; + size_t orig_length; + struct evbuffer_iovec v[1]; + + if (!start) + tt_str_op(ptr, ==, "end"); + + buf = evbuffer_new(); + tmp_buf = evbuffer_new(); + tt_assert(tmp_buf); + + evbuffer_add(buf, string, strlen(string)); + evbuffer_freeze(buf, start); /* Freeze the start or the end.*/ + +#define FREEZE_EQ(a, startcase, endcase) \ + do { \ + if (start) { \ + tt_int_op((a), ==, (startcase)); \ + } else { \ + tt_int_op((a), ==, (endcase)); \ + } \ + } while (0) + + + orig_length = evbuffer_get_length(buf); + + /* These functions all manipulate the end of buf. */ + r = evbuffer_add(buf, "abc", 0); + FREEZE_EQ(r, 0, -1); + r = evbuffer_reserve_space(buf, 10, v, 1); + FREEZE_EQ(r, 1, -1); + if (r == 1) { + memset(v[0].iov_base, 'X', 10); + v[0].iov_len = 10; + } + r = evbuffer_commit_space(buf, v, 1); + FREEZE_EQ(r, 0, -1); + r = evbuffer_add_reference(buf, string, 5, NULL, NULL); + FREEZE_EQ(r, 0, -1); + r = evbuffer_add_printf(buf, "Hello %s", "world"); + FREEZE_EQ(r, 11, -1); + /* TODO: test add_buffer, add_file, read */ + + if (!start) + tt_int_op(orig_length, ==, evbuffer_get_length(buf)); + + orig_length = evbuffer_get_length(buf); + + /* These functions all manipulate the start of buf. */ + r = evbuffer_remove(buf, charbuf, 1); + FREEZE_EQ(r, -1, 1); + r = evbuffer_drain(buf, 3); + FREEZE_EQ(r, -1, 0); + r = evbuffer_prepend(buf, "dummy", 5); + FREEZE_EQ(r, -1, 0); + cp = evbuffer_readln(buf, NULL, EVBUFFER_EOL_LF); + FREEZE_EQ(cp==NULL, 1, 0); + if (cp) + free(cp); + /* TODO: Test remove_buffer, add_buffer, write, prepend_buffer */ + + if (start) + tt_int_op(orig_length, ==, evbuffer_get_length(buf)); + +end: + if (buf) + evbuffer_free(buf); + + if (tmp_buf) + evbuffer_free(tmp_buf); +} + +static void +test_evbuffer_add_iovec(void * ptr) +{ + struct evbuffer * buf = NULL; + struct evbuffer_iovec vec[4]; + const char * data[] = { + "Guilt resembles a sword with two edges.", + "On the one hand, it cuts for Justice, imposing practical morality upon those who fear it.", + "Conscience does not always adhere to rational judgment.", + "Guilt is always a self-imposed burden, but it is not always rightly imposed." + /* -- R.A. 
Salvatore, _Sojurn_ */ + }; + size_t expected_length = 0; + size_t returned_length = 0; + int i; + + buf = evbuffer_new(); + + tt_assert(buf); + + for (i = 0; i < 4; i++) { + vec[i].iov_len = strlen(data[i]); + vec[i].iov_base = (char*) data[i]; + expected_length += vec[i].iov_len; + } + + returned_length = evbuffer_add_iovec(buf, vec, 4); + + tt_int_op(returned_length, ==, evbuffer_get_length(buf)); + tt_int_op(evbuffer_get_length(buf), ==, expected_length); + + for (i = 0; i < 4; i++) { + char charbuf[1024]; + + memset(charbuf, 0, 1024); + evbuffer_remove(buf, charbuf, strlen(data[i])); + tt_assert(strcmp(charbuf, data[i]) == 0); + } + + tt_assert(evbuffer_get_length(buf) == 0); +end: + if (buf) { + evbuffer_free(buf); + } +} + +static void +test_evbuffer_copyout(void *dummy) +{ + const char string[] = + "Still they skirmish to and fro, men my messmates on the snow " + "When we headed off the aurochs turn for turn; " + "When the rich Allobrogenses never kept amanuenses, " + "And our only plots were piled in lakes at Berne."; + /* -- Kipling, "In The Neolithic Age" */ + char tmp[1024]; + struct evbuffer_ptr ptr; + struct evbuffer *buf; + + (void)dummy; + + buf = evbuffer_new(); + tt_assert(buf); + + tt_int_op(strlen(string), ==, 206); + + /* Ensure separate chains */ + evbuffer_add_reference(buf, string, 80, no_cleanup, NULL); + evbuffer_add_reference(buf, string+80, 80, no_cleanup, NULL); + evbuffer_add(buf, string+160, strlen(string)-160); + + tt_int_op(206, ==, evbuffer_get_length(buf)); + + /* First, let's test plain old copyout. */ + + /* Copy a little from the beginning. */ + tt_int_op(10, ==, evbuffer_copyout(buf, tmp, 10)); + tt_int_op(0, ==, memcmp(tmp, "Still they", 10)); + + /* Now copy more than a little from the beginning */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(100, ==, evbuffer_copyout(buf, tmp, 100)); + tt_int_op(0, ==, memcmp(tmp, string, 100)); + + /* Copy too much; ensure truncation. */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(206, ==, evbuffer_copyout(buf, tmp, 230)); + tt_int_op(0, ==, memcmp(tmp, string, 206)); + + /* That was supposed to be nondestructive, btw */ + tt_int_op(206, ==, evbuffer_get_length(buf)); + + /* Now it's time to test copyout_from! First, let's start in the + * first chain. 
*/ + evbuffer_ptr_set(buf, &ptr, 15, EVBUFFER_PTR_SET); + memset(tmp, 0, sizeof(tmp)); + tt_int_op(10, ==, evbuffer_copyout_from(buf, &ptr, tmp, 10)); + tt_int_op(0, ==, memcmp(tmp, "mish to an", 10)); + + /* Right up to the end of the first chain */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(65, ==, evbuffer_copyout_from(buf, &ptr, tmp, 65)); + tt_int_op(0, ==, memcmp(tmp, string+15, 65)); + + /* Span into the second chain */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(90, ==, evbuffer_copyout_from(buf, &ptr, tmp, 90)); + tt_int_op(0, ==, memcmp(tmp, string+15, 90)); + + /* Span into the third chain */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(160, ==, evbuffer_copyout_from(buf, &ptr, tmp, 160)); + tt_int_op(0, ==, memcmp(tmp, string+15, 160)); + + /* Overrun */ + memset(tmp, 0, sizeof(tmp)); + tt_int_op(206-15, ==, evbuffer_copyout_from(buf, &ptr, tmp, 999)); + tt_int_op(0, ==, memcmp(tmp, string+15, 206-15)); + + /* That was supposed to be nondestructive, too */ + tt_int_op(206, ==, evbuffer_get_length(buf)); + +end: + if (buf) + evbuffer_free(buf); +} + +static void * +setup_passthrough(const struct testcase_t *testcase) +{ + return testcase->setup_data; +} +static int +cleanup_passthrough(const struct testcase_t *testcase, void *ptr) +{ + (void) ptr; + return 1; +} + +static const struct testcase_setup_t nil_setup = { + setup_passthrough, + cleanup_passthrough +}; + +struct testcase_t evbuffer_testcases[] = { + { "evbuffer", test_evbuffer, 0, NULL, NULL }, + { "remove_buffer_with_empty", test_evbuffer_remove_buffer_with_empty, 0, NULL, NULL }, + { "remove_buffer_with_empty2", test_evbuffer_remove_buffer_with_empty2, 0, NULL, NULL }, + { "remove_buffer_with_empty3", test_evbuffer_remove_buffer_with_empty3, 0, NULL, NULL }, + { "remove_buffer_with_empty_front", test_evbuffer_remove_buffer_with_empty_front, 0, NULL, NULL }, + { "remove_buffer_adjust_last_with_datap_with_empty", + test_evbuffer_remove_buffer_adjust_last_with_datap_with_empty, 0, NULL, NULL }, + { "add_buffer_with_empty", test_evbuffer_add_buffer_with_empty, 0, NULL, NULL }, + { "add_buffer_with_empty2", test_evbuffer_add_buffer_with_empty2, 0, NULL, NULL }, + { "reserve2", test_evbuffer_reserve2, 0, NULL, NULL }, + { "reserve_many", test_evbuffer_reserve_many, 0, NULL, NULL }, + { "reserve_many2", test_evbuffer_reserve_many, 0, &nil_setup, (void*)"add" }, + { "reserve_many3", test_evbuffer_reserve_many, 0, &nil_setup, (void*)"fill" }, + { "reserve_with_empty", test_evbuffer_reserve_with_empty, 0, NULL, NULL }, + { "reserve_invalid_last_with_datap", test_evbuffer_reserve_invalid_last_with_datap, TT_FORK, NULL, NULL }, + { "expand", test_evbuffer_expand, 0, NULL, NULL }, + { "expand_overflow", test_evbuffer_expand_overflow, 0, NULL, NULL }, + { "add1", test_evbuffer_add1, 0, NULL, NULL }, + { "add2", test_evbuffer_add2, 0, NULL, NULL }, + { "reference", test_evbuffer_reference, 0, NULL, NULL }, + { "reference2", test_evbuffer_reference2, 0, NULL, NULL }, + { "iterative", test_evbuffer_iterative, 0, NULL, NULL }, + { "readln", test_evbuffer_readln, TT_NO_LOGS, &basic_setup, NULL }, + { "search_eol", test_evbuffer_search_eol, 0, NULL, NULL }, + { "find", test_evbuffer_find, 0, NULL, NULL }, + { "ptr_set", test_evbuffer_ptr_set, 0, NULL, NULL }, + { "search", test_evbuffer_search, 0, NULL, NULL }, + { "callbacks", test_evbuffer_callbacks, 0, NULL, NULL }, + { "add_reference", test_evbuffer_add_reference, 0, NULL, NULL }, + { "multicast", test_evbuffer_multicast, 0, NULL, NULL }, + { "multicast_drain", 
test_evbuffer_multicast_drain, 0, NULL, NULL }, + { "prepend", test_evbuffer_prepend, TT_FORK, NULL, NULL }, + { "empty_reference_prepend", test_evbuffer_empty_reference_prepend, TT_FORK, NULL, NULL }, + { "empty_reference_prepend_buffer", test_evbuffer_empty_reference_prepend_buffer, TT_FORK, NULL, NULL }, + { "peek", test_evbuffer_peek, 0, NULL, NULL }, + { "peek_first_gt", test_evbuffer_peek_first_gt, 0, NULL, NULL }, + { "freeze_start", test_evbuffer_freeze, 0, &nil_setup, (void*)"start" }, + { "freeze_end", test_evbuffer_freeze, 0, &nil_setup, (void*)"end" }, + { "add_iovec", test_evbuffer_add_iovec, 0, NULL, NULL}, + { "copyout", test_evbuffer_copyout, 0, NULL, NULL}, + { "file_segment_add_cleanup_cb", test_evbuffer_file_segment_add_cleanup_cb, 0, NULL, NULL }, + +#define ADDFILE_TEST(name, parameters) \ + { name, test_evbuffer_add_file, TT_FORK|TT_NEED_BASE, \ + &basic_setup, (void*)(parameters) } + +#define ADDFILE_TEST_GROUP(name, parameters) \ + ADDFILE_TEST(name "_sendfile", "sendfile " parameters), \ + ADDFILE_TEST(name "_mmap", "mmap " parameters), \ + ADDFILE_TEST(name "_linear", "linear " parameters) + + ADDFILE_TEST_GROUP("add_file", ""), + ADDFILE_TEST("add_file_nosegment", "default nosegment"), + + ADDFILE_TEST_GROUP("add_big_file", "bigfile"), + ADDFILE_TEST("add_big_file_nosegment", "default nosegment bigfile"), + + ADDFILE_TEST_GROUP("add_file_offset", "bigfile map_offset"), + ADDFILE_TEST("add_file_offset_nosegment", + "default nosegment bigfile map_offset"), + + ADDFILE_TEST_GROUP("add_file_offset2", "bigfile offset_in_segment"), + + ADDFILE_TEST_GROUP("add_file_offset3", + "bigfile offset_in_segment map_offset"), + + END_OF_TESTCASES +}; diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress_bufferevent.c b/probe-busybox/libevent-2.1.11-stable/test/regress_bufferevent.c new file mode 100644 index 00000000..d4208c20 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_bufferevent.c @@ -0,0 +1,1439 @@ +/* + * Copyright (c) 2003-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "util-internal.h" + +/* The old tests here need assertions to work. */ +#undef NDEBUG + +#ifdef _WIN32 +#include +#include +#endif + +#include "event2/event-config.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#ifndef _WIN32 +#include +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#ifdef EVENT__HAVE_ARPA_INET_H +#include +#endif + +#include "event2/event-config.h" +#include "event2/event.h" +#include "event2/event_struct.h" +#include "event2/event_compat.h" +#include "event2/tag.h" +#include "event2/buffer.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_compat.h" +#include "event2/bufferevent_struct.h" +#include "event2/listener.h" +#include "event2/util.h" + +#include "bufferevent-internal.h" +#include "evthread-internal.h" +#include "util-internal.h" +#ifdef _WIN32 +#include "iocp-internal.h" +#endif + +#include "regress.h" +#include "regress_testutils.h" + +/* + * simple bufferevent test + */ + +static void +readcb(struct bufferevent *bev, void *arg) +{ + if (evbuffer_get_length(bev->input) == 8333) { + struct evbuffer *evbuf = evbuffer_new(); + assert(evbuf != NULL); + + /* gratuitous test of bufferevent_read_buffer */ + bufferevent_read_buffer(bev, evbuf); + + bufferevent_disable(bev, EV_READ); + + if (evbuffer_get_length(evbuf) == 8333) { + test_ok++; + } + + evbuffer_free(evbuf); + } +} + +static void +writecb(struct bufferevent *bev, void *arg) +{ + if (evbuffer_get_length(bev->output) == 0) { + test_ok++; + } +} + +static void +errorcb(struct bufferevent *bev, short what, void *arg) +{ + test_ok = -2; +} + +static void +test_bufferevent_impl(int use_pair, int flush) +{ + struct bufferevent *bev1 = NULL, *bev2 = NULL; + char buffer[8333]; + int i; + int expected = 2; + + if (use_pair) { + struct bufferevent *pair[2]; + tt_assert(0 == bufferevent_pair_new(NULL, 0, pair)); + bev1 = pair[0]; + bev2 = pair[1]; + bufferevent_setcb(bev1, readcb, writecb, errorcb, bev1); + bufferevent_setcb(bev2, readcb, writecb, errorcb, NULL); + tt_fd_op(bufferevent_getfd(bev1), ==, EVUTIL_INVALID_SOCKET); + tt_ptr_op(bufferevent_get_underlying(bev1), ==, NULL); + tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, bev2); + tt_ptr_op(bufferevent_pair_get_partner(bev2), ==, bev1); + } else { + bev1 = bufferevent_new(pair[0], readcb, writecb, errorcb, NULL); + bev2 = bufferevent_new(pair[1], readcb, writecb, errorcb, NULL); + tt_fd_op(bufferevent_getfd(bev1), ==, pair[0]); + tt_ptr_op(bufferevent_get_underlying(bev1), ==, NULL); + tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, NULL); + tt_ptr_op(bufferevent_pair_get_partner(bev2), ==, NULL); + } + + { + /* Test getcb. */ + bufferevent_data_cb r, w; + bufferevent_event_cb e; + void *a; + bufferevent_getcb(bev1, &r, &w, &e, &a); + tt_ptr_op(r, ==, readcb); + tt_ptr_op(w, ==, writecb); + tt_ptr_op(e, ==, errorcb); + tt_ptr_op(a, ==, use_pair ? 
bev1 : NULL); + } + + bufferevent_disable(bev1, EV_READ); + bufferevent_enable(bev2, EV_READ); + + tt_int_op(bufferevent_get_enabled(bev1), ==, EV_WRITE); + tt_int_op(bufferevent_get_enabled(bev2), ==, EV_WRITE|EV_READ); + + for (i = 0; i < (int)sizeof(buffer); i++) + buffer[i] = i; + + bufferevent_write(bev1, buffer, sizeof(buffer)); + if (flush >= 0) { + tt_int_op(bufferevent_flush(bev1, EV_WRITE, flush), >=, 0); + } + + event_dispatch(); + + bufferevent_free(bev2); + tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, NULL); + bufferevent_free(bev1); + + /** Only pair call errorcb for BEV_FINISHED */ + if (use_pair && flush == BEV_FINISHED) { + expected = -1; + } + if (test_ok != expected) + test_ok = 0; +end: + ; +} + +static void test_bufferevent(void) { test_bufferevent_impl(0, -1); } +static void test_bufferevent_pair(void) { test_bufferevent_impl(1, -1); } + +static void test_bufferevent_flush_normal(void) { test_bufferevent_impl(0, BEV_NORMAL); } +static void test_bufferevent_flush_flush(void) { test_bufferevent_impl(0, BEV_FLUSH); } +static void test_bufferevent_flush_finished(void) { test_bufferevent_impl(0, BEV_FINISHED); } + +static void test_bufferevent_pair_flush_normal(void) { test_bufferevent_impl(1, BEV_NORMAL); } +static void test_bufferevent_pair_flush_flush(void) { test_bufferevent_impl(1, BEV_FLUSH); } +static void test_bufferevent_pair_flush_finished(void) { test_bufferevent_impl(1, BEV_FINISHED); } + +#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED) +/** + * Trace lock/unlock/alloc/free for locks. + * (More heavier then evthread_debug*) + */ +typedef struct +{ + void *lock; + enum { + ALLOC, FREE, + } status; + size_t locked /** allow recursive locking */; +} lock_wrapper; +struct lock_unlock_base +{ + /* Original callbacks */ + struct evthread_lock_callbacks cbs; + /* Map of locks */ + lock_wrapper *locks; + size_t nr_locks; +} lu_base = { + .locks = NULL, +}; + +static lock_wrapper *lu_find(void *lock_) +{ + size_t i; + for (i = 0; i < lu_base.nr_locks; ++i) { + lock_wrapper *lock = &lu_base.locks[i]; + if (lock->lock == lock_) + return lock; + } + return NULL; +} + +static void *trace_lock_alloc(unsigned locktype) +{ + void *lock; + ++lu_base.nr_locks; + lu_base.locks = realloc(lu_base.locks, + sizeof(lock_wrapper) * lu_base.nr_locks); + lock = lu_base.cbs.alloc(locktype); + lu_base.locks[lu_base.nr_locks - 1] = (lock_wrapper){ lock, ALLOC, 0 }; + return lock; +} +static void trace_lock_free(void *lock_, unsigned locktype) +{ + lock_wrapper *lock = lu_find(lock_); + if (!lock || lock->status == FREE || lock->locked) { + TT_FAIL(("lock: free error")); + } else { + lock->status = FREE; + lu_base.cbs.free(lock_, locktype); + } +} +static int trace_lock_lock(unsigned mode, void *lock_) +{ + lock_wrapper *lock = lu_find(lock_); + if (!lock || lock->status == FREE) { + TT_FAIL(("lock: lock error")); + return -1; + } else { + ++lock->locked; + return lu_base.cbs.lock(mode, lock_); + } +} +static int trace_lock_unlock(unsigned mode, void *lock_) +{ + lock_wrapper *lock = lu_find(lock_); + if (!lock || lock->status == FREE || !lock->locked) { + TT_FAIL(("lock: unlock error")); + return -1; + } else { + --lock->locked; + return lu_base.cbs.unlock(mode, lock_); + } +} +static void lock_unlock_free_thread_cbs(void) +{ + event_base_free(NULL); + + if (libevent_tests_running_in_debug_mode) + libevent_global_shutdown(); + + /** drop immutable flag */ + evthread_set_lock_callbacks(NULL); + /** avoid calling of event_global_setup_locks_() for new cbs */ + libevent_global_shutdown(); 
+ /** drop immutable flag for non-debug ops (since called after shutdown) */ + evthread_set_lock_callbacks(NULL); +} + +static int use_lock_unlock_profiler(void) +{ + struct evthread_lock_callbacks cbs = { + EVTHREAD_LOCK_API_VERSION, + EVTHREAD_LOCKTYPE_RECURSIVE, + trace_lock_alloc, + trace_lock_free, + trace_lock_lock, + trace_lock_unlock, + }; + memcpy(&lu_base.cbs, evthread_get_lock_callbacks(), + sizeof(lu_base.cbs)); + { + lock_unlock_free_thread_cbs(); + + evthread_set_lock_callbacks(&cbs); + /** re-create debug locks correctly */ + evthread_enable_lock_debugging(); + + event_init(); + } + return 0; +} +static void free_lock_unlock_profiler(struct basic_test_data *data) +{ + /** fix "held_by" for kqueue */ + evthread_set_lock_callbacks(NULL); + + lock_unlock_free_thread_cbs(); + free(lu_base.locks); + data->base = NULL; +} + +static void test_bufferevent_pair_release_lock(void *arg) +{ + struct basic_test_data *data = arg; + use_lock_unlock_profiler(); + { + struct bufferevent *pair[2]; + if (!bufferevent_pair_new(NULL, BEV_OPT_THREADSAFE, pair)) { + bufferevent_free(pair[0]); + bufferevent_free(pair[1]); + } else + tt_abort_perror("bufferevent_pair_new"); + } + free_lock_unlock_profiler(data); +end: + ; +} +#endif + +/* + * test watermarks and bufferevent + */ + +static void +wm_readcb(struct bufferevent *bev, void *arg) +{ + struct evbuffer *evbuf = evbuffer_new(); + int len = (int)evbuffer_get_length(bev->input); + static int nread; + + assert(len >= 10 && len <= 20); + + assert(evbuf != NULL); + + /* gratuitous test of bufferevent_read_buffer */ + bufferevent_read_buffer(bev, evbuf); + + nread += len; + if (nread == 65000) { + bufferevent_disable(bev, EV_READ); + test_ok++; + } + + evbuffer_free(evbuf); +} + +static void +wm_writecb(struct bufferevent *bev, void *arg) +{ + assert(evbuffer_get_length(bev->output) <= 100); + if (evbuffer_get_length(bev->output) == 0) { + evbuffer_drain(bev->output, evbuffer_get_length(bev->output)); + test_ok++; + } +} + +static void +wm_errorcb(struct bufferevent *bev, short what, void *arg) +{ + test_ok = -2; +} + +static void +test_bufferevent_watermarks_impl(int use_pair) +{ + struct bufferevent *bev1 = NULL, *bev2 = NULL; + char buffer[65000]; + size_t low, high; + int i; + test_ok = 0; + + if (use_pair) { + struct bufferevent *pair[2]; + tt_assert(0 == bufferevent_pair_new(NULL, 0, pair)); + bev1 = pair[0]; + bev2 = pair[1]; + bufferevent_setcb(bev1, NULL, wm_writecb, errorcb, NULL); + bufferevent_setcb(bev2, wm_readcb, NULL, errorcb, NULL); + } else { + bev1 = bufferevent_new(pair[0], NULL, wm_writecb, wm_errorcb, NULL); + bev2 = bufferevent_new(pair[1], wm_readcb, NULL, wm_errorcb, NULL); + } + tt_assert(bev1); + tt_assert(bev2); + bufferevent_disable(bev1, EV_READ); + bufferevent_enable(bev2, EV_READ); + + /* By default, low watermarks are set to 0 */ + bufferevent_getwatermark(bev1, EV_READ, &low, NULL); + tt_int_op(low, ==, 0); + bufferevent_getwatermark(bev2, EV_WRITE, &low, NULL); + tt_int_op(low, ==, 0); + + for (i = 0; i < (int)sizeof(buffer); i++) + buffer[i] = (char)i; + + /* limit the reading on the receiving bufferevent */ + bufferevent_setwatermark(bev2, EV_READ, 10, 20); + + bufferevent_getwatermark(bev2, EV_READ, &low, &high); + tt_int_op(low, ==, 10); + tt_int_op(high, ==, 20); + + /* Tell the sending bufferevent not to notify us till it's down to + 100 bytes. 
*/ + bufferevent_setwatermark(bev1, EV_WRITE, 100, 2000); + + bufferevent_getwatermark(bev1, EV_WRITE, &low, &high); + tt_int_op(low, ==, 100); + tt_int_op(high, ==, 2000); + + { + int r = bufferevent_getwatermark(bev1, EV_WRITE | EV_READ, &low, &high); + tt_int_op(r, !=, 0); + } + + bufferevent_write(bev1, buffer, sizeof(buffer)); + + event_dispatch(); + + tt_int_op(test_ok, ==, 2); + + /* The write callback drained all the data from outbuf, so we + * should have removed the write event... */ + tt_assert(!event_pending(&bev2->ev_write, EV_WRITE, NULL)); + +end: + if (bev1) + bufferevent_free(bev1); + if (bev2) + bufferevent_free(bev2); +} + +static void +test_bufferevent_watermarks(void) +{ + test_bufferevent_watermarks_impl(0); +} + +static void +test_bufferevent_pair_watermarks(void) +{ + test_bufferevent_watermarks_impl(1); +} + +/* + * Test bufferevent filters + */ + +/* strip an 'x' from each byte */ + +static enum bufferevent_filter_result +bufferevent_input_filter(struct evbuffer *src, struct evbuffer *dst, + ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx) +{ + const unsigned char *buffer; + unsigned i; + + buffer = evbuffer_pullup(src, evbuffer_get_length(src)); + for (i = 0; i < evbuffer_get_length(src); i += 2) { + if (buffer[i] == '-') + continue; + + assert(buffer[i] == 'x'); + evbuffer_add(dst, buffer + i + 1, 1); + } + + evbuffer_drain(src, i); + return (BEV_OK); +} + +/* add an 'x' before each byte */ + +static enum bufferevent_filter_result +bufferevent_output_filter(struct evbuffer *src, struct evbuffer *dst, + ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx) +{ + const unsigned char *buffer; + unsigned i; + struct bufferevent **bevp = ctx; + + ++test_ok; + + if (test_ok == 1) { + buffer = evbuffer_pullup(src, evbuffer_get_length(src)); + for (i = 0; i < evbuffer_get_length(src); ++i) { + evbuffer_add(dst, "x", 1); + evbuffer_add(dst, buffer + i, 1); + } + evbuffer_drain(src, evbuffer_get_length(src)); + } else { + return BEV_ERROR; + } + + if (bevp && test_ok == 1) { + int prev = ++test_ok; + bufferevent_write(*bevp, "-", 1); + /* check that during this bufferevent_write() + * bufferevent_output_filter() will not be called again */ + assert(test_ok == prev); + --test_ok; + } + + return (BEV_OK); +} + +static void +test_bufferevent_filters_impl(int use_pair, int disable) +{ + struct bufferevent *bev1 = NULL, *bev2 = NULL; + struct bufferevent *bev1_base = NULL, *bev2_base = NULL; + char buffer[8333]; + int i; + + test_ok = 0; + + if (use_pair) { + struct bufferevent *pair[2]; + tt_assert(0 == bufferevent_pair_new(NULL, 0, pair)); + bev1 = pair[0]; + bev2 = pair[1]; + } else { + bev1 = bufferevent_socket_new(NULL, pair[0], 0); + bev2 = bufferevent_socket_new(NULL, pair[1], 0); + } + bev1_base = bev1; + bev2_base = bev2; + + for (i = 0; i < (int)sizeof(buffer); i++) + buffer[i] = i; + + bev1 = bufferevent_filter_new(bev1, NULL, bufferevent_output_filter, + BEV_OPT_CLOSE_ON_FREE, NULL, + disable ? 
&bev1 : NULL); + + bev2 = bufferevent_filter_new(bev2, bufferevent_input_filter, + NULL, BEV_OPT_CLOSE_ON_FREE, NULL, NULL); + bufferevent_setcb(bev1, NULL, writecb, errorcb, NULL); + bufferevent_setcb(bev2, readcb, NULL, errorcb, NULL); + + tt_ptr_op(bufferevent_get_underlying(bev1), ==, bev1_base); + tt_ptr_op(bufferevent_get_underlying(bev2), ==, bev2_base); + tt_fd_op(bufferevent_getfd(bev1), ==, bufferevent_getfd(bev1_base)); + tt_fd_op(bufferevent_getfd(bev2), ==, bufferevent_getfd(bev2_base)); + + bufferevent_disable(bev1, EV_READ); + bufferevent_enable(bev2, EV_READ); + /* insert some filters */ + bufferevent_write(bev1, buffer, sizeof(buffer)); + + event_dispatch(); + + if (test_ok != 3 + !!disable) + test_ok = 0; + +end: + if (bev1) + bufferevent_free(bev1); + if (bev2) + bufferevent_free(bev2); + +} + +static void test_bufferevent_filters(void) +{ test_bufferevent_filters_impl(0, 0); } +static void test_bufferevent_pair_filters(void) +{ test_bufferevent_filters_impl(1, 0); } +static void test_bufferevent_filters_disable(void) +{ test_bufferevent_filters_impl(0, 1); } +static void test_bufferevent_pair_filters_disable(void) +{ test_bufferevent_filters_impl(1, 1); } + + +static void +sender_writecb(struct bufferevent *bev, void *ctx) +{ + if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) { + bufferevent_disable(bev,EV_READ|EV_WRITE); + TT_BLATHER(("Flushed %d: freeing it.", (int)bufferevent_getfd(bev))); + bufferevent_free(bev); + } +} + +static void +sender_errorcb(struct bufferevent *bev, short what, void *ctx) +{ + TT_FAIL(("Got sender error %d",(int)what)); +} + +static int bufferevent_connect_test_flags = 0; +static int bufferevent_trigger_test_flags = 0; +static int n_strings_read = 0; +static int n_reads_invoked = 0; +static int n_events_invoked = 0; + +#define TEST_STR "Now is the time for all good events to signal for " \ + "the good of their protocol" +static void +listen_cb(struct evconnlistener *listener, evutil_socket_t fd, + struct sockaddr *sa, int socklen, void *arg) +{ + struct event_base *base = arg; + struct bufferevent *bev; + const char s[] = TEST_STR; + TT_BLATHER(("Got a request on socket %d", (int)fd )); + bev = bufferevent_socket_new(base, fd, bufferevent_connect_test_flags); + tt_assert(bev); + bufferevent_setcb(bev, NULL, sender_writecb, sender_errorcb, NULL); + bufferevent_write(bev, s, sizeof(s)); +end: + ; +} + +static evutil_socket_t +fake_listener_create(struct sockaddr_in *localhost) +{ + struct sockaddr *sa = (struct sockaddr *)localhost; + evutil_socket_t fd = -1; + ev_socklen_t slen = sizeof(*localhost); + + memset(localhost, 0, sizeof(*localhost)); + localhost->sin_port = 0; /* have the kernel pick a port */ + localhost->sin_addr.s_addr = htonl(0x7f000001L); + localhost->sin_family = AF_INET; + + /* bind, but don't listen or accept. should trigger + "Connection refused" reliably on most platforms. 
*/ + fd = socket(localhost->sin_family, SOCK_STREAM, 0); + tt_assert(fd >= 0); + tt_assert(bind(fd, sa, slen) == 0); + tt_assert(getsockname(fd, sa, &slen) == 0); + + return fd; + +end: + return -1; +} + +static void +reader_eventcb(struct bufferevent *bev, short what, void *ctx) +{ + struct event_base *base = ctx; + if (what & BEV_EVENT_ERROR) { + perror("foobar"); + TT_FAIL(("got connector error %d", (int)what)); + return; + } + if (what & BEV_EVENT_CONNECTED) { + TT_BLATHER(("connected on %d", (int)bufferevent_getfd(bev))); + bufferevent_enable(bev, EV_READ); + } + if (what & BEV_EVENT_EOF) { + char buf[512]; + size_t n; + n = bufferevent_read(bev, buf, sizeof(buf)-1); + tt_int_op(n, >=, 0); + buf[n] = '\0'; + tt_str_op(buf, ==, TEST_STR); + if (++n_strings_read == 2) + event_base_loopexit(base, NULL); + TT_BLATHER(("EOF on %d: %d strings read.", + (int)bufferevent_getfd(bev), n_strings_read)); + } +end: + ; +} + +static void +reader_eventcb_simple(struct bufferevent *bev, short what, void *ctx) +{ + TT_BLATHER(("Read eventcb simple invoked on %d.", + (int)bufferevent_getfd(bev))); + n_events_invoked++; +} + +static void +reader_readcb(struct bufferevent *bev, void *ctx) +{ + TT_BLATHER(("Read invoked on %d.", (int)bufferevent_getfd(bev))); + n_reads_invoked++; +} + +static void +test_bufferevent_connect(void *arg) +{ + struct basic_test_data *data = arg; + struct evconnlistener *lev=NULL; + struct bufferevent *bev1=NULL, *bev2=NULL; + struct sockaddr_in localhost; + struct sockaddr_storage ss; + struct sockaddr *sa; + ev_socklen_t slen; + + int be_flags=BEV_OPT_CLOSE_ON_FREE; + + if (strstr((char*)data->setup_data, "defer")) { + be_flags |= BEV_OPT_DEFER_CALLBACKS; + } + if (strstr((char*)data->setup_data, "unlocked")) { + be_flags |= BEV_OPT_UNLOCK_CALLBACKS; + } + if (strstr((char*)data->setup_data, "lock")) { + be_flags |= BEV_OPT_THREADSAFE; + } + bufferevent_connect_test_flags = be_flags; +#ifdef _WIN32 + if (!strcmp((char*)data->setup_data, "unset_connectex")) { + struct win32_extension_fns *ext = + (struct win32_extension_fns *) + event_get_win32_extension_fns_(); + ext->ConnectEx = NULL; + } +#endif + + memset(&localhost, 0, sizeof(localhost)); + + localhost.sin_port = 0; /* pick-a-port */ + localhost.sin_addr.s_addr = htonl(0x7f000001L); + localhost.sin_family = AF_INET; + sa = (struct sockaddr *)&localhost; + lev = evconnlistener_new_bind(data->base, listen_cb, data->base, + LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, + 16, sa, sizeof(localhost)); + tt_assert(lev); + + sa = (struct sockaddr *)&ss; + slen = sizeof(ss); + if (regress_get_listener_addr(lev, sa, &slen) < 0) { + tt_abort_perror("getsockname"); + } + + tt_assert(!evconnlistener_enable(lev)); + bev1 = bufferevent_socket_new(data->base, -1, be_flags); + bev2 = bufferevent_socket_new(data->base, -1, be_flags); + tt_assert(bev1); + tt_assert(bev2); + bufferevent_setcb(bev1, reader_readcb,NULL, reader_eventcb, data->base); + bufferevent_setcb(bev2, reader_readcb,NULL, reader_eventcb, data->base); + + bufferevent_enable(bev1, EV_READ); + bufferevent_enable(bev2, EV_READ); + + tt_want(!bufferevent_socket_connect(bev1, sa, sizeof(localhost))); + tt_want(!bufferevent_socket_connect(bev2, sa, sizeof(localhost))); + + event_base_dispatch(data->base); + + tt_int_op(n_strings_read, ==, 2); + tt_int_op(n_reads_invoked, >=, 2); +end: + if (lev) + evconnlistener_free(lev); + + if (bev1) + bufferevent_free(bev1); + + if (bev2) + bufferevent_free(bev2); +} + +static void +test_bufferevent_connect_fail_eventcb(void *arg) +{ + struct 
basic_test_data *data = arg; + int flags = BEV_OPT_CLOSE_ON_FREE | (long)data->setup_data; + struct bufferevent *bev = NULL; + struct evconnlistener *lev = NULL; + struct sockaddr_in localhost; + ev_socklen_t slen = sizeof(localhost); + evutil_socket_t fake_listener = -1; + + fake_listener = fake_listener_create(&localhost); + + tt_int_op(n_events_invoked, ==, 0); + + bev = bufferevent_socket_new(data->base, -1, flags); + tt_assert(bev); + bufferevent_setcb(bev, reader_readcb, reader_readcb, + reader_eventcb_simple, data->base); + bufferevent_enable(bev, EV_READ|EV_WRITE); + tt_int_op(n_events_invoked, ==, 0); + tt_int_op(n_reads_invoked, ==, 0); + /** @see also test_bufferevent_connect_fail() */ + bufferevent_socket_connect(bev, (struct sockaddr *)&localhost, slen); + tt_int_op(n_events_invoked, ==, 0); + tt_int_op(n_reads_invoked, ==, 0); + event_base_dispatch(data->base); + tt_int_op(n_events_invoked, ==, 1); + tt_int_op(n_reads_invoked, ==, 0); + +end: + if (lev) + evconnlistener_free(lev); + if (bev) + bufferevent_free(bev); + if (fake_listener >= 0) + evutil_closesocket(fake_listener); +} + +static void +want_fail_eventcb(struct bufferevent *bev, short what, void *ctx) +{ + struct event_base *base = ctx; + const char *err; + evutil_socket_t s; + + if (what & BEV_EVENT_ERROR) { + s = bufferevent_getfd(bev); + err = evutil_socket_error_to_string(evutil_socket_geterror(s)); + TT_BLATHER(("connection failure on "EV_SOCK_FMT": %s", + EV_SOCK_ARG(s), err)); + test_ok = 1; + } else { + TT_FAIL(("didn't fail? what %hd", what)); + } + + event_base_loopexit(base, NULL); +} + +static void +close_socket_cb(evutil_socket_t fd, short what, void *arg) +{ + evutil_socket_t *fdp = arg; + if (*fdp >= 0) { + evutil_closesocket(*fdp); + *fdp = -1; + } +} + +static void +test_bufferevent_connect_fail(void *arg) +{ + struct basic_test_data *data = (struct basic_test_data *)arg; + struct bufferevent *bev=NULL; + struct event close_listener_event; + int close_listener_event_added = 0; + struct timeval one_second = { 1, 0 }; + struct sockaddr_in localhost; + ev_socklen_t slen = sizeof(localhost); + evutil_socket_t fake_listener = -1; + int r; + + test_ok = 0; + + fake_listener = fake_listener_create(&localhost); + bev = bufferevent_socket_new(data->base, -1, + BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS); + tt_assert(bev); + bufferevent_setcb(bev, NULL, NULL, want_fail_eventcb, data->base); + + r = bufferevent_socket_connect(bev, (struct sockaddr *)&localhost, slen); + /* XXXX we'd like to test the '0' case everywhere, but FreeBSD tells + * detects the error immediately, which is not really wrong of it. */ + tt_want(r == 0 || r == -1); + + /* Close the listener socket after a second. This should trigger + "connection refused" on some other platforms, including OSX. 
*/ + evtimer_assign(&close_listener_event, data->base, close_socket_cb, + &fake_listener); + event_add(&close_listener_event, &one_second); + close_listener_event_added = 1; + + event_base_dispatch(data->base); + + tt_int_op(test_ok, ==, 1); + +end: + if (fake_listener >= 0) + evutil_closesocket(fake_listener); + + if (bev) + bufferevent_free(bev); + + if (close_listener_event_added) + event_del(&close_listener_event); +} + +struct timeout_cb_result { + struct timeval read_timeout_at; + struct timeval write_timeout_at; + struct timeval last_wrote_at; + struct timeval last_read_at; + int n_read_timeouts; + int n_write_timeouts; + int total_calls; +}; + +static void +bev_timeout_read_cb(struct bufferevent *bev, void *arg) +{ + struct timeout_cb_result *res = arg; + evutil_gettimeofday(&res->last_read_at, NULL); +} +static void +bev_timeout_write_cb(struct bufferevent *bev, void *arg) +{ + struct timeout_cb_result *res = arg; + evutil_gettimeofday(&res->last_wrote_at, NULL); +} +static void +bev_timeout_event_cb(struct bufferevent *bev, short what, void *arg) +{ + struct timeout_cb_result *res = arg; + ++res->total_calls; + + if ((what & (BEV_EVENT_READING|BEV_EVENT_TIMEOUT)) + == (BEV_EVENT_READING|BEV_EVENT_TIMEOUT)) { + evutil_gettimeofday(&res->read_timeout_at, NULL); + ++res->n_read_timeouts; + } + if ((what & (BEV_EVENT_WRITING|BEV_EVENT_TIMEOUT)) + == (BEV_EVENT_WRITING|BEV_EVENT_TIMEOUT)) { + evutil_gettimeofday(&res->write_timeout_at, NULL); + ++res->n_write_timeouts; + } +} + +static void +test_bufferevent_timeouts(void *arg) +{ + /* "arg" is a string containing "pair" and/or "filter". */ + struct bufferevent *bev1 = NULL, *bev2 = NULL; + struct basic_test_data *data = arg; + int use_pair = 0, use_filter = 0; + struct timeval tv_w, tv_r, started_at; + struct timeout_cb_result res1, res2; + + memset(&res1, 0, sizeof(res1)); + memset(&res2, 0, sizeof(res2)); + + if (strstr((char*)data->setup_data, "pair")) + use_pair = 1; + if (strstr((char*)data->setup_data, "filter")) + use_filter = 1; + + if (use_pair) { + struct bufferevent *p[2]; + tt_int_op(0, ==, bufferevent_pair_new(data->base, 0, p)); + bev1 = p[0]; + bev2 = p[1]; + } else { + bev1 = bufferevent_socket_new(data->base, data->pair[0], 0); + bev2 = bufferevent_socket_new(data->base, data->pair[1], 0); + } + tt_assert(bev1); + tt_assert(bev2); + + if (use_filter) { + struct bufferevent *bevf1, *bevf2; + bevf1 = bufferevent_filter_new(bev1, NULL, NULL, + BEV_OPT_CLOSE_ON_FREE, NULL, NULL); + bevf2 = bufferevent_filter_new(bev2, NULL, NULL, + BEV_OPT_CLOSE_ON_FREE, NULL, NULL); + tt_assert(bevf1); + tt_assert(bevf2); + bev1 = bevf1; + bev2 = bevf2; + } + + /* Do this nice and early. */ + bufferevent_disable(bev2, EV_READ); + + /* bev1 will try to write and read. Both will time out. */ + evutil_gettimeofday(&started_at, NULL); + tv_w.tv_sec = tv_r.tv_sec = 0; + tv_w.tv_usec = 100*1000; + tv_r.tv_usec = 150*1000; + bufferevent_setcb(bev1, bev_timeout_read_cb, bev_timeout_write_cb, + bev_timeout_event_cb, &res1); + bufferevent_set_timeouts(bev1, &tv_r, &tv_w); + bufferevent_write(bev1, "ABCDEFG", 7); + bufferevent_enable(bev1, EV_READ|EV_WRITE); + + /* bev2 has nothing to say, and isn't listening. 
*/ + bufferevent_setcb(bev2, bev_timeout_read_cb, bev_timeout_write_cb, + bev_timeout_event_cb, &res2); + tv_w.tv_sec = tv_r.tv_sec = 0; + tv_w.tv_usec = 200*1000; + tv_r.tv_usec = 100*1000; + bufferevent_set_timeouts(bev2, &tv_r, &tv_w); + bufferevent_enable(bev2, EV_WRITE); + + tv_r.tv_sec = 0; + tv_r.tv_usec = 350000; + + event_base_loopexit(data->base, &tv_r); + event_base_dispatch(data->base); + + /* XXXX Test that actually reading or writing a little resets the + * timeouts. */ + + tt_want(res1.total_calls == 2); + tt_want(res1.n_read_timeouts == 1); + tt_want(res1.n_write_timeouts == 1); + tt_want(res2.total_calls == !(use_pair && !use_filter)); + tt_want(res2.n_write_timeouts == !(use_pair && !use_filter)); + tt_want(!res2.n_read_timeouts); + + test_timeval_diff_eq(&started_at, &res1.read_timeout_at, 150); + test_timeval_diff_eq(&started_at, &res1.write_timeout_at, 100); + +#define tt_assert_timeval_empty(tv) do { \ + tt_int_op((tv).tv_sec, ==, 0); \ + tt_int_op((tv).tv_usec, ==, 0); \ +} while(0) + tt_assert_timeval_empty(res1.last_read_at); + tt_assert_timeval_empty(res2.last_read_at); + tt_assert_timeval_empty(res2.last_wrote_at); + tt_assert_timeval_empty(res2.last_wrote_at); +#undef tt_assert_timeval_empty + +end: + if (bev1) + bufferevent_free(bev1); + if (bev2) + bufferevent_free(bev2); +} + +static void +trigger_failure_cb(evutil_socket_t fd, short what, void *ctx) +{ + TT_FAIL(("The triggered callback did not fire or the machine is really slow (try increasing timeout).")); +} + +static void +trigger_eventcb(struct bufferevent *bev, short what, void *ctx) +{ + struct event_base *base = ctx; + if (what == ~0) { + TT_BLATHER(("Event successfully triggered.")); + event_base_loopexit(base, NULL); + return; + } + reader_eventcb(bev, what, ctx); +} + +static void +trigger_readcb_triggered(struct bufferevent *bev, void *ctx) +{ + TT_BLATHER(("Read successfully triggered.")); + n_reads_invoked++; + bufferevent_trigger_event(bev, ~0, bufferevent_trigger_test_flags); +} + +static void +trigger_readcb(struct bufferevent *bev, void *ctx) +{ + struct timeval timeout = { 30, 0 }; + struct event_base *base = ctx; + size_t low, high, len; + int expected_reads; + + TT_BLATHER(("Read invoked on %d.", (int)bufferevent_getfd(bev))); + expected_reads = ++n_reads_invoked; + + bufferevent_setcb(bev, trigger_readcb_triggered, NULL, trigger_eventcb, ctx); + + bufferevent_getwatermark(bev, EV_READ, &low, &high); + len = evbuffer_get_length(bufferevent_get_input(bev)); + + bufferevent_setwatermark(bev, EV_READ, len + 1, 0); + bufferevent_trigger(bev, EV_READ, bufferevent_trigger_test_flags); + /* no callback expected */ + tt_int_op(n_reads_invoked, ==, expected_reads); + + if ((bufferevent_trigger_test_flags & BEV_TRIG_DEFER_CALLBACKS) || + (bufferevent_connect_test_flags & BEV_OPT_DEFER_CALLBACKS)) { + /* will be deferred */ + } else { + expected_reads++; + } + + event_base_once(base, -1, EV_TIMEOUT, trigger_failure_cb, NULL, &timeout); + + bufferevent_trigger(bev, EV_READ, + bufferevent_trigger_test_flags | BEV_TRIG_IGNORE_WATERMARKS); + tt_int_op(n_reads_invoked, ==, expected_reads); + + bufferevent_setwatermark(bev, EV_READ, low, high); +end: + ; +} + +static void +test_bufferevent_trigger(void *arg) +{ + struct basic_test_data *data = arg; + struct evconnlistener *lev=NULL; + struct bufferevent *bev=NULL; + struct sockaddr_in localhost; + struct sockaddr_storage ss; + struct sockaddr *sa; + ev_socklen_t slen; + + int be_flags=BEV_OPT_CLOSE_ON_FREE; + int trig_flags=0; + + if 
(strstr((char*)data->setup_data, "defer")) { + be_flags |= BEV_OPT_DEFER_CALLBACKS; + } + bufferevent_connect_test_flags = be_flags; + + if (strstr((char*)data->setup_data, "postpone")) { + trig_flags |= BEV_TRIG_DEFER_CALLBACKS; + } + bufferevent_trigger_test_flags = trig_flags; + + memset(&localhost, 0, sizeof(localhost)); + + localhost.sin_port = 0; /* pick-a-port */ + localhost.sin_addr.s_addr = htonl(0x7f000001L); + localhost.sin_family = AF_INET; + sa = (struct sockaddr *)&localhost; + lev = evconnlistener_new_bind(data->base, listen_cb, data->base, + LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, + 16, sa, sizeof(localhost)); + tt_assert(lev); + + sa = (struct sockaddr *)&ss; + slen = sizeof(ss); + if (regress_get_listener_addr(lev, sa, &slen) < 0) { + tt_abort_perror("getsockname"); + } + + tt_assert(!evconnlistener_enable(lev)); + bev = bufferevent_socket_new(data->base, -1, be_flags); + tt_assert(bev); + bufferevent_setcb(bev, trigger_readcb, NULL, trigger_eventcb, data->base); + + bufferevent_enable(bev, EV_READ); + + tt_want(!bufferevent_socket_connect(bev, sa, sizeof(localhost))); + + event_base_dispatch(data->base); + + tt_int_op(n_reads_invoked, ==, 2); +end: + if (lev) + evconnlistener_free(lev); + + if (bev) + bufferevent_free(bev); +} + +static void +test_bufferevent_socket_filter_inactive(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev = NULL, *bevf = NULL; + + bev = bufferevent_socket_new(data->base, -1, 0); + tt_assert(bev); + bevf = bufferevent_filter_new(bev, NULL, NULL, 0, NULL, NULL); + tt_assert(bevf); + +end: + if (bevf) + bufferevent_free(bevf); + if (bev) + bufferevent_free(bev); +} + +static void +pair_flush_eventcb(struct bufferevent *bev, short what, void *ctx) +{ + int *callback_what = ctx; + *callback_what = what; +} + +static void +test_bufferevent_pair_flush(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *pair[2]; + struct bufferevent *bev1 = NULL; + struct bufferevent *bev2 = NULL; + int callback_what = 0; + + tt_assert(0 == bufferevent_pair_new(data->base, 0, pair)); + bev1 = pair[0]; + bev2 = pair[1]; + tt_assert(0 == bufferevent_enable(bev1, EV_WRITE)); + tt_assert(0 == bufferevent_enable(bev2, EV_READ)); + + bufferevent_setcb(bev2, NULL, NULL, pair_flush_eventcb, &callback_what); + + bufferevent_flush(bev1, EV_WRITE, BEV_FINISHED); + + event_base_loop(data->base, EVLOOP_ONCE); + + tt_assert(callback_what == (BEV_EVENT_READING | BEV_EVENT_EOF)); + +end: + if (bev1) + bufferevent_free(bev1); + if (bev2) + bufferevent_free(bev2); +} + +struct bufferevent_filter_data_stuck { + size_t header_size; + size_t total_read; +}; + +static void +bufferevent_filter_data_stuck_readcb(struct bufferevent *bev, void *arg) +{ + struct bufferevent_filter_data_stuck *filter_data = arg; + struct evbuffer *input = bufferevent_get_input(bev); + size_t read_size = evbuffer_get_length(input); + evbuffer_drain(input, read_size); + filter_data->total_read += read_size; +} + +/** + * This filter prepends header once before forwarding data. 
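Before the prepend-a-header filter that the comment above introduces, a minimal sketch (editorial, not part of the patch) of the filter API itself: bufferevent_filter_new() layers an input/output callback pair over an existing bufferevent, and the input callback moves data from the underlying buffer (src) into the filter's buffer (dst). copy_input and wrap_with_filter are illustrative names; <event2/bufferevent.h> and <event2/buffer.h> are assumed, and dst_limit handling is omitted for brevity.

/* Pass-through input filter: forward everything that is available. */
static enum bufferevent_filter_result
copy_input(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t dst_limit,
    enum bufferevent_flush_mode mode, void *ctx)
{
	evbuffer_add_buffer(dst, src);
	return BEV_OK;
}

/* BEV_OPT_CLOSE_ON_FREE makes freeing the filter free the wrapped bufferevent too. */
static struct bufferevent *
wrap_with_filter(struct bufferevent *underlying)
{
	return bufferevent_filter_new(underlying, copy_input, NULL,
	    BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
}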
+ */ +static enum bufferevent_filter_result +bufferevent_filter_data_stuck_inputcb( + struct evbuffer *src, struct evbuffer *dst, ev_ssize_t dst_limit, + enum bufferevent_flush_mode mode, void *ctx) +{ + struct bufferevent_filter_data_stuck *filter_data = ctx; + static int header_inserted = 0; + size_t payload_size; + size_t header_size = 0; + + if (!header_inserted) { + char *header = calloc(filter_data->header_size, 1); + evbuffer_add(dst, header, filter_data->header_size); + free(header); + header_size = filter_data->header_size; + header_inserted = 1; + } + + payload_size = evbuffer_get_length(src); + if (payload_size > dst_limit - header_size) { + payload_size = dst_limit - header_size; + } + + tt_int_op(payload_size, ==, evbuffer_remove_buffer(src, dst, payload_size)); + +end: + return BEV_OK; +} + +static void +test_bufferevent_filter_data_stuck(void *arg) +{ + const size_t read_high_wm = 4096; + struct bufferevent_filter_data_stuck filter_data; + struct basic_test_data *data = arg; + struct bufferevent *pair[2]; + struct bufferevent *filter = NULL; + + int options = BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS; + + char payload[4096]; + int payload_size = sizeof(payload); + + memset(&filter_data, 0, sizeof(filter_data)); + filter_data.header_size = 20; + + tt_assert(bufferevent_pair_new(data->base, options, pair) == 0); + + bufferevent_setwatermark(pair[0], EV_READ, 0, read_high_wm); + bufferevent_setwatermark(pair[1], EV_READ, 0, read_high_wm); + + tt_assert( + filter = + bufferevent_filter_new(pair[1], + bufferevent_filter_data_stuck_inputcb, + NULL, + options, + NULL, + &filter_data)); + + bufferevent_setcb(filter, + bufferevent_filter_data_stuck_readcb, + NULL, + NULL, + &filter_data); + + tt_assert(bufferevent_enable(filter, EV_READ|EV_WRITE) == 0); + + bufferevent_setwatermark(filter, EV_READ, 0, read_high_wm); + + tt_assert(bufferevent_write(pair[0], payload, sizeof(payload)) == 0); + + event_base_dispatch(data->base); + + tt_int_op(filter_data.total_read, ==, payload_size + filter_data.header_size); +end: + if (pair[0]) + bufferevent_free(pair[0]); + if (filter) + bufferevent_free(filter); +} + +struct testcase_t bufferevent_testcases[] = { + + LEGACY(bufferevent, TT_ISOLATED), + LEGACY(bufferevent_pair, TT_ISOLATED), + LEGACY(bufferevent_flush_normal, TT_ISOLATED), + LEGACY(bufferevent_flush_flush, TT_ISOLATED), + LEGACY(bufferevent_flush_finished, TT_ISOLATED), + LEGACY(bufferevent_pair_flush_normal, TT_ISOLATED), + LEGACY(bufferevent_pair_flush_flush, TT_ISOLATED), + LEGACY(bufferevent_pair_flush_finished, TT_ISOLATED), +#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED) + { "bufferevent_pair_release_lock", test_bufferevent_pair_release_lock, + TT_FORK|TT_ISOLATED|TT_NEED_THREADS|TT_NEED_BASE|TT_LEGACY|TT_NO_LOGS, + &basic_setup, NULL }, +#endif + LEGACY(bufferevent_watermarks, TT_ISOLATED), + LEGACY(bufferevent_pair_watermarks, TT_ISOLATED), + LEGACY(bufferevent_filters, TT_ISOLATED), + LEGACY(bufferevent_pair_filters, TT_ISOLATED), + LEGACY(bufferevent_filters_disable, TT_ISOLATED), + LEGACY(bufferevent_pair_filters_disable, TT_ISOLATED), + { "bufferevent_connect", test_bufferevent_connect, TT_FORK|TT_NEED_BASE, + &basic_setup, (void*)"" }, + { "bufferevent_connect_defer", test_bufferevent_connect, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"defer" }, + { "bufferevent_connect_lock", test_bufferevent_connect, + TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, (void*)"lock" }, + { "bufferevent_connect_lock_defer", test_bufferevent_connect, + 
TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, + (void*)"defer lock" }, + { "bufferevent_connect_unlocked_cbs", test_bufferevent_connect, + TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, + (void*)"lock defer unlocked" }, + { "bufferevent_connect_fail", test_bufferevent_connect_fail, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "bufferevent_timeout", test_bufferevent_timeouts, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"" }, + { "bufferevent_timeout_pair", test_bufferevent_timeouts, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"pair" }, + { "bufferevent_timeout_filter", test_bufferevent_timeouts, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"filter" }, + { "bufferevent_timeout_filter_pair", test_bufferevent_timeouts, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"filter pair" }, + { "bufferevent_trigger", test_bufferevent_trigger, TT_FORK|TT_NEED_BASE, + &basic_setup, (void*)"" }, + { "bufferevent_trigger_defer", test_bufferevent_trigger, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"defer" }, + { "bufferevent_trigger_postpone", test_bufferevent_trigger, + TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, + (void*)"postpone" }, + { "bufferevent_trigger_defer_postpone", test_bufferevent_trigger, + TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, + (void*)"defer postpone" }, +#ifdef EVENT__HAVE_LIBZ + LEGACY(bufferevent_zlib, TT_ISOLATED), +#else + { "bufferevent_zlib", NULL, TT_SKIP, NULL, NULL }, +#endif + + { "bufferevent_connect_fail_eventcb_defer", + test_bufferevent_connect_fail_eventcb, + TT_FORK|TT_NEED_BASE, &basic_setup, (void*)BEV_OPT_DEFER_CALLBACKS }, + { "bufferevent_connect_fail_eventcb", + test_bufferevent_connect_fail_eventcb, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + + { "bufferevent_socket_filter_inactive", + test_bufferevent_socket_filter_inactive, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "bufferevent_pair_flush", + test_bufferevent_pair_flush, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "bufferevent_filter_data_stuck", + test_bufferevent_filter_data_stuck, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + + END_OF_TESTCASES, +}; + +#define TT_IOCP (TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP) +#define TT_IOCP_LEGACY (TT_ISOLATED|TT_ENABLE_IOCP) +struct testcase_t bufferevent_iocp_testcases[] = { + LEGACY(bufferevent, TT_IOCP_LEGACY), + LEGACY(bufferevent_flush_normal, TT_ISOLATED), + LEGACY(bufferevent_flush_flush, TT_ISOLATED), + LEGACY(bufferevent_flush_finished, TT_ISOLATED), + LEGACY(bufferevent_watermarks, TT_IOCP_LEGACY), + LEGACY(bufferevent_filters, TT_IOCP_LEGACY), + LEGACY(bufferevent_filters_disable, TT_IOCP_LEGACY), + + { "bufferevent_connect", test_bufferevent_connect, + TT_IOCP, &basic_setup, (void*)"" }, + { "bufferevent_connect_defer", test_bufferevent_connect, + TT_IOCP, &basic_setup, (void*)"defer" }, + { "bufferevent_connect_lock", test_bufferevent_connect, + TT_IOCP, &basic_setup, (void*)"lock" }, + { "bufferevent_connect_lock_defer", test_bufferevent_connect, + TT_IOCP, &basic_setup, (void*)"defer lock" }, + { "bufferevent_connect_fail", test_bufferevent_connect_fail, + TT_IOCP, &basic_setup, NULL }, + { "bufferevent_connect_nonblocking", test_bufferevent_connect, + TT_IOCP, &basic_setup, (void*)"unset_connectex" }, + + { "bufferevent_connect_fail_eventcb_defer", + test_bufferevent_connect_fail_eventcb, + TT_IOCP, &basic_setup, (void*)BEV_OPT_DEFER_CALLBACKS }, + { "bufferevent_connect_fail_eventcb", + test_bufferevent_connect_fail_eventcb, TT_IOCP, &basic_setup, NULL }, + + END_OF_TESTCASES, +}; diff --git 
a/probe-busybox/libevent-2.1.11-stable/test/regress_dns.c b/probe-busybox/libevent-2.1.11-stable/test/regress_dns.c new file mode 100644 index 00000000..d2084b70 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_dns.c @@ -0,0 +1,2449 @@ +/* + * Copyright (c) 2003-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "../util-internal.h" + +#ifdef _WIN32 +#include <winsock2.h> +#include <windows.h> +#include <ws2tcpip.h> +#endif + +#include "event2/event-config.h" + +#include <sys/types.h> +#include <sys/stat.h> +#ifdef EVENT__HAVE_SYS_TIME_H +#include <sys/time.h> +#endif +#include <sys/queue.h> +#ifndef _WIN32 +#include <sys/socket.h> +#include <signal.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <unistd.h> +#endif +#ifdef EVENT__HAVE_NETINET_IN6_H +#include <netinet/in6.h> +#endif +#ifdef HAVE_NETDB_H +#include <netdb.h> +#endif +#include <fcntl.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> + +#ifdef EVENT__HAVE_SYS_RESOURCE_H +#include <sys/resource.h> +#endif + +#include "event2/dns.h" +#include "event2/dns_compat.h" +#include "event2/dns_struct.h" +#include "event2/event.h" +#include "event2/event_compat.h" +#include "event2/event_struct.h" +#include "event2/util.h" +#include "event2/listener.h" +#include "event2/bufferevent.h" +#include <event2/thread.h> +#include "log-internal.h" +#include "evthread-internal.h" +#include "regress.h" +#include "regress_testutils.h" +#include "regress_thread.h" + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) + +static int dns_ok = 0; +static int dns_got_cancel = 0; +static int dns_err = 0; + + +static void +dns_gethostbyname_cb(int result, char type, int count, int ttl, + void *addresses, void *arg) +{ + dns_ok = dns_err = 0; + + if (result == DNS_ERR_TIMEOUT) { + printf("[Timed out] "); + dns_err = result; + goto out; + } + + if (result != DNS_ERR_NONE) { + printf("[Error code %d] ", result); + goto out; + } + + TT_BLATHER(("type: %d, count: %d, ttl: %d: ", type, count, ttl)); + + switch (type) { + case DNS_IPv6_AAAA: { +#if defined(EVENT__HAVE_STRUCT_IN6_ADDR) && defined(EVENT__HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN) + struct in6_addr *in6_addrs = addresses; + char buf[INET6_ADDRSTRLEN+1]; + int i; + /* a resolution that's not valid does not help */ + if (ttl < 0) + goto out; + for (i = 0; i < count; ++i) { +
const char *b = evutil_inet_ntop(AF_INET6, &in6_addrs[i], buf,sizeof(buf)); + if (b) + TT_BLATHER(("%s ", b)); + else + TT_BLATHER(("%s ", strerror(errno))); + } +#endif + break; + } + case DNS_IPv4_A: { + struct in_addr *in_addrs = addresses; + int i; + /* a resolution that's not valid does not help */ + if (ttl < 0) + goto out; + for (i = 0; i < count; ++i) + TT_BLATHER(("%s ", inet_ntoa(in_addrs[i]))); + break; + } + case DNS_PTR: + /* may get at most one PTR */ + if (count != 1) + goto out; + + TT_BLATHER(("%s ", *(char **)addresses)); + break; + default: + goto out; + } + + dns_ok = type; + +out: + if (arg == NULL) + event_loopexit(NULL); + else + event_base_loopexit((struct event_base *)arg, NULL); +} + +static void +dns_gethostbyname(void) +{ + dns_ok = 0; + evdns_resolve_ipv4("www.monkey.org", 0, dns_gethostbyname_cb, NULL); + event_dispatch(); + + tt_int_op(dns_ok, ==, DNS_IPv4_A); + test_ok = dns_ok; +end: + ; +} + +static void +dns_gethostbyname6(void) +{ + dns_ok = 0; + evdns_resolve_ipv6("www.ietf.org", 0, dns_gethostbyname_cb, NULL); + event_dispatch(); + + if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) { + tt_skip(); + } + + tt_int_op(dns_ok, ==, DNS_IPv6_AAAA); + test_ok = 1; +end: + ; +} + +static void +dns_gethostbyaddr(void) +{ + struct in_addr in; + in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */ + dns_ok = 0; + evdns_resolve_reverse(&in, 0, dns_gethostbyname_cb, NULL); + event_dispatch(); + + tt_int_op(dns_ok, ==, DNS_PTR); + test_ok = dns_ok; +end: + ; +} + +static void +dns_resolve_reverse(void *ptr) +{ + struct in_addr in; + struct event_base *base = event_base_new(); + struct evdns_base *dns = evdns_base_new(base, EVDNS_BASE_INITIALIZE_NAMESERVERS); + struct evdns_request *req = NULL; + + tt_assert(base); + tt_assert(dns); + in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */ + dns_ok = 0; + + req = evdns_base_resolve_reverse( + dns, &in, 0, dns_gethostbyname_cb, base); + tt_assert(req); + + event_base_dispatch(base); + + tt_int_op(dns_ok, ==, DNS_PTR); + +end: + if (dns) + evdns_base_free(dns, 0); + if (base) + event_base_free(base); +} + +static int n_server_responses = 0; + +static void +dns_server_request_cb(struct evdns_server_request *req, void *data) +{ + int i, r; + const char TEST_ARPA[] = "11.11.168.192.in-addr.arpa"; + const char TEST_IN6[] = + "f.e.f.e." "0.0.0.0." "0.0.0.0." "1.1.1.1." + "a.a.a.a." "0.0.0.0." "0.0.0.0." 
"0.f.f.f.ip6.arpa"; + + for (i = 0; i < req->nquestions; ++i) { + const int qtype = req->questions[i]->type; + const int qclass = req->questions[i]->dns_question_class; + const char *qname = req->questions[i]->name; + + struct in_addr ans; + ans.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */ + if (qtype == EVDNS_TYPE_A && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, "zz.example.com")) { + r = evdns_server_request_add_a_reply(req, qname, + 1, &ans.s_addr, 12345); + if (r<0) + dns_ok = 0; + } else if (qtype == EVDNS_TYPE_AAAA && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, "zz.example.com")) { + char addr6[17] = "abcdefghijklmnop"; + r = evdns_server_request_add_aaaa_reply(req, + qname, 1, addr6, 123); + if (r<0) + dns_ok = 0; + } else if (qtype == EVDNS_TYPE_PTR && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, TEST_ARPA)) { + r = evdns_server_request_add_ptr_reply(req, NULL, + qname, "ZZ.EXAMPLE.COM", 54321); + if (r<0) + dns_ok = 0; + } else if (qtype == EVDNS_TYPE_PTR && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, TEST_IN6)){ + r = evdns_server_request_add_ptr_reply(req, NULL, + qname, + "ZZ-INET6.EXAMPLE.COM", 54322); + if (r<0) + dns_ok = 0; + } else if (qtype == EVDNS_TYPE_A && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, "drop.example.com")) { + if (evdns_server_request_drop(req)<0) + dns_ok = 0; + return; + } else { + printf("Unexpected question %d %d \"%s\" ", + qtype, qclass, qname); + dns_ok = 0; + } + } + r = evdns_server_request_respond(req, 0); + if (r<0) { + printf("Couldn't send reply. "); + dns_ok = 0; + } +} + +static void +dns_server_gethostbyname_cb(int result, char type, int count, int ttl, + void *addresses, void *arg) +{ + if (result == DNS_ERR_CANCEL) { + if (arg != (void*)(char*)90909) { + printf("Unexpected cancelation"); + dns_ok = 0; + } + dns_got_cancel = 1; + goto out; + } + if (result != DNS_ERR_NONE) { + printf("Unexpected result %d. ", result); + dns_ok = 0; + goto out; + } + if (count != 1) { + printf("Unexpected answer count %d. ", count); + dns_ok = 0; + goto out; + } + switch (type) { + case DNS_IPv4_A: { + struct in_addr *in_addrs = addresses; + if (in_addrs[0].s_addr != htonl(0xc0a80b0bUL) || ttl != 12345) { + printf("Bad IPv4 response \"%s\" %d. ", + inet_ntoa(in_addrs[0]), ttl); + dns_ok = 0; + goto out; + } + break; + } + case DNS_IPv6_AAAA: { +#if defined (EVENT__HAVE_STRUCT_IN6_ADDR) && defined(EVENT__HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN) + struct in6_addr *in6_addrs = addresses; + char buf[INET6_ADDRSTRLEN+1]; + if (memcmp(&in6_addrs[0].s6_addr, "abcdefghijklmnop", 16) + || ttl != 123) { + const char *b = evutil_inet_ntop(AF_INET6, &in6_addrs[0],buf,sizeof(buf)); + printf("Bad IPv6 response \"%s\" %d. ", b, ttl); + dns_ok = 0; + goto out; + } +#endif + break; + } + case DNS_PTR: { + char **addrs = addresses; + if (arg != (void*)6) { + if (strcmp(addrs[0], "ZZ.EXAMPLE.COM") || + ttl != 54321) { + printf("Bad PTR response \"%s\" %d. ", + addrs[0], ttl); + dns_ok = 0; + goto out; + } + } else { + if (strcmp(addrs[0], "ZZ-INET6.EXAMPLE.COM") || + ttl != 54322) { + printf("Bad ipv6 PTR response \"%s\" %d. ", + addrs[0], ttl); + dns_ok = 0; + goto out; + } + } + break; + } + default: + printf("Bad response type %d. 
", type); + dns_ok = 0; + } + out: + if (++n_server_responses == 3) { + event_loopexit(NULL); + } +} + +static void +dns_server(void) +{ + evutil_socket_t sock=-1; + struct sockaddr_in my_addr; + struct sockaddr_storage ss; + ev_socklen_t slen; + struct evdns_server_port *port=NULL; + struct in_addr resolve_addr; + struct in6_addr resolve_addr6; + struct evdns_base *base=NULL; + struct evdns_request *req=NULL; + + dns_ok = 1; + + base = evdns_base_new(NULL, 0); + + /* Now configure a nameserver port. */ + sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock<0) { + tt_abort_perror("socket"); + } + + evutil_make_socket_nonblocking(sock); + + memset(&my_addr, 0, sizeof(my_addr)); + my_addr.sin_family = AF_INET; + my_addr.sin_port = 0; /* kernel picks */ + my_addr.sin_addr.s_addr = htonl(0x7f000001UL); + if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr)) < 0) { + tt_abort_perror("bind"); + } + slen = sizeof(ss); + if (getsockname(sock, (struct sockaddr*)&ss, &slen) < 0) { + tt_abort_perror("getsockname"); + } + + port = evdns_add_server_port(sock, 0, dns_server_request_cb, NULL); + + /* Add ourself as the only nameserver, and make sure we really are + * the only nameserver. */ + evdns_base_nameserver_sockaddr_add(base, (struct sockaddr*)&ss, slen, 0); + tt_int_op(evdns_base_count_nameservers(base), ==, 1); + { + struct sockaddr_storage ss2; + int slen2; + + memset(&ss2, 0, sizeof(ss2)); + + slen2 = evdns_base_get_nameserver_addr(base, 0, (struct sockaddr *)&ss2, 3); + tt_int_op(slen2, ==, slen); + tt_int_op(ss2.ss_family, ==, 0); + slen2 = evdns_base_get_nameserver_addr(base, 0, (struct sockaddr *)&ss2, sizeof(ss2)); + tt_int_op(slen2, ==, slen); + tt_mem_op(&ss2, ==, &ss, slen); + + slen2 = evdns_base_get_nameserver_addr(base, 1, (struct sockaddr *)&ss2, sizeof(ss2)); + tt_int_op(-1, ==, slen2); + } + + /* Send some queries. 
*/ + evdns_base_resolve_ipv4(base, "zz.example.com", DNS_QUERY_NO_SEARCH, + dns_server_gethostbyname_cb, NULL); + evdns_base_resolve_ipv6(base, "zz.example.com", DNS_QUERY_NO_SEARCH, + dns_server_gethostbyname_cb, NULL); + resolve_addr.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */ + evdns_base_resolve_reverse(base, &resolve_addr, 0, + dns_server_gethostbyname_cb, NULL); + memcpy(resolve_addr6.s6_addr, + "\xff\xf0\x00\x00\x00\x00\xaa\xaa" + "\x11\x11\x00\x00\x00\x00\xef\xef", 16); + evdns_base_resolve_reverse_ipv6(base, &resolve_addr6, 0, + dns_server_gethostbyname_cb, (void*)6); + + req = evdns_base_resolve_ipv4(base, + "drop.example.com", DNS_QUERY_NO_SEARCH, + dns_server_gethostbyname_cb, (void*)(char*)90909); + + evdns_cancel_request(base, req); + + event_dispatch(); + + tt_assert(dns_got_cancel); + test_ok = dns_ok; + +end: + if (port) + evdns_close_server_port(port); + if (sock >= 0) + evutil_closesocket(sock); + if (base) + evdns_base_free(base, 0); +} + +static int n_replies_left; +static struct event_base *exit_base; +static struct evdns_server_port *exit_port; + +struct generic_dns_callback_result { + int result; + char type; + int count; + int ttl; + size_t addrs_len; + void *addrs; + char addrs_buf[256]; +}; + +static void +generic_dns_callback(int result, char type, int count, int ttl, void *addresses, + void *arg) +{ + size_t len; + struct generic_dns_callback_result *res = arg; + res->result = result; + res->type = type; + res->count = count; + res->ttl = ttl; + + if (type == DNS_IPv4_A) + len = count * 4; + else if (type == DNS_IPv6_AAAA) + len = count * 16; + else if (type == DNS_PTR) + len = strlen(addresses)+1; + else { + res->addrs_len = len = 0; + res->addrs = NULL; + } + if (len) { + res->addrs_len = len; + if (len > 256) + len = 256; + memcpy(res->addrs_buf, addresses, len); + res->addrs = res->addrs_buf; + } + + --n_replies_left; + if (n_replies_left == 0) { + if (exit_port) { + evdns_close_server_port(exit_port); + exit_port = NULL; + } else + event_base_loopexit(exit_base, NULL); + } +} + +static struct regress_dns_server_table search_table[] = { + { "host.a.example.com", "err", "3", 0, 0 }, + { "host.b.example.com", "err", "3", 0, 0 }, + { "host.c.example.com", "A", "11.22.33.44", 0, 0 }, + { "host2.a.example.com", "err", "3", 0, 0 }, + { "host2.b.example.com", "A", "200.100.0.100", 0, 0 }, + { "host2.c.example.com", "err", "3", 0, 0 }, + { "hostn.a.example.com", "errsoa", "0", 0, 0 }, + { "hostn.b.example.com", "errsoa", "3", 0, 0 }, + { "hostn.c.example.com", "err", "0", 0, 0 }, + + { "host", "err", "3", 0, 0 }, + { "host2", "err", "3", 0, 0 }, + { "*", "err", "3", 0, 0 }, + { NULL, NULL, NULL, 0, 0 } +}; +static void +dns_search_test_impl(void *arg, int lower) +{ + struct regress_dns_server_table table[ARRAY_SIZE(search_table)]; + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + + struct generic_dns_callback_result r[8]; + size_t i; + + for (i = 0; i < ARRAY_SIZE(table); ++i) { + table[i] = search_table[i]; + table[i].lower = lower; + } + + tt_assert(regress_dnsserver(base, &portnum, table)); + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, 0); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + + evdns_base_search_add(dns, "a.example.com"); + evdns_base_search_add(dns, "b.example.com"); + evdns_base_search_add(dns, "c.example.com"); + + n_replies_left = ARRAY_SIZE(r); + exit_base = base; + + 
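As a side note on the search-list behaviour being tested here, a small sketch (editorial, not part of the patch) of how a configured suffix interacts with the DNS_QUERY_NO_SEARCH flag; resolve_with_search_list is an illustrative name and <event2/dns.h> is assumed.

/* With a search domain configured, a short name may be retried with the
 * suffix appended; DNS_QUERY_NO_SEARCH sends the name exactly as given. */
static void
resolve_with_search_list(struct evdns_base *dns,
    void (*cb)(int, char, int, int, void *, void *), void *arg)
{
	evdns_base_search_add(dns, "c.example.com");

	/* May be qualified to "host.c.example.com" before an answer is found. */
	evdns_base_resolve_ipv4(dns, "host", 0, cb, arg);

	/* Sent verbatim, no suffixing. */
	evdns_base_resolve_ipv4(dns, "host", DNS_QUERY_NO_SEARCH, cb, arg);
}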
evdns_base_resolve_ipv4(dns, "host", 0, generic_dns_callback, &r[0]); + evdns_base_resolve_ipv4(dns, "host2", 0, generic_dns_callback, &r[1]); + evdns_base_resolve_ipv4(dns, "host", DNS_NO_SEARCH, generic_dns_callback, &r[2]); + evdns_base_resolve_ipv4(dns, "host2", DNS_NO_SEARCH, generic_dns_callback, &r[3]); + evdns_base_resolve_ipv4(dns, "host3", 0, generic_dns_callback, &r[4]); + evdns_base_resolve_ipv4(dns, "hostn.a.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[5]); + evdns_base_resolve_ipv4(dns, "hostn.b.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[6]); + evdns_base_resolve_ipv4(dns, "hostn.c.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[7]); + + event_base_dispatch(base); + + tt_int_op(r[0].type, ==, DNS_IPv4_A); + tt_int_op(r[0].count, ==, 1); + tt_int_op(((ev_uint32_t*)r[0].addrs)[0], ==, htonl(0x0b16212c)); + tt_int_op(r[1].type, ==, DNS_IPv4_A); + tt_int_op(r[1].count, ==, 1); + tt_int_op(((ev_uint32_t*)r[1].addrs)[0], ==, htonl(0xc8640064)); + tt_int_op(r[2].result, ==, DNS_ERR_NOTEXIST); + tt_int_op(r[3].result, ==, DNS_ERR_NOTEXIST); + tt_int_op(r[4].result, ==, DNS_ERR_NOTEXIST); + tt_int_op(r[5].result, ==, DNS_ERR_NODATA); + tt_int_op(r[5].ttl, ==, 42); + tt_int_op(r[6].result, ==, DNS_ERR_NOTEXIST); + tt_int_op(r[6].ttl, ==, 42); + tt_int_op(r[7].result, ==, DNS_ERR_NODATA); + tt_int_op(r[7].ttl, ==, 0); + +end: + if (dns) + evdns_base_free(dns, 0); + + regress_clean_dnsserver(); +} +static void +dns_search_empty_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + + dns = evdns_base_new(base, 0); + + evdns_base_search_add(dns, "whatever.example.com"); + + n_replies_left = 1; + exit_base = base; + + tt_ptr_op(evdns_base_resolve_ipv4(dns, "", 0, generic_dns_callback, NULL), ==, NULL); + +end: + if (dns) + evdns_base_free(dns, 0); +} +static void dns_search_test(void *arg) { dns_search_test_impl(arg, 0); } +static void dns_search_lower_test(void *arg) { dns_search_test_impl(arg, 1); } + +static int request_count = 0; +static struct evdns_request *current_req = NULL; + +static void +search_cancel_server_cb(struct evdns_server_request *req, void *data) +{ + const char *question; + + if (req->nquestions != 1) + TT_DIE(("Only handling one question at a time; got %d", + req->nquestions)); + + question = req->questions[0]->name; + + TT_BLATHER(("got question, %s", question)); + + tt_assert(request_count > 0); + tt_assert(!evdns_server_request_respond(req, 3)); + + if (!--request_count) + evdns_cancel_request(NULL, current_req); + +end: + ; +} + +static void +dns_search_cancel_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + struct evdns_server_port *port = NULL; + ev_uint16_t portnum = 0; + struct generic_dns_callback_result r1; + char buf[64]; + + port = regress_get_dnsserver(base, &portnum, NULL, + search_cancel_server_cb, NULL); + tt_assert(port); + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, 0); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + + evdns_base_search_add(dns, "a.example.com"); + evdns_base_search_add(dns, "b.example.com"); + evdns_base_search_add(dns, "c.example.com"); + evdns_base_search_add(dns, "d.example.com"); + + exit_base = base; + request_count = 3; + n_replies_left = 1; + + current_req = evdns_base_resolve_ipv4(dns, "host", 0, + generic_dns_callback, &r1); + event_base_dispatch(base); + + tt_int_op(r1.result, ==, 
DNS_ERR_CANCEL); + +end: + if (port) + evdns_close_server_port(port); + if (dns) + evdns_base_free(dns, 0); +} + +static void +fail_server_cb(struct evdns_server_request *req, void *data) +{ + const char *question; + int *count = data; + struct in_addr in; + + /* Drop the first N requests that we get. */ + if (*count > 0) { + --*count; + tt_want(! evdns_server_request_drop(req)); + return; + } + + if (req->nquestions != 1) + TT_DIE(("Only handling one question at a time; got %d", + req->nquestions)); + + question = req->questions[0]->name; + + if (!evutil_ascii_strcasecmp(question, "google.com")) { + /* Detect a probe, and get out of the loop. */ + event_base_loopexit(exit_base, NULL); + } + + tt_assert(evutil_inet_pton(AF_INET, "16.32.64.128", &in)); + evdns_server_request_add_a_reply(req, question, 1, &in.s_addr, + 100); + tt_assert(! evdns_server_request_respond(req, 0)) + return; +end: + tt_want(! evdns_server_request_drop(req)); +} + +static void +dns_retry_test_impl(void *arg, int flags) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_server_port *port = NULL; + struct evdns_base *dns = NULL; + int drop_count = 2; + ev_uint16_t portnum = 0; + char buf[64]; + + struct generic_dns_callback_result r1; + + port = regress_get_dnsserver(base, &portnum, NULL, + fail_server_cb, &drop_count); + tt_assert(port); + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, flags); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + tt_assert(! evdns_base_set_option(dns, "timeout", "0.2")); + tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "10")); + tt_assert(! evdns_base_set_option(dns, "initial-probe-timeout", "0.1")); + + evdns_base_resolve_ipv4(dns, "host.example.com", 0, + generic_dns_callback, &r1); + + n_replies_left = 1; + exit_base = base; + + event_base_dispatch(base); + + tt_int_op(drop_count, ==, 0); + + tt_int_op(r1.type, ==, DNS_IPv4_A); + tt_int_op(r1.count, ==, 1); + tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0x10204080)); + + /* Now try again, but this time have the server get treated as + * failed, so we can send it a test probe. */ + drop_count = 4; + tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "2")); + tt_assert(! evdns_base_set_option(dns, "attempts:", "3")); + memset(&r1, 0, sizeof(r1)); + + evdns_base_resolve_ipv4(dns, "host.example.com", 0, + generic_dns_callback, &r1); + + n_replies_left = 2; + + /* This will run until it answers the "google.com" probe request. */ + event_base_dispatch(base); + + /* We'll treat the server as failed here. */ + tt_int_op(r1.result, ==, DNS_ERR_TIMEOUT); + + /* It should work this time. */ + tt_int_op(drop_count, ==, 0); + evdns_base_resolve_ipv4(dns, "host.example.com", 0, + generic_dns_callback, &r1); + + event_base_dispatch(base); + tt_int_op(r1.result, ==, DNS_ERR_NONE); + tt_int_op(r1.type, ==, DNS_IPv4_A); + tt_int_op(r1.count, ==, 1); + tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0x10204080)); + +end: + if (dns) + evdns_base_free(dns, 0); + if (port) + evdns_close_server_port(port); +} +static void +dns_retry_test(void *arg) +{ + dns_retry_test_impl(arg, 0); +} +static void +dns_retry_disable_when_inactive_test(void *arg) +{ + dns_retry_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE); +} + +static struct regress_dns_server_table internal_error_table[] = { + /* Error 4 (NOTIMPL) makes us reissue the request to another server + if we can. + + XXXX we should reissue under a much wider set of circumstances! 
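The retry behaviour exercised above is driven entirely by string options on the evdns_base; a brief sketch (editorial, not part of the patch), with tune_retries as an illustrative name and <event2/dns.h> assumed.

/* Typical knobs for the retry/probe logic tested in dns_retry_test_impl(). */
static void
tune_retries(struct evdns_base *dns)
{
	evdns_base_set_option(dns, "timeout", "0.2");    /* per-attempt timeout in seconds */
	evdns_base_set_option(dns, "max-timeouts", "2"); /* timeouts before a nameserver is marked down */
	evdns_base_set_option(dns, "attempts", "3");     /* total attempts before the request fails */
}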
+ */ + { "foof.example.com", "err", "4", 0, 0 }, + { NULL, NULL, NULL, 0, 0 } +}; + +static struct regress_dns_server_table reissue_table[] = { + { "foof.example.com", "A", "240.15.240.15", 0, 0 }, + { NULL, NULL, NULL, 0, 0 } +}; + +static void +dns_reissue_test_impl(void *arg, int flags) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_server_port *port1 = NULL, *port2 = NULL; + struct evdns_base *dns = NULL; + struct generic_dns_callback_result r1; + ev_uint16_t portnum1 = 0, portnum2=0; + char buf1[64], buf2[64]; + + port1 = regress_get_dnsserver(base, &portnum1, NULL, + regress_dns_server_cb, internal_error_table); + tt_assert(port1); + port2 = regress_get_dnsserver(base, &portnum2, NULL, + regress_dns_server_cb, reissue_table); + tt_assert(port2); + evutil_snprintf(buf1, sizeof(buf1), "127.0.0.1:%d", (int)portnum1); + evutil_snprintf(buf2, sizeof(buf2), "127.0.0.1:%d", (int)portnum2); + + dns = evdns_base_new(base, flags); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf1)); + tt_assert(! evdns_base_set_option(dns, "timeout:", "0.3")); + tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "2")); + tt_assert(! evdns_base_set_option(dns, "attempts:", "5")); + + memset(&r1, 0, sizeof(r1)); + evdns_base_resolve_ipv4(dns, "foof.example.com", 0, + generic_dns_callback, &r1); + + /* Add this after, so that we are sure to get a reissue. */ + tt_assert(!evdns_base_nameserver_ip_add(dns, buf2)); + + n_replies_left = 1; + exit_base = base; + + event_base_dispatch(base); + tt_int_op(r1.result, ==, DNS_ERR_NONE); + tt_int_op(r1.type, ==, DNS_IPv4_A); + tt_int_op(r1.count, ==, 1); + tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0xf00ff00f)); + + /* Make sure we dropped at least once. */ + tt_int_op(internal_error_table[0].seen, >, 0); + +end: + if (dns) + evdns_base_free(dns, 0); + if (port1) + evdns_close_server_port(port1); + if (port2) + evdns_close_server_port(port2); +} +static void +dns_reissue_test(void *arg) +{ + dns_reissue_test_impl(arg, 0); +} +static void +dns_reissue_disable_when_inactive_test(void *arg) +{ + dns_reissue_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE); +} + +#if 0 +static void +dumb_bytes_fn(char *p, size_t n) +{ + unsigned i; + /* This gets us 6 bits of entropy per transaction ID, which means we + * will have probably have collisions and need to pick again. */ + for (i=0;i<n;++i) + p[i] = random() & 7; +} +#endif + +static void +dns_inflight_test_impl(void *arg, int flags) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + struct evdns_server_port *dns_port = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + int disable_when_inactive = flags & EVDNS_BASE_DISABLE_WHEN_INACTIVE; + + struct generic_dns_callback_result r[20]; + int i; + + dns_port = regress_get_dnsserver(base, &portnum, NULL, + regress_dns_server_cb, reissue_table); + tt_assert(dns_port); + if (disable_when_inactive) { + exit_port = dns_port; + } + + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, flags); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + tt_assert(! evdns_base_set_option(dns, "max-inflight:", "3")); + tt_assert(!
evdns_base_set_option(dns, "randomize-case:", "0")); + + for (i=0;i<20;++i) + evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r[i]); + + n_replies_left = 20; + exit_base = base; + + event_base_dispatch(base); + + for (i=0;i<20;++i) { + tt_int_op(r[i].type, ==, DNS_IPv4_A); + tt_int_op(r[i].count, ==, 1); + tt_int_op(((ev_uint32_t*)r[i].addrs)[0], ==, htonl(0xf00ff00f)); + } + +end: + if (dns) + evdns_base_free(dns, 0); + if (exit_port) { + evdns_close_server_port(exit_port); + exit_port = NULL; + } else if (! disable_when_inactive) { + evdns_close_server_port(dns_port); + } +} + +static void +dns_inflight_test(void *arg) +{ + dns_inflight_test_impl(arg, 0); +} + +static void +dns_disable_when_inactive_test(void *arg) +{ + dns_inflight_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE); +} + +static void +dns_disable_when_inactive_no_ns_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base, *inactive_base; + struct evdns_base *dns = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + struct generic_dns_callback_result r; + + inactive_base = event_base_new(); + tt_assert(inactive_base); + + /** Create dns server with inactive base, to avoid replying to clients */ + tt_assert(regress_dnsserver(inactive_base, &portnum, search_table)); + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + tt_assert(! evdns_base_set_option(dns, "timeout:", "0.1")); + + evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r); + n_replies_left = 1; + exit_base = base; + + event_base_dispatch(base); + + tt_int_op(n_replies_left, ==, 0); + + tt_int_op(r.result, ==, DNS_ERR_TIMEOUT); + tt_int_op(r.count, ==, 0); + tt_ptr_op(r.addrs, ==, NULL); + +end: + if (dns) + evdns_base_free(dns, 0); + regress_clean_dnsserver(); + if (inactive_base) + event_base_free(inactive_base); +} + +static void +dns_initialize_nameservers_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + + dns = evdns_base_new(base, 0); + tt_assert(dns); + tt_int_op(evdns_base_get_nameserver_addr(dns, 0, NULL, 0), ==, -1); + evdns_base_free(dns, 0); + + dns = evdns_base_new(base, EVDNS_BASE_INITIALIZE_NAMESERVERS); + tt_assert(dns); + tt_int_op(evdns_base_get_nameserver_addr(dns, 0, NULL, 0), ==, sizeof(struct sockaddr)); + +end: + if (dns) + evdns_base_free(dns, 0); +} +#ifndef _WIN32 +#define RESOLV_FILE "empty-resolv.conf" +static void +dns_nameservers_no_default_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + int ok = access(RESOLV_FILE, R_OK); + + tt_assert(ok); + + dns = evdns_base_new(base, 0); + tt_assert(dns); + tt_int_op(evdns_base_get_nameserver_addr(dns, 0, NULL, 0), ==, -1); + + /* We cannot test + * EVDNS_BASE_INITIALIZE_NAMESERVERS|EVDNS_BASE_NAMESERVERS_NO_DEFAULT + * because we cannot mock "/etc/resolv.conf" (yet). 
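For context on the resolv.conf handling checked here, a small sketch (editorial, not part of the patch): evdns_base_resolv_conf_parse() reads nameservers from a file, and DNS_OPTION_NAMESERVERS_NO_DEFAULT suppresses the 127.0.0.1 fallback that is otherwise installed when the file lists no nameservers, which is what the assertions below rely on. load_resolv_conf is an illustrative name and <event2/dns.h> is assumed.

/* Returns 0 on success, as evdns_base_resolv_conf_parse() does. */
static int
load_resolv_conf(struct evdns_base *dns, const char *path, int allow_default)
{
	int flags = DNS_OPTIONS_ALL;
	if (!allow_default)
		flags |= DNS_OPTION_NAMESERVERS_NO_DEFAULT;
	return evdns_base_resolv_conf_parse(dns, flags, path);
}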
*/ + + evdns_base_resolv_conf_parse(dns, + DNS_OPTIONS_ALL|DNS_OPTION_NAMESERVERS_NO_DEFAULT, RESOLV_FILE); + tt_int_op(evdns_base_get_nameserver_addr(dns, 0, NULL, 0), ==, -1); + + evdns_base_resolv_conf_parse(dns, DNS_OPTIONS_ALL, RESOLV_FILE); + tt_int_op(evdns_base_get_nameserver_addr(dns, 0, NULL, 0), ==, sizeof(struct sockaddr)); + +end: + if (dns) + evdns_base_free(dns, 0); +} +#endif + +/* === Test for bufferevent_socket_connect_hostname */ + +static int total_connected_or_failed = 0; +static int total_n_accepted = 0; +static struct event_base *be_connect_hostname_base = NULL; + +/* Implements a DNS server for the connect_hostname test and the + * getaddrinfo_async test */ +static void +be_getaddrinfo_server_cb(struct evdns_server_request *req, void *data) +{ + int i; + int *n_got_p=data; + int added_any=0; + ++*n_got_p; + + for (i = 0; i < req->nquestions; ++i) { + const int qtype = req->questions[i]->type; + const int qclass = req->questions[i]->dns_question_class; + const char *qname = req->questions[i]->name; + struct in_addr ans; + struct in6_addr ans6; + memset(&ans6, 0, sizeof(ans6)); + + TT_BLATHER(("Got question about %s, type=%d", qname, qtype)); + + if (qtype == EVDNS_TYPE_A && + qclass == EVDNS_CLASS_INET && + !evutil_ascii_strcasecmp(qname, "nobodaddy.example.com")) { + ans.s_addr = htonl(0x7f000001); + evdns_server_request_add_a_reply(req, qname, + 1, &ans.s_addr, 2000); + added_any = 1; + } else if (!evutil_ascii_strcasecmp(qname, + "nosuchplace.example.com")) { + /* ok, just say notfound. */ + } else if (!evutil_ascii_strcasecmp(qname, + "both.example.com")) { + if (qtype == EVDNS_TYPE_A) { + ans.s_addr = htonl(0x50502020); + evdns_server_request_add_a_reply(req, qname, + 1, &ans.s_addr, 2000); + added_any = 1; + } else if (qtype == EVDNS_TYPE_AAAA) { + ans6.s6_addr[0] = 0x80; + ans6.s6_addr[1] = 0xff; + ans6.s6_addr[14] = 0xbb; + ans6.s6_addr[15] = 0xbb; + evdns_server_request_add_aaaa_reply(req, qname, + 1, &ans6.s6_addr, 2000); + added_any = 1; + } + evdns_server_request_add_cname_reply(req, qname, + "both-canonical.example.com", 1000); + } else if (!evutil_ascii_strcasecmp(qname, + "v4only.example.com") || + !evutil_ascii_strcasecmp(qname, "v4assert.example.com")) { + if (qtype == EVDNS_TYPE_A) { + ans.s_addr = htonl(0x12345678); + evdns_server_request_add_a_reply(req, qname, + 1, &ans.s_addr, 2000); + added_any = 1; + } else if (!evutil_ascii_strcasecmp(qname, + "v4assert.example.com")) { + TT_FAIL(("Got an AAAA request for v4assert")); + } + } else if (!evutil_ascii_strcasecmp(qname, + "v6only.example.com") || + !evutil_ascii_strcasecmp(qname, "v6assert.example.com")) { + if (qtype == EVDNS_TYPE_AAAA) { + ans6.s6_addr[0] = 0x0b; + ans6.s6_addr[1] = 0x0b; + ans6.s6_addr[14] = 0xf0; + ans6.s6_addr[15] = 0x0d; + evdns_server_request_add_aaaa_reply(req, qname, + 1, &ans6.s6_addr, 2000); + added_any = 1; + } else if (!evutil_ascii_strcasecmp(qname, + "v6assert.example.com")) { + TT_FAIL(("Got a A request for v6assert")); + } + } else if (!evutil_ascii_strcasecmp(qname, + "v6timeout.example.com")) { + if (qtype == EVDNS_TYPE_A) { + ans.s_addr = htonl(0xabcdef01); + evdns_server_request_add_a_reply(req, qname, + 1, &ans.s_addr, 2000); + added_any = 1; + } else if (qtype == EVDNS_TYPE_AAAA) { + /* Let the v6 request time out.*/ + evdns_server_request_drop(req); + return; + } + } else if (!evutil_ascii_strcasecmp(qname, + "v4timeout.example.com")) { + if (qtype == EVDNS_TYPE_AAAA) { + ans6.s6_addr[0] = 0x0a; + ans6.s6_addr[1] = 0x0a; + ans6.s6_addr[14] = 0xff; + 
ans6.s6_addr[15] = 0x01; + evdns_server_request_add_aaaa_reply(req, qname, + 1, &ans6.s6_addr, 2000); + added_any = 1; + } else if (qtype == EVDNS_TYPE_A) { + /* Let the v4 request time out.*/ + evdns_server_request_drop(req); + return; + } + } else if (!evutil_ascii_strcasecmp(qname, + "v6timeout-nonexist.example.com")) { + if (qtype == EVDNS_TYPE_A) { + /* Fall through, give an nexist. */ + } else if (qtype == EVDNS_TYPE_AAAA) { + /* Let the v6 request time out.*/ + evdns_server_request_drop(req); + return; + } + } else if (!evutil_ascii_strcasecmp(qname, + "all-timeout.example.com")) { + /* drop all requests */ + evdns_server_request_drop(req); + return; + } else { + TT_GRIPE(("Got weird request for %s",qname)); + } + } + if (added_any) { + TT_BLATHER(("answering")); + evdns_server_request_respond(req, 0); + } else { + TT_BLATHER(("saying nexist.")); + evdns_server_request_respond(req, 3); + } +} + +/* Implements a listener for connect_hostname test. */ +static void +nil_accept_cb(struct evconnlistener *l, evutil_socket_t fd, struct sockaddr *s, + int socklen, void *arg) +{ + int *p = arg; + (*p)++; + ++total_n_accepted; + /* don't do anything with the socket; let it close when we exit() */ + if (total_n_accepted >= 3 && total_connected_or_failed >= 5) + event_base_loopexit(be_connect_hostname_base, + NULL); +} + +struct be_conn_hostname_result { + int dnserr; + int what; +}; + +/* Bufferevent event callback for the connect_hostname test: remembers what + * event we got. */ +static void +be_connect_hostname_event_cb(struct bufferevent *bev, short what, void *ctx) +{ + struct be_conn_hostname_result *got = ctx; + + if (got->what) { + TT_FAIL(("Two events on one bufferevent. %d,%d", + got->what, (int)what)); + } + + TT_BLATHER(("Got a bufferevent event %d", what)); + got->what = what; + + if ((what & BEV_EVENT_CONNECTED) || (what & BEV_EVENT_ERROR)) { + int expected = 3; + int r = bufferevent_socket_get_dns_error(bev); + + if (r) { + got->dnserr = r; + TT_BLATHER(("DNS error %d: %s", r, + evutil_gai_strerror(r))); + } + ++total_connected_or_failed; + TT_BLATHER(("Got %d connections or errors.", total_connected_or_failed)); + + /** emfile test */ + if (errno == EMFILE) { + expected = 0; + } + + if (total_n_accepted >= expected && total_connected_or_failed >= 5) + event_base_loopexit(be_connect_hostname_base, + NULL); + } +} + +static void +test_bufferevent_connect_hostname(void *arg) +{ + struct basic_test_data *data = arg; + struct evconnlistener *listener = NULL; + struct bufferevent *be[5]; + struct be_conn_hostname_result be_outcome[ARRAY_SIZE(be)]; + int expect_err; + struct evdns_base *dns=NULL; + struct evdns_server_port *port=NULL; + struct sockaddr_in sin; + int listener_port=-1; + ev_uint16_t dns_port=0; + int n_accept=0, n_dns=0; + char buf[128]; + int emfile = data->setup_data && !strcmp(data->setup_data, "emfile"); + int success = BEV_EVENT_CONNECTED; + int default_error = 0; + unsigned i; + int ret; + + if (emfile) { + success = BEV_EVENT_ERROR; +#if defined(__linux__) + /* on linux glibc/musl reports EAI_SYSTEM, when getaddrinfo() cannot + * open file for resolving service. */ + default_error = EVUTIL_EAI_SYSTEM; +#elif defined(__sun__) + /* on solaris it returns EAI_FAIL */ + default_error = EVUTIL_EAI_FAIL; + /** the DP_POLL can also fail with EINVAL under EMFILE */ +#else + /* on osx/freebsd it returns EAI_NONAME */ + default_error = EVUTIL_EAI_NONAME; +#endif + } + + be_connect_hostname_base = data->base; + + /* Bind an address and figure out what port it's on. 
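The bind-to-port-zero trick used below comes up in several of these tests; a compact sketch (editorial, not part of the patch) of the pattern, with listen_on_ephemeral_port and accept_cb as illustrative names and <event2/listener.h> plus the usual socket headers assumed.

/* Bind 127.0.0.1:0 and return the port the kernel actually chose. */
static int
listen_on_ephemeral_port(struct event_base *base, evconnlistener_cb accept_cb,
    void *arg, struct evconnlistener **out)
{
	struct sockaddr_in sin;
	struct sockaddr_storage bound;
	ev_socklen_t blen = sizeof(bound);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
	sin.sin_port = 0;                        /* let the kernel choose */

	*out = evconnlistener_new_bind(base, accept_cb, arg,
	    LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
	    (struct sockaddr *)&sin, sizeof(sin));
	if (!*out)
		return -1;
	if (getsockname(evconnlistener_get_fd(*out),
	    (struct sockaddr *)&bound, &blen) < 0)
		return -1;
	return ntohs(((struct sockaddr_in *)&bound)->sin_port);
}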
*/ + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */ + sin.sin_port = 0; + listener = evconnlistener_new_bind(data->base, nil_accept_cb, + &n_accept, + LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC, + -1, (struct sockaddr *)&sin, sizeof(sin)); + tt_assert(listener); + listener_port = regress_get_socket_port( + evconnlistener_get_fd(listener)); + + port = regress_get_dnsserver(data->base, &dns_port, NULL, + be_getaddrinfo_server_cb, &n_dns); + tt_assert(port); + tt_int_op(dns_port, >=, 0); + + /* Start an evdns_base that uses the server as its resolver. */ + dns = evdns_base_new(data->base, 0); + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)dns_port); + evdns_base_nameserver_ip_add(dns, buf); + +#ifdef EVENT__HAVE_SETRLIMIT + if (emfile) { + int fd = socket(AF_INET, SOCK_STREAM, 0); + struct rlimit file = { fd, fd }; + + tt_int_op(fd, >=, 0); + tt_assert(!close(fd)); + + tt_assert(!setrlimit(RLIMIT_NOFILE, &file)); + } +#endif + + /* Now, finally, at long last, launch the bufferevents. One should do + * a failing lookup IP, one should do a successful lookup by IP, + * and one should do a successful lookup by hostname. */ + for (i = 0; i < ARRAY_SIZE(be); ++i) { + memset(&be_outcome[i], 0, sizeof(be_outcome[i])); + be[i] = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE); + bufferevent_setcb(be[i], NULL, NULL, be_connect_hostname_event_cb, + &be_outcome[i]); + } + + /* Use the blocking resolver. This one will fail if your resolver + * can't resolve localhost to 127.0.0.1 */ + tt_assert(!bufferevent_socket_connect_hostname(be[3], NULL, AF_INET, + "localhost", listener_port)); + /* Use the blocking resolver with a nonexistent hostname. */ + tt_assert(!bufferevent_socket_connect_hostname(be[4], NULL, AF_INET, + "nonesuch.nowhere.example.com", 80)); + { + /* The blocking resolver will use the system nameserver, which + * might tell us anything. (Yes, some twits even pretend that + * example.com is real.) Let's see what answer to expect. */ + struct evutil_addrinfo hints, *ai = NULL; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = IPPROTO_TCP; + expect_err = evutil_getaddrinfo( + "nonesuch.nowhere.example.com", "80", &hints, &ai); + } + /* Launch an async resolve that will fail. */ + tt_assert(!bufferevent_socket_connect_hostname(be[0], dns, AF_INET, + "nosuchplace.example.com", listener_port)); + /* Connect to the IP without resolving. */ + tt_assert(!bufferevent_socket_connect_hostname(be[1], dns, AF_INET, + "127.0.0.1", listener_port)); + /* Launch an async resolve that will succeed. 
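A condensed sketch (editorial, not part of the patch) of the name-based connect path these bufferevents use: the resolve and the TCP connect are combined, and a failed lookup is reported through the event callback, where bufferevent_socket_get_dns_error() distinguishes it from a socket error. connect_by_name and connect_event_cb are illustrative names; <event2/bufferevent.h>, <event2/dns.h> and <stdio.h> are assumed.

static void
connect_event_cb(struct bufferevent *bev, short what, void *ctx)
{
	if (what & BEV_EVENT_ERROR) {
		int dns_err = bufferevent_socket_get_dns_error(bev);
		if (dns_err)
			printf("resolve failed: %s\n", evutil_gai_strerror(dns_err));
	}
}

static struct bufferevent *
connect_by_name(struct event_base *base, struct evdns_base *dns,
    const char *host, int port)
{
	struct bufferevent *bev =
	    bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);

	bufferevent_setcb(bev, NULL, NULL, connect_event_cb, NULL);
	if (bufferevent_socket_connect_hostname(bev, dns, AF_INET, host, port) < 0) {
		bufferevent_free(bev);
		return NULL;
	}
	return bev;
}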
*/ + tt_assert(!bufferevent_socket_connect_hostname(be[2], dns, AF_INET, + "nobodaddy.example.com", listener_port)); + + ret = event_base_dispatch(data->base); +#ifdef __sun__ + if (emfile && !strcmp(event_base_get_method(data->base), "devpoll")) { + tt_int_op(ret, ==, -1); + /** DP_POLL failed */ + tt_skip(); + } else +#endif + { + tt_int_op(ret, ==, 0); + } + + tt_int_op(be_outcome[0].what, ==, BEV_EVENT_ERROR); + tt_int_op(be_outcome[0].dnserr, ==, EVUTIL_EAI_NONAME); + tt_int_op(be_outcome[1].what, ==, success); + tt_int_op(be_outcome[1].dnserr, ==, 0); + tt_int_op(be_outcome[2].what, ==, success); + tt_int_op(be_outcome[2].dnserr, ==, 0); + tt_int_op(be_outcome[3].what, ==, success); + tt_int_op(be_outcome[3].dnserr, ==, default_error); + if (expect_err) { + tt_int_op(be_outcome[4].what, ==, BEV_EVENT_ERROR); + tt_int_op(be_outcome[4].dnserr, ==, expect_err); + } + + if (emfile) { + tt_int_op(n_accept, ==, 0); + } else { + tt_int_op(n_accept, ==, 3); + } + tt_int_op(n_dns, ==, 2); + +end: + if (listener) + evconnlistener_free(listener); + if (port) + evdns_close_server_port(port); + if (dns) + evdns_base_free(dns, 0); + for (i = 0; i < ARRAY_SIZE(be); ++i) { + if (be[i]) + bufferevent_free(be[i]); + } +} + + +struct gai_outcome { + int err; + struct evutil_addrinfo *ai; +}; + +static int n_gai_results_pending = 0; +static struct event_base *exit_base_on_no_pending_results = NULL; + +static void +gai_cb(int err, struct evutil_addrinfo *res, void *ptr) +{ + struct gai_outcome *go = ptr; + go->err = err; + go->ai = res; + if (--n_gai_results_pending <= 0 && exit_base_on_no_pending_results) + event_base_loopexit(exit_base_on_no_pending_results, NULL); + if (n_gai_results_pending < 900) + TT_BLATHER(("Got an answer; expecting %d more.", + n_gai_results_pending)); +} + +static void +cancel_gai_cb(evutil_socket_t fd, short what, void *ptr) +{ + struct evdns_getaddrinfo_request *r = ptr; + evdns_getaddrinfo_cancel(r); +} + +static void +test_getaddrinfo_async(void *arg) +{ + struct basic_test_data *data = arg; + struct evutil_addrinfo hints, *a; + struct gai_outcome local_outcome; + struct gai_outcome a_out[12]; + unsigned i; + struct evdns_getaddrinfo_request *r; + char buf[128]; + struct evdns_server_port *port = NULL; + ev_uint16_t dns_port = 0; + int n_dns_questions = 0; + struct evdns_base *dns_base; + + memset(a_out, 0, sizeof(a_out)); + memset(&local_outcome, 0, sizeof(local_outcome)); + + dns_base = evdns_base_new(data->base, 0); + tt_assert(dns_base); + + /* for localhost */ + evdns_base_load_hosts(dns_base, NULL); + + tt_assert(! evdns_base_set_option(dns_base, "timeout", "0.3")); + tt_assert(! evdns_base_set_option(dns_base, "getaddrinfo-allow-skew", "0.2")); + + n_gai_results_pending = 10000; /* don't think about exiting yet. */ + + /* 1. Try some cases that will never hit the asynchronous resolver. */ + /* 1a. Simple case with a symbolic service name */ + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + memset(&local_outcome, 0, sizeof(local_outcome)); + r = evdns_getaddrinfo(dns_base, "1.2.3.4", "http", + &hints, gai_cb, &local_outcome); + tt_assert(! r); + if (!local_outcome.err) { + tt_ptr_op(local_outcome.ai,!=,NULL); + test_ai_eq(local_outcome.ai, "1.2.3.4:80", SOCK_STREAM, IPPROTO_TCP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + } else { + TT_BLATHER(("Apparently we have no getservbyname.")); + } + + /* 1b. 
EVUTIL_AI_NUMERICHOST is set */ + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + hints.ai_flags = EVUTIL_AI_NUMERICHOST; + memset(&local_outcome, 0, sizeof(local_outcome)); + r = evdns_getaddrinfo(dns_base, "www.google.com", "80", + &hints, gai_cb, &local_outcome); + tt_ptr_op(r,==,NULL); + tt_int_op(local_outcome.err,==,EVUTIL_EAI_NONAME); + tt_ptr_op(local_outcome.ai,==,NULL); + + /* 1c. We give a numeric address (ipv6) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_UNSPEC; + hints.ai_protocol = IPPROTO_TCP; + r = evdns_getaddrinfo(dns_base, "f::f", "8008", + &hints, gai_cb, &local_outcome); + tt_assert(!r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + tt_ptr_op(local_outcome.ai->ai_next,==,NULL); + test_ai_eq(local_outcome.ai, "[f::f]:8008", SOCK_STREAM, IPPROTO_TCP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 1d. We give a numeric address (ipv4) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_UNSPEC; + r = evdns_getaddrinfo(dns_base, "5.6.7.8", NULL, + &hints, gai_cb, &local_outcome); + tt_assert(!r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + a = ai_find_by_protocol(local_outcome.ai, IPPROTO_TCP); + tt_assert(a); + test_ai_eq(a, "5.6.7.8", SOCK_STREAM, IPPROTO_TCP); + a = ai_find_by_protocol(local_outcome.ai, IPPROTO_UDP); + tt_assert(a); + test_ai_eq(a, "5.6.7.8", SOCK_DGRAM, IPPROTO_UDP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 1e. nodename is NULL (bind) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_DGRAM; + hints.ai_flags = EVUTIL_AI_PASSIVE; + r = evdns_getaddrinfo(dns_base, NULL, "9090", + &hints, gai_cb, &local_outcome); + tt_assert(!r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + /* we should get a v4 address of 0.0.0.0... */ + a = ai_find_by_family(local_outcome.ai, PF_INET); + tt_assert(a); + test_ai_eq(a, "0.0.0.0:9090", SOCK_DGRAM, IPPROTO_UDP); + /* ... and a v6 address of ::0 */ + a = ai_find_by_family(local_outcome.ai, PF_INET6); + tt_assert(a); + test_ai_eq(a, "[::]:9090", SOCK_DGRAM, IPPROTO_UDP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 1f. nodename is NULL (connect) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + r = evdns_getaddrinfo(dns_base, NULL, "2", + &hints, gai_cb, &local_outcome); + tt_assert(!r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + /* we should get a v4 address of 127.0.0.1 .... */ + a = ai_find_by_family(local_outcome.ai, PF_INET); + tt_assert(a); + test_ai_eq(a, "127.0.0.1:2", SOCK_STREAM, IPPROTO_TCP); + /* ... and a v6 address of ::1 */ + a = ai_find_by_family(local_outcome.ai, PF_INET6); + tt_assert(a); + test_ai_eq(a, "[::1]:2", SOCK_STREAM, IPPROTO_TCP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 1g. We find localhost immediately. 
(pf_unspec) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + r = evdns_getaddrinfo(dns_base, "LOCALHOST", "80", + &hints, gai_cb, &local_outcome); + tt_assert(!r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + /* we should get a v4 address of 127.0.0.1 .... */ + a = ai_find_by_family(local_outcome.ai, PF_INET); + tt_assert(a); + test_ai_eq(a, "127.0.0.1:80", SOCK_STREAM, IPPROTO_TCP); + /* ... and a v6 address of ::1 */ + a = ai_find_by_family(local_outcome.ai, PF_INET6); + tt_assert(a); + test_ai_eq(a, "[::1]:80", SOCK_STREAM, IPPROTO_TCP); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 1g. We find localhost immediately. (pf_inet6) */ + memset(&hints, 0, sizeof(hints)); + memset(&local_outcome, 0, sizeof(local_outcome)); + hints.ai_family = PF_INET6; + hints.ai_socktype = SOCK_STREAM; + r = evdns_getaddrinfo(dns_base, "LOCALHOST", "9999", + &hints, gai_cb, &local_outcome); + tt_assert(! r); + tt_int_op(local_outcome.err,==,0); + tt_assert(local_outcome.ai); + a = local_outcome.ai; + test_ai_eq(a, "[::1]:9999", SOCK_STREAM, IPPROTO_TCP); + tt_ptr_op(a->ai_next, ==, NULL); + evutil_freeaddrinfo(local_outcome.ai); + local_outcome.ai = NULL; + + /* 2. Okay, now we can actually test the asynchronous resolver. */ + /* Start a dummy local dns server... */ + port = regress_get_dnsserver(data->base, &dns_port, NULL, + be_getaddrinfo_server_cb, &n_dns_questions); + tt_assert(port); + tt_int_op(dns_port, >=, 0); + /* ... and tell the evdns_base about it. */ + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", dns_port); + evdns_base_nameserver_ip_add(dns_base, buf); + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = PF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = EVUTIL_AI_CANONNAME; + /* 0: Request for both.example.com should return both addresses. */ + r = evdns_getaddrinfo(dns_base, "both.example.com", "8000", + &hints, gai_cb, &a_out[0]); + tt_assert(r); + + /* 1: Request for v4only.example.com should return one address. */ + r = evdns_getaddrinfo(dns_base, "v4only.example.com", "8001", + &hints, gai_cb, &a_out[1]); + tt_assert(r); + + /* 2: Request for v6only.example.com should return one address. */ + hints.ai_flags = 0; + r = evdns_getaddrinfo(dns_base, "v6only.example.com", "8002", + &hints, gai_cb, &a_out[2]); + tt_assert(r); + + /* 3: PF_INET request for v4assert.example.com should not generate a + * v6 request. The server will fail the test if it does. */ + hints.ai_family = PF_INET; + r = evdns_getaddrinfo(dns_base, "v4assert.example.com", "8003", + &hints, gai_cb, &a_out[3]); + tt_assert(r); + + /* 4: PF_INET6 request for v6assert.example.com should not generate a + * v4 request. The server will fail the test if it does. */ + hints.ai_family = PF_INET6; + r = evdns_getaddrinfo(dns_base, "v6assert.example.com", "8004", + &hints, gai_cb, &a_out[4]); + tt_assert(r); + + /* 5: PF_INET request for nosuchplace.example.com should give NEXIST. */ + hints.ai_family = PF_INET; + r = evdns_getaddrinfo(dns_base, "nosuchplace.example.com", "8005", + &hints, gai_cb, &a_out[5]); + tt_assert(r); + + /* 6: PF_UNSPEC request for nosuchplace.example.com should give NEXIST. + */ + hints.ai_family = PF_UNSPEC; + r = evdns_getaddrinfo(dns_base, "nosuchplace.example.com", "8006", + &hints, gai_cb, &a_out[6]); + tt_assert(r); + + /* 7: PF_UNSPEC request for v6timeout.example.com should give an ipv4 + * address only. 
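Ahead of the cancellation case set up below (request 11), a minimal sketch (editorial, not part of the patch) of cancelling an in-flight evdns_getaddrinfo() from a timer; gai_done, cancel_lookup and lookup_with_deadline are illustrative names, and <event2/dns.h> plus <event2/event.h> are assumed.

static void
gai_done(int err, struct evutil_addrinfo *res, void *arg)
{
	if (err == EVUTIL_EAI_CANCEL)
		return;                 /* cancelled before an answer arrived */
	if (!err && res)
		evutil_freeaddrinfo(res);
}

static void
cancel_lookup(evutil_socket_t fd, short what, void *arg)
{
	evdns_getaddrinfo_cancel(arg); /* arg is the pending request */
}

static void
lookup_with_deadline(struct event_base *base, struct evdns_base *dns)
{
	struct timeval limit = { 0, 100*1000 }; /* 100 msec */
	/* A NULL return means the callback already ran (nothing to cancel). */
	struct evdns_getaddrinfo_request *req = evdns_getaddrinfo(dns,
	    "somewhere.example.com", "80", NULL, gai_done, NULL);
	if (req)
		event_base_once(base, -1, EV_TIMEOUT, cancel_lookup, req, &limit);
}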
*/ + hints.ai_family = PF_UNSPEC; + r = evdns_getaddrinfo(dns_base, "v6timeout.example.com", "8007", + &hints, gai_cb, &a_out[7]); + tt_assert(r); + + /* 8: PF_UNSPEC request for v6timeout-nonexist.example.com should give + * a NEXIST */ + hints.ai_family = PF_UNSPEC; + r = evdns_getaddrinfo(dns_base, "v6timeout-nonexist.example.com", + "8008", &hints, gai_cb, &a_out[8]); + tt_assert(r); + + /* 9: AI_ADDRCONFIG should at least not crash. Can't test it more + * without knowing what kind of internet we have. */ + hints.ai_flags |= EVUTIL_AI_ADDRCONFIG; + r = evdns_getaddrinfo(dns_base, "both.example.com", + "8009", &hints, gai_cb, &a_out[9]); + tt_assert(r); + + /* 10: PF_UNSPEC for v4timeout.example.com should give an ipv6 address + * only. */ + hints.ai_family = PF_UNSPEC; + hints.ai_flags = 0; + r = evdns_getaddrinfo(dns_base, "v4timeout.example.com", "8010", + &hints, gai_cb, &a_out[10]); + tt_assert(r); + + /* 11: timeout.example.com: cancel it after 100 msec. */ + r = evdns_getaddrinfo(dns_base, "all-timeout.example.com", "8011", + &hints, gai_cb, &a_out[11]); + tt_assert(r); + { + struct timeval tv; + tv.tv_sec = 0; + tv.tv_usec = 100*1000; /* 100 msec */ + event_base_once(data->base, -1, EV_TIMEOUT, cancel_gai_cb, + r, &tv); + } + + /* XXXXX There are more tests we could do, including: + + - A test to elicit NODATA. + + */ + + n_gai_results_pending = 12; + exit_base_on_no_pending_results = data->base; + + event_base_dispatch(data->base); + + /* 0: both.example.com */ + tt_int_op(a_out[0].err, ==, 0); + tt_assert(a_out[0].ai); + tt_assert(a_out[0].ai->ai_next); + tt_assert(!a_out[0].ai->ai_next->ai_next); + a = ai_find_by_family(a_out[0].ai, PF_INET); + tt_assert(a); + test_ai_eq(a, "80.80.32.32:8000", SOCK_STREAM, IPPROTO_TCP); + a = ai_find_by_family(a_out[0].ai, PF_INET6); + tt_assert(a); + test_ai_eq(a, "[80ff::bbbb]:8000", SOCK_STREAM, IPPROTO_TCP); + tt_assert(a_out[0].ai->ai_canonname); + tt_str_op(a_out[0].ai->ai_canonname, ==, "both-canonical.example.com"); + + /* 1: v4only.example.com */ + tt_int_op(a_out[1].err, ==, 0); + tt_assert(a_out[1].ai); + tt_assert(! a_out[1].ai->ai_next); + test_ai_eq(a_out[1].ai, "18.52.86.120:8001", SOCK_STREAM, IPPROTO_TCP); + tt_assert(a_out[1].ai->ai_canonname == NULL); + + + /* 2: v6only.example.com */ + tt_int_op(a_out[2].err, ==, 0); + tt_assert(a_out[2].ai); + tt_assert(! a_out[2].ai->ai_next); + test_ai_eq(a_out[2].ai, "[b0b::f00d]:8002", SOCK_STREAM, IPPROTO_TCP); + + /* 3: v4assert.example.com */ + tt_int_op(a_out[3].err, ==, 0); + tt_assert(a_out[3].ai); + tt_assert(! a_out[3].ai->ai_next); + test_ai_eq(a_out[3].ai, "18.52.86.120:8003", SOCK_STREAM, IPPROTO_TCP); + + /* 4: v6assert.example.com */ + tt_int_op(a_out[4].err, ==, 0); + tt_assert(a_out[4].ai); + tt_assert(! a_out[4].ai->ai_next); + test_ai_eq(a_out[4].ai, "[b0b::f00d]:8004", SOCK_STREAM, IPPROTO_TCP); + + /* 5: nosuchplace.example.com (inet) */ + tt_int_op(a_out[5].err, ==, EVUTIL_EAI_NONAME); + tt_assert(! a_out[5].ai); + + /* 6: nosuchplace.example.com (unspec) */ + tt_int_op(a_out[6].err, ==, EVUTIL_EAI_NONAME); + tt_assert(! a_out[6].ai); + + /* 7: v6timeout.example.com */ + tt_int_op(a_out[7].err, ==, 0); + tt_assert(a_out[7].ai); + tt_assert(! a_out[7].ai->ai_next); + test_ai_eq(a_out[7].ai, "171.205.239.1:8007", SOCK_STREAM, IPPROTO_TCP); + + /* 8: v6timeout-nonexist.example.com */ + tt_int_op(a_out[8].err, ==, EVUTIL_EAI_NONAME); + tt_assert(! 
a_out[8].ai); + + /* 9: both (ADDRCONFIG) */ + tt_int_op(a_out[9].err, ==, 0); + tt_assert(a_out[9].ai); + a = ai_find_by_family(a_out[9].ai, PF_INET); + if (a) + test_ai_eq(a, "80.80.32.32:8009", SOCK_STREAM, IPPROTO_TCP); + else + tt_assert(ai_find_by_family(a_out[9].ai, PF_INET6)); + a = ai_find_by_family(a_out[9].ai, PF_INET6); + if (a) + test_ai_eq(a, "[80ff::bbbb]:8009", SOCK_STREAM, IPPROTO_TCP); + else + tt_assert(ai_find_by_family(a_out[9].ai, PF_INET)); + + /* 10: v4timeout.example.com */ + tt_int_op(a_out[10].err, ==, 0); + tt_assert(a_out[10].ai); + tt_assert(! a_out[10].ai->ai_next); + test_ai_eq(a_out[10].ai, "[a0a::ff01]:8010", SOCK_STREAM, IPPROTO_TCP); + + /* 11: cancelled request. */ + tt_int_op(a_out[11].err, ==, EVUTIL_EAI_CANCEL); + tt_assert(a_out[11].ai == NULL); + +end: + if (local_outcome.ai) + evutil_freeaddrinfo(local_outcome.ai); + for (i = 0; i < ARRAY_SIZE(a_out); ++i) { + if (a_out[i].ai) + evutil_freeaddrinfo(a_out[i].ai); + } + if (port) + evdns_close_server_port(port); + if (dns_base) + evdns_base_free(dns_base, 0); +} + +struct gaic_request_status { + int magic; + struct event_base *base; + struct evdns_base *dns_base; + struct evdns_getaddrinfo_request *request; + struct event cancel_event; + int canceled; +}; + +#define GAIC_MAGIC 0x1234abcd + +static int pending = 0; + +static void +gaic_cancel_request_cb(evutil_socket_t fd, short what, void *arg) +{ + struct gaic_request_status *status = arg; + + tt_assert(status->magic == GAIC_MAGIC); + status->canceled = 1; + evdns_getaddrinfo_cancel(status->request); + return; +end: + event_base_loopexit(status->base, NULL); +} + +static void +gaic_server_cb(struct evdns_server_request *req, void *arg) +{ + ev_uint32_t answer = 0x7f000001; + tt_assert(req->nquestions); + evdns_server_request_add_a_reply(req, req->questions[0]->name, 1, + &answer, 100); + evdns_server_request_respond(req, 0); + return; +end: + evdns_server_request_respond(req, DNS_ERR_REFUSED); +} + + +static void +gaic_getaddrinfo_cb(int result, struct evutil_addrinfo *res, void *arg) +{ + struct gaic_request_status *status = arg; + struct event_base *base = status->base; + tt_assert(status->magic == GAIC_MAGIC); + + if (result == EVUTIL_EAI_CANCEL) { + tt_assert(status->canceled); + } + event_del(&status->cancel_event); + + memset(status, 0xf0, sizeof(*status)); + free(status); + +end: + if (--pending <= 0) + event_base_loopexit(base, NULL); +} + +static void +gaic_launch(struct event_base *base, struct evdns_base *dns_base) +{ + struct gaic_request_status *status = calloc(1,sizeof(*status)); + struct timeval tv = { 0, 10000 }; + status->magic = GAIC_MAGIC; + status->base = base; + status->dns_base = dns_base; + event_assign(&status->cancel_event, base, -1, 0, gaic_cancel_request_cb, + status); + status->request = evdns_getaddrinfo(dns_base, + "foobar.bazquux.example.com", "80", NULL, gaic_getaddrinfo_cb, + status); + event_add(&status->cancel_event, &tv); + ++pending; +} + +#ifdef EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED +/* FIXME: We should move this to regress_main.c if anything else needs it.*/ + +/* Trivial replacements for malloc/free/realloc to check for memory leaks. + * Not threadsafe. 
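+ * allocated_chunks tracks the net number of outstanding allocations: it is
+ * incremented by cnt_malloc() (and by cnt_realloc() given a NULL pointer) and
+ * decremented by cnt_free() (and by cnt_realloc() given a zero size), so it
+ * should be back at zero once every allocation has been released.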
*/ +static int allocated_chunks = 0; + +static void * +cnt_malloc(size_t sz) +{ + allocated_chunks += 1; + return malloc(sz); +} + +static void * +cnt_realloc(void *old, size_t sz) +{ + if (!old) + allocated_chunks += 1; + if (!sz) + allocated_chunks -= 1; + return realloc(old, sz); +} + +static void +cnt_free(void *ptr) +{ + allocated_chunks -= 1; + free(ptr); +} + +struct testleak_env_t { + struct event_base *base; + struct evdns_base *dns_base; + struct evdns_request *req; + struct generic_dns_callback_result r; +}; + +static void * +testleak_setup(const struct testcase_t *testcase) +{ + struct testleak_env_t *env; + + allocated_chunks = 0; + + /* Reset allocation counter, to start allocations from the very beginning. + * (this will avoid false-positive negative numbers for allocated_chunks) + */ + libevent_global_shutdown(); + + event_set_mem_functions(cnt_malloc, cnt_realloc, cnt_free); + + event_enable_debug_mode(); + + /* not mm_calloc: we don't want to mess with the count. */ + env = calloc(1, sizeof(struct testleak_env_t)); + env->base = event_base_new(); + env->dns_base = evdns_base_new(env->base, 0); + env->req = evdns_base_resolve_ipv4( + env->dns_base, "example.com", DNS_QUERY_NO_SEARCH, + generic_dns_callback, &env->r); + return env; +} + +static int +testleak_cleanup(const struct testcase_t *testcase, void *env_) +{ + int ok = 0; + struct testleak_env_t *env = env_; + tt_assert(env); +#ifdef EVENT__DISABLE_DEBUG_MODE + tt_int_op(allocated_chunks, ==, 0); +#else + libevent_global_shutdown(); + tt_int_op(allocated_chunks, ==, 0); +#endif + ok = 1; +end: + if (env) { + if (env->dns_base) + evdns_base_free(env->dns_base, 0); + if (env->base) + event_base_free(env->base); + free(env); + } + return ok; +} + +static struct testcase_setup_t testleak_funcs = { + testleak_setup, testleak_cleanup +}; + +static void +test_dbg_leak_cancel(void *env_) +{ + /* cancel, loop, free/dns, free/base */ + struct testleak_env_t *env = env_; + int send_err_shutdown = 1; + evdns_cancel_request(env->dns_base, env->req); + env->req = 0; + + /* `req` is freed in callback, that's why one loop is required. */ + event_base_loop(env->base, EVLOOP_NONBLOCK); + + /* send_err_shutdown means nothing as soon as our request is + * already canceled */ + evdns_base_free(env->dns_base, send_err_shutdown); + env->dns_base = 0; + event_base_free(env->base); + env->base = 0; +} + +static void +dbg_leak_resume(void *env_, int cancel, int send_err_shutdown) +{ + /* cancel, loop, free/dns, free/base */ + struct testleak_env_t *env = env_; + if (cancel) { + evdns_cancel_request(env->dns_base, env->req); + tt_assert(!evdns_base_resume(env->dns_base)); + } else { + /* TODO: No nameservers, request can't be processed, must be errored */ + tt_assert(!evdns_base_resume(env->dns_base)); + } + + event_base_loop(env->base, EVLOOP_NONBLOCK); + /** + * Because we don't cancel request, and want our callback to recieve + * DNS_ERR_SHUTDOWN, we use deferred callback, and there was: + * - one extra malloc(), + * @see reply_schedule_callback() + * - and one missing free + * @see request_finished() (req->handle->pending_cb = 1) + * than we don't need to count in testleak_cleanup(), but we can clean them + * if we will run loop once again, but *after* evdns base freed. 
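+ * (In short: the shutdown reply is delivered through a deferred callback, so
+ * its allocation is only released when the loop runs once more after
+ * evdns_base_free(); hence the extra event_base_loop() call below.)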
+ */ + evdns_base_free(env->dns_base, send_err_shutdown); + env->dns_base = 0; + event_base_loop(env->base, EVLOOP_NONBLOCK); + +end: + event_base_free(env->base); + env->base = 0; +} + +#define IMPL_DBG_LEAK_RESUME(name, cancel, send_err_shutdown) \ + static void \ + test_dbg_leak_##name##_(void *env_) \ + { \ + dbg_leak_resume(env_, cancel, send_err_shutdown); \ + } +IMPL_DBG_LEAK_RESUME(resume, 0, 0) +IMPL_DBG_LEAK_RESUME(cancel_and_resume, 1, 0) +IMPL_DBG_LEAK_RESUME(resume_send_err, 0, 1) +IMPL_DBG_LEAK_RESUME(cancel_and_resume_send_err, 1, 1) + +static void +test_dbg_leak_shutdown(void *env_) +{ + /* free/dns, loop, free/base */ + struct testleak_env_t *env = env_; + int send_err_shutdown = 1; + + /* `req` is freed both with `send_err_shutdown` and without it, + * the only difference is `evdns_callback` call */ + env->req = 0; + + evdns_base_free(env->dns_base, send_err_shutdown); + env->dns_base = 0; + + /* `req` is freed in callback, that's why one loop is required */ + event_base_loop(env->base, EVLOOP_NONBLOCK); + event_base_free(env->base); + env->base = 0; +} +#endif + +static void +test_getaddrinfo_async_cancel_stress(void *ptr) +{ + struct event_base *base; + struct evdns_base *dns_base = NULL; + struct evdns_server_port *server = NULL; + evutil_socket_t fd = -1; + struct sockaddr_in sin; + struct sockaddr_storage ss; + ev_socklen_t slen; + unsigned i; + + base = event_base_new(); + dns_base = evdns_base_new(base, 0); + + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_port = 0; + sin.sin_addr.s_addr = htonl(0x7f000001); + if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + tt_abort_perror("socket"); + } + evutil_make_socket_nonblocking(fd); + if (bind(fd, (struct sockaddr*)&sin, sizeof(sin))<0) { + tt_abort_perror("bind"); + } + server = evdns_add_server_port_with_base(base, fd, 0, gaic_server_cb, + base); + + memset(&ss, 0, sizeof(ss)); + slen = sizeof(ss); + if (getsockname(fd, (struct sockaddr*)&ss, &slen)<0) { + tt_abort_perror("getsockname"); + } + evdns_base_nameserver_sockaddr_add(dns_base, + (struct sockaddr*)&ss, slen, 0); + + for (i = 0; i < 1000; ++i) { + gaic_launch(base, dns_base); + } + + event_base_dispatch(base); + +end: + if (dns_base) + evdns_base_free(dns_base, 1); + if (server) + evdns_close_server_port(server); + if (base) + event_base_free(base); + if (fd >= 0) + evutil_closesocket(fd); +} + +static void +dns_client_fail_requests_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + struct evdns_server_port *dns_port = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + + struct generic_dns_callback_result r[20]; + unsigned i; + + dns_port = regress_get_dnsserver(base, &portnum, NULL, + regress_dns_server_cb, reissue_table); + tt_assert(dns_port); + + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + + for (i = 0; i < 20; ++i) + evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r[i]); + + n_replies_left = 20; + exit_base = base; + + evdns_base_free(dns, 1 /** fail requests */); + /** run defered callbacks, to trigger UAF */ + event_base_dispatch(base); + + tt_int_op(n_replies_left, ==, 0); + for (i = 0; i < 20; ++i) + tt_int_op(r[i].result, ==, DNS_ERR_SHUTDOWN); + +end: + evdns_close_server_port(dns_port); +} + +static void +getaddrinfo_cb(int err, struct evutil_addrinfo *res, void *ptr) +{ + 
generic_dns_callback(err, 0, 0, 0, NULL, ptr); +} +static void +dns_client_fail_requests_getaddrinfo_test(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + struct evdns_base *dns = NULL; + struct evdns_server_port *dns_port = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + + struct generic_dns_callback_result r[20]; + int i; + + dns_port = regress_get_dnsserver(base, &portnum, NULL, + regress_dns_server_cb, reissue_table); + tt_assert(dns_port); + + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE); + tt_assert(!evdns_base_nameserver_ip_add(dns, buf)); + + for (i = 0; i < 20; ++i) + tt_assert(evdns_getaddrinfo(dns, "foof.example.com", "80", NULL, getaddrinfo_cb, &r[i])); + + n_replies_left = 20; + exit_base = base; + + evdns_base_free(dns, 1 /** fail requests */); + /** run defered callbacks, to trigger UAF */ + event_base_dispatch(base); + + tt_int_op(n_replies_left, ==, 0); + for (i = 0; i < 20; ++i) + tt_int_op(r[i].result, ==, EVUTIL_EAI_FAIL); + +end: + evdns_close_server_port(dns_port); +} + +#ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED +struct race_param +{ + void *lock; + void *reqs_cmpl_cond; + int bw_threads; + void *bw_threads_exited_cond; + volatile int stopping; + void *base; + void *dns; + + int locked; +}; +static void * +race_base_run(void *arg) +{ + struct race_param *rp = (struct race_param *)arg; + event_base_loop(rp->base, EVLOOP_NO_EXIT_ON_EMPTY); + THREAD_RETURN(); +} +static void * +race_busywait_run(void *arg) +{ + struct race_param *rp = (struct race_param *)arg; + struct sockaddr_storage ss; + while (!rp->stopping) + evdns_base_get_nameserver_addr(rp->dns, 0, (struct sockaddr *)&ss, sizeof(ss)); + EVLOCK_LOCK(rp->lock, 0); + if (--rp->bw_threads == 0) + EVTHREAD_COND_SIGNAL(rp->bw_threads_exited_cond); + EVLOCK_UNLOCK(rp->lock, 0); + THREAD_RETURN(); +} +static void +race_gai_cb(int result, struct evutil_addrinfo *res, void *arg) +{ + struct race_param *rp = arg; + (void)result; + (void)res; + + --n_replies_left; + if (n_replies_left == 0) { + EVLOCK_LOCK(rp->lock, 0); + EVTHREAD_COND_SIGNAL(rp->reqs_cmpl_cond); + EVLOCK_UNLOCK(rp->lock, 0); + } +} +static void +getaddrinfo_race_gotresolve_test(void *arg) +{ + struct race_param rp; + struct evdns_server_port *dns_port = NULL; + ev_uint16_t portnum = 0; + char buf[64]; + int i; + + // Some stress is needed to yield inside getaddrinfo between resolve_ipv4 and resolve_ipv6 + int n_reqs = 16384; +#ifdef _SC_NPROCESSORS_ONLN + int n_threads = sysconf(_SC_NPROCESSORS_ONLN) + 1; +#else + int n_threads = 17; +#endif + THREAD_T thread[n_threads]; + struct timeval tv; + + (void)arg; + + evthread_use_pthreads(); + + rp.base = event_base_new(); + tt_assert(rp.base); + if (evthread_make_base_notifiable(rp.base) < 0) + tt_abort_msg("Couldn't make base notifiable!"); + + dns_port = regress_get_dnsserver(rp.base, &portnum, NULL, + regress_dns_server_cb, reissue_table); + tt_assert(dns_port); + + evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum); + + rp.dns = evdns_base_new(rp.base, 0); + tt_assert(!evdns_base_nameserver_ip_add(rp.dns, buf)); + + n_replies_left = n_reqs; + + EVTHREAD_ALLOC_LOCK(rp.lock, 0); + EVTHREAD_ALLOC_COND(rp.reqs_cmpl_cond); + EVTHREAD_ALLOC_COND(rp.bw_threads_exited_cond); + tt_assert(rp.lock); + tt_assert(rp.reqs_cmpl_cond); + tt_assert(rp.bw_threads_exited_cond); + rp.bw_threads = 0; + rp.stopping = 0; + + // Run resolver thread + THREAD_START(thread[0], race_base_run, 
&rp); + // Run busy-wait threads used to force yield this thread + for (i = 1; i < n_threads; i++) { + rp.bw_threads++; + THREAD_START(thread[i], race_busywait_run, &rp); + } + + EVLOCK_LOCK(rp.lock, 0); + rp.locked = 1; + + for (i = 0; i < n_reqs; ++i) { + tt_assert(evdns_getaddrinfo(rp.dns, "foof.example.com", "80", NULL, race_gai_cb, &rp)); + // This magic along with busy-wait threads make this thread yield frequently + if (i % 100 == 0) { + tv.tv_sec = 0; + tv.tv_usec = 10000; + evutil_usleep_(&tv); + } + } + + exit_base = rp.base; + + // Wait for some time + tv.tv_sec = 5; + tv.tv_usec = 0; + EVTHREAD_COND_WAIT_TIMED(rp.reqs_cmpl_cond, rp.lock, &tv); + + // Stop busy-wait threads + tv.tv_sec = 1; + tv.tv_usec = 0; + rp.stopping = 1; + tt_assert(EVTHREAD_COND_WAIT_TIMED(rp.bw_threads_exited_cond, rp.lock, &tv) == 0); + + EVLOCK_UNLOCK(rp.lock, 0); + rp.locked = 0; + + evdns_base_free(rp.dns, 1 /** fail requests */); + + tt_int_op(n_replies_left, ==, 0); + +end: + if (rp.locked) + EVLOCK_UNLOCK(rp.lock, 0); + EVTHREAD_FREE_LOCK(rp.lock, 0); + EVTHREAD_FREE_COND(rp.reqs_cmpl_cond); + EVTHREAD_FREE_COND(rp.bw_threads_exited_cond); + evdns_close_server_port(dns_port); + event_base_loopbreak(rp.base); + event_base_free(rp.base); +} +#endif + +static void +test_set_so_rcvbuf_so_sndbuf(void *arg) +{ + struct basic_test_data *data = arg; + struct evdns_base *dns_base; + + dns_base = evdns_base_new(data->base, 0); + tt_assert(dns_base); + + tt_assert(!evdns_base_set_option(dns_base, "so-rcvbuf", "10240")); + tt_assert(!evdns_base_set_option(dns_base, "so-sndbuf", "10240")); + + /* actually check SO_RCVBUF/SO_SNDBUF not fails */ + tt_assert(!evdns_base_nameserver_ip_add(dns_base, "127.0.0.1")); + +end: + if (dns_base) + evdns_base_free(dns_base, 0); +} + +#define DNS_LEGACY(name, flags) \ + { #name, run_legacy_test_fn, flags|TT_LEGACY, &legacy_setup, \ + dns_##name } + +struct testcase_t dns_testcases[] = { + DNS_LEGACY(server, TT_FORK|TT_NEED_BASE), + DNS_LEGACY(gethostbyname, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT), + DNS_LEGACY(gethostbyname6, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT), + DNS_LEGACY(gethostbyaddr, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT), + { "resolve_reverse", dns_resolve_reverse, TT_FORK|TT_OFF_BY_DEFAULT, NULL, NULL }, + { "search_empty", dns_search_empty_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "search", dns_search_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "search_lower", dns_search_lower_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "search_cancel", dns_search_cancel_test, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "retry", dns_retry_test, TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + { "retry_disable_when_inactive", dns_retry_disable_when_inactive_test, + TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + { "reissue", dns_reissue_test, TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + { "reissue_disable_when_inactive", dns_reissue_disable_when_inactive_test, + TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + { "inflight", dns_inflight_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + { "bufferevent_connect_hostname", test_bufferevent_connect_hostname, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, +#ifdef EVENT__HAVE_SETRLIMIT + { "bufferevent_connect_hostname_emfile", test_bufferevent_connect_hostname, + TT_FORK|TT_NEED_BASE, &basic_setup, (char*)"emfile" }, +#endif + { "disable_when_inactive", dns_disable_when_inactive_test, + TT_FORK|TT_NEED_BASE, &basic_setup, 
NULL }, + { "disable_when_inactive_no_ns", dns_disable_when_inactive_no_ns_test, + TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + + { "initialize_nameservers", dns_initialize_nameservers_test, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, +#ifndef _WIN32 + { "nameservers_no_default", dns_nameservers_no_default_test, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, +#endif + + { "getaddrinfo_async", test_getaddrinfo_async, + TT_FORK|TT_NEED_BASE, &basic_setup, (char*)"" }, + { "getaddrinfo_cancel_stress", test_getaddrinfo_async_cancel_stress, + TT_FORK, NULL, NULL }, + +#ifdef EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED + { "leak_shutdown", test_dbg_leak_shutdown, TT_FORK, &testleak_funcs, NULL }, + { "leak_cancel", test_dbg_leak_cancel, TT_FORK, &testleak_funcs, NULL }, + + { "leak_resume", test_dbg_leak_resume_, TT_FORK, &testleak_funcs, NULL }, + { "leak_cancel_and_resume", test_dbg_leak_cancel_and_resume_, + TT_FORK, &testleak_funcs, NULL }, + { "leak_resume_send_err", test_dbg_leak_resume_send_err_, + TT_FORK, &testleak_funcs, NULL }, + { "leak_cancel_and_resume_send_err", test_dbg_leak_cancel_and_resume_send_err_, + TT_FORK, &testleak_funcs, NULL }, +#endif + + { "client_fail_requests", dns_client_fail_requests_test, + TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, + { "client_fail_requests_getaddrinfo", + dns_client_fail_requests_getaddrinfo_test, + TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL }, +#ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED + { "getaddrinfo_race_gotresolve", + getaddrinfo_race_gotresolve_test, + TT_FORK|TT_OFF_BY_DEFAULT, NULL, NULL }, +#endif + + { "set_SO_RCVBUF_SO_SNDBUF", test_set_so_rcvbuf_so_sndbuf, + TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, + + END_OF_TESTCASES +}; + diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress_et.c b/probe-busybox/libevent-2.1.11-stable/test/regress_et.c new file mode 100644 index 00000000..5fa87a39 --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_et.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "../util-internal.h" +#include "event2/event-config.h" + +#ifdef _WIN32 +#include +#endif +#include +#include +#ifdef EVENT__HAVE_SYS_SOCKET_H +#include +#endif +#include +#include +#include +#include +#ifndef _WIN32 +#include +#include +#endif +#include + +#include "event2/event.h" +#include "event2/util.h" + +#include "regress.h" + +static int was_et = 0; + +static int base_supports_et(struct event_base *base) +{ + return + (!strcmp(event_base_get_method(base), "epoll") || + !strcmp(event_base_get_method(base), "epoll (with changelist)") || + !strcmp(event_base_get_method(base), "kqueue")); +} + +static void +read_cb(evutil_socket_t fd, short event, void *arg) +{ + char buf; + int len; + + len = recv(fd, &buf, sizeof(buf), 0); + + called++; + if (event & EV_ET) + was_et = 1; + + if (!len) + event_del(arg); +} + +static void +test_edgetriggered(void *data_) +{ + struct basic_test_data *data = data_; + struct event_base *base = data->base; + evutil_socket_t *pair = data->pair; + struct event *ev = NULL; + const char *test = "test string"; + int supports_et; + + /* On Linux 3.2.1 (at least, as patched by Fedora and tested by Nick), + * doing a "recv" on an AF_UNIX socket resets the readability of the + * socket, even though there is no state change, so we don't actually + * get edge-triggered behavior. Yuck! Linux 3.1.9 didn't have this + * problem. + */ + + called = was_et = 0; + + tt_int_op(send(pair[0], test, (int)strlen(test)+1, 0), >, 0); + tt_int_op(shutdown(pair[0], EVUTIL_SHUT_WR), ==, 0); + + supports_et = base_supports_et(base); + TT_BLATHER(("Checking for edge-triggered events with %s, which should %s" + "support edge-triggering", event_base_get_method(base), + supports_et?"":"not ")); + + /* Initalize one event */ + ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev); + tt_assert(ev != NULL); + tt_int_op(event_add(ev, NULL), ==, 0); + + /* We're going to call the dispatch function twice. The first invocation + * will read a single byte from pair[1] in either case. If we're edge + * triggered, we'll only see the event once (since we only see transitions + * from no data to data), so the second invocation of event_base_loop will + * do nothing. If we're level triggered, the second invocation of + * event_base_loop will also activate the event (because there's still + * data to read). */ + tt_int_op(event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE), ==, 0); + tt_int_op(event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE), ==, 0); + + if (supports_et) { + tt_int_op(called, ==, 1); + tt_assert(was_et); + } else { + tt_int_op(called, ==, 2); + tt_assert(!was_et); + } + +end: + if (ev) { + event_del(ev); + event_free(ev); + } +} + +static void +test_edgetriggered_mix_error(void *data_) +{ + struct basic_test_data *data = data_; + struct event_base *base = NULL; + struct event *ev_et=NULL, *ev_lt=NULL; + +#ifdef EVENT__DISABLE_DEBUG_MODE + if (1) + tt_skip(); +#endif + + if (!libevent_tests_running_in_debug_mode) + event_enable_debug_mode(); + + base = event_base_new(); + + /* try mixing edge-triggered and level-triggered to make sure it fails*/ + ev_et = event_new(base, data->pair[0], EV_READ|EV_ET, read_cb, ev_et); + tt_assert(ev_et); + ev_lt = event_new(base, data->pair[0], EV_READ, read_cb, ev_lt); + tt_assert(ev_lt); + + /* Add edge-triggered, then level-triggered. Get an error. 
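+ * With debug mode enabled, adding a level-triggered event for an fd that
+ * already has an edge-triggered one must fail: event_add() returns -1 and
+ * only the first event remains pending, as the assertions below check.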
*/ + tt_int_op(0, ==, event_add(ev_et, NULL)); + tt_int_op(-1, ==, event_add(ev_lt, NULL)); + tt_int_op(EV_READ, ==, event_pending(ev_et, EV_READ, NULL)); + tt_int_op(0, ==, event_pending(ev_lt, EV_READ, NULL)); + + tt_int_op(0, ==, event_del(ev_et)); + /* Add level-triggered, then edge-triggered. Get an error. */ + tt_int_op(0, ==, event_add(ev_lt, NULL)); + tt_int_op(-1, ==, event_add(ev_et, NULL)); + tt_int_op(EV_READ, ==, event_pending(ev_lt, EV_READ, NULL)); + tt_int_op(0, ==, event_pending(ev_et, EV_READ, NULL)); + +end: + if (ev_et) + event_free(ev_et); + if (ev_lt) + event_free(ev_lt); + if (base) + event_base_free(base); +} + +static int read_notification_count; +static int last_read_notification_was_et; +static void +read_notification_cb(evutil_socket_t fd, short event, void *arg) +{ + read_notification_count++; + last_read_notification_was_et = (event & EV_ET); +} + +static int write_notification_count; +static int last_write_notification_was_et; +static void +write_notification_cb(evutil_socket_t fd, short event, void *arg) +{ + write_notification_count++; + last_write_notification_was_et = (event & EV_ET); +} + +/* After two or more events have been registered for the same + * file descriptor using EV_ET, if one of the events is + * deleted, then the epoll_ctl() call issued by libevent drops + * the EPOLLET flag resulting in level triggered + * notifications. + */ +static void +test_edge_triggered_multiple_events(void *data_) +{ + struct basic_test_data *data = data_; + struct event *read_ev = NULL; + struct event *write_ev = NULL; + const char c = 'A'; + struct event_base *base = data->base; + evutil_socket_t *pair = data->pair; + + if (!base_supports_et(base)) { + tt_skip(); + return; + } + + read_notification_count = 0; + last_read_notification_was_et = 0; + write_notification_count = 0; + last_write_notification_was_et = 0; + + /* Make pair[1] readable */ + tt_int_op(send(pair[0], &c, 1, 0), >, 0); + + read_ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, + read_notification_cb, NULL); + write_ev = event_new(base, pair[1], EV_WRITE|EV_ET|EV_PERSIST, + write_notification_cb, NULL); + + event_add(read_ev, NULL); + event_add(write_ev, NULL); + event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); + event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); + + tt_assert(last_read_notification_was_et); + tt_int_op(read_notification_count, ==, 1); + tt_assert(last_write_notification_was_et); + tt_int_op(write_notification_count, ==, 1); + + event_del(read_ev); + + /* trigger acitivity second time for the backend that can have multiple + * events for one fd (like kqueue) */ + close(pair[0]); + pair[0] = -1; + + /* Verify that we are still edge-triggered for write notifications */ + event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); + event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); + tt_assert(last_write_notification_was_et); + tt_int_op(write_notification_count, ==, 2); + +end: + if (read_ev) + event_free(read_ev); + if (write_ev) + event_free(write_ev); +} + +struct testcase_t edgetriggered_testcases[] = { + { "et", test_edgetriggered, + TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR, &basic_setup, NULL }, + { "et_mix_error", test_edgetriggered_mix_error, + TT_FORK|TT_NEED_SOCKETPAIR|TT_NO_LOGS, &basic_setup, NULL }, + { "et_multiple_events", test_edge_triggered_multiple_events, + TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR, &basic_setup, NULL }, + END_OF_TESTCASES +}; diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress_finalize.c 
b/probe-busybox/libevent-2.1.11-stable/test/regress_finalize.c new file mode 100644 index 00000000..552210fe --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_finalize.c @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2013 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "event2/event-config.h" +#include "evconfig-private.h" +#include "tinytest.h" +#include "tinytest_macros.h" +#include + +#include "event2/event.h" +#include "event2/util.h" +#include "event-internal.h" +#include "defer-internal.h" + +#include "regress.h" +#include "regress_thread.h" + +static void +timer_callback(evutil_socket_t fd, short what, void *arg) +{ + int *int_arg = arg; + *int_arg += 1; + (void)fd; + (void)what; +} +static void +simple_callback(struct event_callback *evcb, void *arg) +{ + int *int_arg = arg; + *int_arg += 1; + (void)evcb; +} +static void +event_finalize_callback_1(struct event *ev, void *arg) +{ + int *int_arg = arg; + *int_arg += 100; + (void)ev; +} +static void +callback_finalize_callback_1(struct event_callback *evcb, void *arg) +{ + int *int_arg = arg; + *int_arg += 100; + (void)evcb; +} + + +static void +test_fin_cb_invoked(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + + struct event *ev; + struct event ev2; + struct event_callback evcb; + int cb_called = 0; + int ev_called = 0; + + const struct timeval ten_sec = {10,0}; + + event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called); + ev = evtimer_new(base, timer_callback, &ev_called); + /* Just finalize them; don't bother adding. */ + event_free_finalize(0, ev, event_finalize_callback_1); + event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1); + + event_base_dispatch(base); + + tt_int_op(cb_called, ==, 100); + tt_int_op(ev_called, ==, 100); + + ev_called = cb_called = 0; + event_base_assert_ok_(base); + + /* Now try it when they're active. (actually, don't finalize: make + * sure activation can happen! 
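+ * Activated (but not finalized) events run their normal callbacks, so both
+ * counters should advance by exactly 1 here, not by the 100 that the
+ * finalize callbacks add.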
*/ + ev = evtimer_new(base, timer_callback, &ev_called); + event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called); + + event_active(ev, EV_TIMEOUT, 1); + event_callback_activate_(base, &evcb); + + event_base_dispatch(base); + tt_int_op(cb_called, ==, 1); + tt_int_op(ev_called, ==, 1); + + ev_called = cb_called = 0; + event_base_assert_ok_(base); + + /* Great, it worked. Now activate and finalize and make sure only + * finalizing happens. */ + event_active(ev, EV_TIMEOUT, 1); + event_callback_activate_(base, &evcb); + event_free_finalize(0, ev, event_finalize_callback_1); + event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1); + + event_base_dispatch(base); + tt_int_op(cb_called, ==, 100); + tt_int_op(ev_called, ==, 100); + + ev_called = 0; + + event_base_assert_ok_(base); + + /* Okay, now add but don't have it become active, and make sure *that* + * works. */ + ev = evtimer_new(base, timer_callback, &ev_called); + event_add(ev, &ten_sec); + event_free_finalize(0, ev, event_finalize_callback_1); + + event_base_dispatch(base); + tt_int_op(ev_called, ==, 100); + + ev_called = 0; + event_base_assert_ok_(base); + + /* Now try adding and deleting after finalizing. */ + ev = evtimer_new(base, timer_callback, &ev_called); + evtimer_assign(&ev2, base, timer_callback, &ev_called); + event_add(ev, &ten_sec); + event_free_finalize(0, ev, event_finalize_callback_1); + event_finalize(0, &ev2, event_finalize_callback_1); + + event_add(&ev2, &ten_sec); + event_del(ev); + event_active(&ev2, EV_TIMEOUT, 1); + + event_base_dispatch(base); + tt_int_op(ev_called, ==, 200); + + event_base_assert_ok_(base); + +end: + ; +} + +#ifndef EVENT__DISABLE_MM_REPLACEMENT +static void * +tfff_malloc(size_t n) +{ + return malloc(n); +} +static void *tfff_p1=NULL, *tfff_p2=NULL; +static int tfff_p1_freed=0, tfff_p2_freed=0; +static void +tfff_free(void *p) +{ + if (! 
p) + return; + if (p == tfff_p1) + ++tfff_p1_freed; + if (p == tfff_p2) + ++tfff_p2_freed; + free(p); +} +static void * +tfff_realloc(void *p, size_t sz) +{ + return realloc(p,sz); +} +#endif + +static void +test_fin_free_finalize(void *arg) +{ +#ifdef EVENT__DISABLE_MM_REPLACEMENT + tinytest_set_test_skipped_(); +#else + struct event_base *base = NULL; + struct event *ev, *ev2; + int ev_called = 0; + int ev2_called = 0; + + (void)arg; + + event_set_mem_functions(tfff_malloc, tfff_realloc, tfff_free); + + base = event_base_new(); + tt_assert(base); + + ev = evtimer_new(base, timer_callback, &ev_called); + ev2 = evtimer_new(base, timer_callback, &ev2_called); + tfff_p1 = ev; + tfff_p2 = ev2; + event_free_finalize(0, ev, event_finalize_callback_1); + event_finalize(0, ev2, event_finalize_callback_1); + + event_base_dispatch(base); + + tt_int_op(ev_called, ==, 100); + tt_int_op(ev2_called, ==, 100); + + event_base_assert_ok_(base); + tt_int_op(tfff_p1_freed, ==, 1); + tt_int_op(tfff_p2_freed, ==, 0); + + event_free(ev2); + +end: + if (base) + event_base_free(base); +#endif +} + +/* For test_fin_within_cb */ +struct event_and_count { + struct event *ev; + struct event *ev2; + int count; +}; +static void +event_finalize_callback_2(struct event *ev, void *arg) +{ + struct event_and_count *evc = arg; + evc->count += 100; + event_free(ev); +} +static void +timer_callback_2(evutil_socket_t fd, short what, void *arg) +{ + struct event_and_count *evc = arg; + event_finalize(0, evc->ev, event_finalize_callback_2); + event_finalize(0, evc->ev2, event_finalize_callback_2); + ++ evc->count; + (void)fd; + (void)what; +} + +static void +test_fin_within_cb(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + + struct event_and_count evc1, evc2; + evc1.count = evc2.count = 0; + evc2.ev2 = evc1.ev = evtimer_new(base, timer_callback_2, &evc1); + evc1.ev2 = evc2.ev = evtimer_new(base, timer_callback_2, &evc2); + + /* Activate both. The first one will have its callback run, which + * will finalize both of them, preventing the second one's callback + * from running. */ + event_active(evc1.ev, EV_TIMEOUT, 1); + event_active(evc2.ev, EV_TIMEOUT, 1); + + event_base_dispatch(base); + tt_int_op(evc1.count, ==, 101); + tt_int_op(evc2.count, ==, 100); + + event_base_assert_ok_(base); + /* Now try with EV_PERSIST events. 
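+ * As before, the first callback to run increments its own counter (+1) and
+ * then finalizes both events (+100 each), so the expected counts are again
+ * 101 and 100.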
*/ + evc1.count = evc2.count = 0; + evc2.ev2 = evc1.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc1); + evc1.ev2 = evc2.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc2); + + event_active(evc1.ev, EV_TIMEOUT, 1); + event_active(evc2.ev, EV_TIMEOUT, 1); + + event_base_dispatch(base); + tt_int_op(evc1.count, ==, 101); + tt_int_op(evc2.count, ==, 100); + + event_base_assert_ok_(base); +end: + ; +} + +#if 0 +static void +timer_callback_3(evutil_socket_t *fd, short what, void *arg) +{ + (void)fd; + (void)what; + +} +static void +test_fin_many(void *arg) +{ + struct basic_test_data *data = arg; + struct event_base *base = data->base; + + struct event *ev1, *ev2; + struct event_callback evcb1, evcb2; + int ev1_count = 0, ev2_count = 0; + int evcb1_count = 0, evcb2_count = 0; + struct event_callback *array[4]; + + int n; + + /* First attempt: call finalize_many with no events running */ + ev1 = evtimer_new(base, timer_callback, &ev1_count); + ev1 = evtimer_new(base, timer_callback, &ev2_count); + event_deferred_cb_init_(&evcb1, 0, simple_callback, &evcb1_called); + event_deferred_cb_init_(&evcb2, 0, simple_callback, &evcb2_called); + array[0] = &ev1->ev_evcallback; + array[1] = &ev2->ev_evcallback; + array[2] = &evcb1; + array[3] = &evcb2; + + + + n = event_callback_finalize_many(base, 4, array, + callback_finalize_callback_1); + +} +#endif + + +#define TEST(name, flags) \ + { #name, test_fin_##name, (flags), &basic_setup, NULL } + +struct testcase_t finalize_testcases[] = { + + TEST(cb_invoked, TT_FORK|TT_NEED_BASE), + TEST(free_finalize, TT_FORK), + TEST(within_cb, TT_FORK|TT_NEED_BASE), +// TEST(many, TT_FORK|TT_NEED_BASE), + + + END_OF_TESTCASES +}; + diff --git a/probe-busybox/libevent-2.1.11-stable/test/regress_http.c b/probe-busybox/libevent-2.1.11-stable/test/regress_http.c new file mode 100644 index 00000000..8f30b57b --- /dev/null +++ b/probe-busybox/libevent-2.1.11-stable/test/regress_http.c @@ -0,0 +1,4808 @@ +/* + * Copyright (c) 2003-2007 Niels Provos + * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "util-internal.h" + +#ifdef _WIN32 +#include +#include +#include +#endif + +#include "event2/event-config.h" + +#include +#include +#ifdef EVENT__HAVE_SYS_TIME_H +#include +#endif +#include +#ifndef _WIN32 +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include + +#include "event2/dns.h" + +#include "event2/event.h" +#include "event2/http.h" +#include "event2/buffer.h" +#include "event2/bufferevent.h" +#include "event2/bufferevent_ssl.h" +#include "event2/util.h" +#include "event2/listener.h" +#include "log-internal.h" +#include "http-internal.h" +#include "regress.h" +#include "regress_testutils.h" + +/* set if a test needs to call loopexit on a base */ +static struct event_base *exit_base; + +static char const BASIC_REQUEST_BODY[] = "This is funny"; + +static void http_basic_cb(struct evhttp_request *req, void *arg); +static void http_timeout_cb(struct evhttp_request *req, void *arg); +static void http_large_cb(struct evhttp_request *req, void *arg); +static void http_chunked_cb(struct evhttp_request *req, void *arg); +static void http_post_cb(struct evhttp_request *req, void *arg); +static void http_put_cb(struct evhttp_request *req, void *arg); +static void http_delete_cb(struct evhttp_request *req, void *arg); +static void http_delay_cb(struct evhttp_request *req, void *arg); +static void http_large_delay_cb(struct evhttp_request *req, void *arg); +static void http_badreq_cb(struct evhttp_request *req, void *arg); +static void http_dispatcher_cb(struct evhttp_request *req, void *arg); +static void http_on_complete_cb(struct evhttp_request *req, void *arg); + +#define HTTP_BIND_IPV6 1 +#define HTTP_BIND_SSL 2 +#define HTTP_SSL_FILTER 4 +static int +http_bind(struct evhttp *myhttp, ev_uint16_t *pport, int mask) +{ + int port; + struct evhttp_bound_socket *sock; + int ipv6 = mask & HTTP_BIND_IPV6; + + if (ipv6) + sock = evhttp_bind_socket_with_handle(myhttp, "::1", *pport); + else + sock = evhttp_bind_socket_with_handle(myhttp, "127.0.0.1", *pport); + + if (sock == NULL) { + if (ipv6) + return -1; + else + event_errx(1, "Could not start web server"); + } + + port = regress_get_socket_port(evhttp_bound_socket_get_fd(sock)); + if (port < 0) + return -1; + *pport = (ev_uint16_t) port; + + return 0; +} + +#ifdef EVENT__HAVE_OPENSSL +static struct bufferevent * +https_bev(struct event_base *base, void *arg) +{ + SSL *ssl = SSL_new(get_ssl_ctx()); + + SSL_use_certificate(ssl, ssl_getcert(ssl_getkey())); + SSL_use_PrivateKey(ssl, ssl_getkey()); + + return bufferevent_openssl_socket_new( + base, -1, ssl, BUFFEREVENT_SSL_ACCEPTING, + BEV_OPT_CLOSE_ON_FREE); +} +#endif +static struct evhttp * +http_setup_gencb(ev_uint16_t *pport, struct event_base *base, int mask, + void (*cb)(struct evhttp_request *, void *), void *cbarg) +{ + struct evhttp *myhttp; + + /* Try a few different ports */ + myhttp = evhttp_new(base); + + if (http_bind(myhttp, pport, mask) < 0) + return NULL; +#ifdef EVENT__HAVE_OPENSSL + if (mask & HTTP_BIND_SSL) { + init_ssl(); + evhttp_set_bevcb(myhttp, https_bev, NULL); + } +#endif + + evhttp_set_gencb(myhttp, cb, cbarg); + + /* Register a callback for certain types of requests */ + evhttp_set_cb(myhttp, "/test", http_basic_cb, myhttp); + evhttp_set_cb(myhttp, "/test nonconformant", http_basic_cb, myhttp); + evhttp_set_cb(myhttp, "/timeout", http_timeout_cb, myhttp); + evhttp_set_cb(myhttp, "/large", http_large_cb, base); + evhttp_set_cb(myhttp, "/chunked", http_chunked_cb, base); + evhttp_set_cb(myhttp, "/streamed", 
http_chunked_cb, base); + evhttp_set_cb(myhttp, "/postit", http_post_cb, base); + evhttp_set_cb(myhttp, "/putit", http_put_cb, base); + evhttp_set_cb(myhttp, "/deleteit", http_delete_cb, base); + evhttp_set_cb(myhttp, "/delay", http_delay_cb, base); + evhttp_set_cb(myhttp, "/largedelay", http_large_delay_cb, base); + evhttp_set_cb(myhttp, "/badrequest", http_badreq_cb, base); + evhttp_set_cb(myhttp, "/oncomplete", http_on_complete_cb, base); + evhttp_set_cb(myhttp, "/", http_dispatcher_cb, base); + return (myhttp); +} +static struct evhttp * +http_setup(ev_uint16_t *pport, struct event_base *base, int mask) +{ return http_setup_gencb(pport, base, mask, NULL, NULL); } + +#ifndef NI_MAXSERV +#define NI_MAXSERV 1024 +#endif + +static evutil_socket_t +http_connect(const char *address, ev_uint16_t port) +{ + /* Stupid code for connecting */ + struct evutil_addrinfo ai, *aitop; + char strport[NI_MAXSERV]; + + struct sockaddr *sa; + size_t slen; + evutil_socket_t fd; + + memset(&ai, 0, sizeof(ai)); + ai.ai_family = AF_INET; + ai.ai_socktype = SOCK_STREAM; + evutil_snprintf(strport, sizeof(strport), "%d", port); + if (evutil_getaddrinfo(address, strport, &ai, &aitop) != 0) { + event_warn("getaddrinfo"); + return (-1); + } + sa = aitop->ai_addr; + slen = aitop->ai_addrlen; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd == -1) + event_err(1, "socket failed"); + + evutil_make_socket_nonblocking(fd); + if (connect(fd, sa, slen) == -1) { +#ifdef _WIN32 + int tmp_err = WSAGetLastError(); + if (tmp_err != WSAEINPROGRESS && tmp_err != WSAEINVAL && + tmp_err != WSAEWOULDBLOCK) + event_err(1, "connect failed"); +#else + if (errno != EINPROGRESS) + event_err(1, "connect failed"); +#endif + } + + evutil_freeaddrinfo(aitop); + + return (fd); +} + +/* Helper: do a strcmp on the contents of buf and the string s. 
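+ * Returns <0 if buf is shorter than s, the memcmp() result if the first
+ * strlen(s) bytes differ, 1 if buf is longer than s, and 0 on an exact match.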
*/ +static int +evbuffer_datacmp(struct evbuffer *buf, const char *s) +{ + size_t b_sz = evbuffer_get_length(buf); + size_t s_sz = strlen(s); + unsigned char *d; + int r; + + if (b_sz < s_sz) + return -1; + + d = evbuffer_pullup(buf, s_sz); + if ((r = memcmp(d, s, s_sz))) + return r; + + if (b_sz > s_sz) + return 1; + else + return 0; +} + +/* Helper: Return true iff buf contains s */ +static int +evbuffer_contains(struct evbuffer *buf, const char *s) +{ + struct evbuffer_ptr ptr; + ptr = evbuffer_search(buf, s, strlen(s), NULL); + return ptr.pos != -1; +} + +static void +http_readcb(struct bufferevent *bev, void *arg) +{ + const char *what = BASIC_REQUEST_BODY; + struct event_base *my_base = arg; + + if (evbuffer_contains(bufferevent_get_input(bev), what)) { + struct evhttp_request *req = evhttp_request_new(NULL, NULL); + enum message_read_status done; + + /* req->kind = EVHTTP_RESPONSE; */ + done = evhttp_parse_firstline_(req, bufferevent_get_input(bev)); + if (done != ALL_DATA_READ) + goto out; + + done = evhttp_parse_headers_(req, bufferevent_get_input(bev)); + if (done != ALL_DATA_READ) + goto out; + + if (done == 1 && + evhttp_find_header(evhttp_request_get_input_headers(req), + "Content-Type") != NULL) + test_ok++; + + out: + evhttp_request_free(req); + bufferevent_disable(bev, EV_READ); + if (exit_base) + event_base_loopexit(exit_base, NULL); + else if (my_base) + event_base_loopexit(my_base, NULL); + else { + fprintf(stderr, "No way to exit loop!\n"); + exit(1); + } + } +} + +static void +http_writecb(struct bufferevent *bev, void *arg) +{ + if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) { + /* enable reading of the reply */ + bufferevent_enable(bev, EV_READ); + test_ok++; + } +} + +static void +http_errorcb(struct bufferevent *bev, short what, void *arg) +{ + /** For ssl */ + if (what & BEV_EVENT_CONNECTED) + return; + test_ok = -2; + event_base_loopexit(arg, NULL); +} + +static int found_multi = 0; +static int found_multi2 = 0; + +static void +http_basic_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + struct evhttp_connection *evcon; + int empty = evhttp_find_header(evhttp_request_get_input_headers(req), "Empty") != NULL; + + TT_BLATHER(("%s: called\n", __func__)); + evbuffer_add_printf(evb, BASIC_REQUEST_BODY); + + evcon = evhttp_request_get_connection(req); + tt_assert(evhttp_connection_get_server(evcon) == arg); + + { + const struct sockaddr *sa; + char addrbuf[128]; + + sa = evhttp_connection_get_addr(evcon); + tt_assert(sa); + + if (sa->sa_family == AF_INET) { + evutil_format_sockaddr_port_((struct sockaddr *)sa, addrbuf, sizeof(addrbuf)); + tt_assert(!strncmp(addrbuf, "127.0.0.1:", strlen("127.0.0.1:"))); + } else if (sa->sa_family == AF_INET6) { + evutil_format_sockaddr_port_((struct sockaddr *)sa, addrbuf, sizeof(addrbuf)); + tt_assert(!strncmp(addrbuf, "[::1]:", strlen("[::1]:"))); + } else { + tt_fail_msg("Unsupported family"); + } + } + + /* For multi-line headers test */ + { + const char *multi = + evhttp_find_header(evhttp_request_get_input_headers(req),"X-Multi"); + if (multi) { + found_multi = !strcmp(multi,"aaaaaaaa a END"); + if (strcmp("END", multi + strlen(multi) - 3) == 0) + test_ok++; + if (evhttp_find_header(evhttp_request_get_input_headers(req), "X-Last")) + test_ok++; + } + } + { + const char *multi2 = + evhttp_find_header(evhttp_request_get_input_headers(req),"X-Multi-Extra-WS"); + if (multi2) { + found_multi2 = !strcmp(multi2,"libevent 2.1"); + } + } + + + /* injecting a bad content-length */ + if 
(evhttp_find_header(evhttp_request_get_input_headers(req), "X-Negative")) + evhttp_add_header(evhttp_request_get_output_headers(req), + "Content-Length", "-100"); + + /* allow sending of an empty reply */ + evhttp_send_reply(req, HTTP_OK, "Everything is fine", + !empty ? evb : NULL); + +end: + evbuffer_free(evb); +} + +static void http_timeout_reply_cb(evutil_socket_t fd, short events, void *arg) +{ + struct evhttp_request *req = arg; + evhttp_send_reply(req, HTTP_OK, "Everything is fine", NULL); + test_ok++; +} +static void +http_timeout_cb(struct evhttp_request *req, void *arg) +{ + struct timeval when = { 0, 100 }; + event_base_once(exit_base, -1, EV_TIMEOUT, + http_timeout_reply_cb, req, &when); +} + +static void +http_large_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + int i; + + for (i = 0; i < 1<<20; ++i) { + evbuffer_add_printf(evb, BASIC_REQUEST_BODY); + } + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + evbuffer_free(evb); +} + +static char const* const CHUNKS[] = { + "This is funny", + "but not hilarious.", + "bwv 1052" +}; + +struct chunk_req_state { + struct event_base *base; + struct evhttp_request *req; + int i; +}; + +static void +http_chunked_trickle_cb(evutil_socket_t fd, short events, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + struct chunk_req_state *state = arg; + struct timeval when = { 0, 0 }; + + evbuffer_add_printf(evb, "%s", CHUNKS[state->i]); + evhttp_send_reply_chunk(state->req, evb); + evbuffer_free(evb); + + if (++state->i < (int) (sizeof(CHUNKS)/sizeof(CHUNKS[0]))) { + event_base_once(state->base, -1, EV_TIMEOUT, + http_chunked_trickle_cb, state, &when); + } else { + evhttp_send_reply_end(state->req); + free(state); + } +} + +static void +http_chunked_cb(struct evhttp_request *req, void *arg) +{ + struct timeval when = { 0, 0 }; + struct chunk_req_state *state = malloc(sizeof(struct chunk_req_state)); + TT_BLATHER(("%s: called\n", __func__)); + + memset(state, 0, sizeof(struct chunk_req_state)); + state->req = req; + state->base = arg; + + if (strcmp(evhttp_request_get_uri(req), "/streamed") == 0) { + evhttp_add_header(evhttp_request_get_output_headers(req), "Content-Length", "39"); + } + + /* generate a chunked/streamed reply */ + evhttp_send_reply_start(req, HTTP_OK, "Everything is fine"); + + /* but trickle it across several iterations to ensure we're not + * assuming it comes all at once */ + event_base_once(arg, -1, EV_TIMEOUT, http_chunked_trickle_cb, state, &when); +} + +static struct bufferevent * +create_bev(struct event_base *base, evutil_socket_t fd, int ssl_mask) +{ + int flags = BEV_OPT_DEFER_CALLBACKS; + struct bufferevent *bev = NULL; + + if (!ssl_mask) { + bev = bufferevent_socket_new(base, fd, flags); + } else { +#ifdef EVENT__HAVE_OPENSSL + SSL *ssl = SSL_new(get_ssl_ctx()); + if (ssl_mask & HTTP_SSL_FILTER) { + struct bufferevent *underlying = + bufferevent_socket_new(base, fd, flags); + bev = bufferevent_openssl_filter_new( + base, underlying, ssl, BUFFEREVENT_SSL_CONNECTING, flags); + } else { + bev = bufferevent_openssl_socket_new( + base, fd, ssl, BUFFEREVENT_SSL_CONNECTING, flags); + } + bufferevent_openssl_set_allow_dirty_shutdown(bev, 1); +#endif + } + + return bev; +} + +static void +http_half_writecb(struct bufferevent *bev, void *arg) +{ + if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) { + if (!test_ok) { + const char http_request[] = "host\r\n" + "Connection: close\r\n" + "\r\n"; + bufferevent_write(bev, http_request, strlen(http_request)); + } + /* 
enable reading of the reply */ + bufferevent_enable(bev, EV_READ); + test_ok++; + } +} + +static void +http_basic_test_impl(void *arg, int ssl, const char *request_line) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev = NULL; + evutil_socket_t fd; + const char *http_request; + ev_uint16_t port = 0, port2 = 0; + int server_flags = ssl ? HTTP_BIND_SSL : 0; + struct evhttp *http = http_setup(&port, data->base, server_flags); + struct evbuffer *out; + + exit_base = data->base; + + /* bind to a second socket */ + if (http_bind(http, &port2, server_flags) == -1) { + fprintf(stdout, "FAILED (bind)\n"); + exit(1); + } + + fd = http_connect("127.0.0.1", port); + + /* Stupid thing to send a request */ + bev = create_bev(data->base, fd, ssl); + bufferevent_setcb(bev, http_readcb, http_half_writecb, + http_errorcb, data->base); + out = bufferevent_get_output(bev); + + /* first half of the http request */ + evbuffer_add_printf(out, + "%s\r\n" + "Host: some", request_line); + + test_ok = 0; + event_base_dispatch(data->base); + tt_int_op(test_ok, ==, 3); + + /* connect to the second port */ + bufferevent_free(bev); + evutil_closesocket(fd); + + fd = http_connect("127.0.0.1", port2); + + /* Stupid thing to send a request */ + bev = create_bev(data->base, fd, ssl); + bufferevent_setcb(bev, http_readcb, http_writecb, + http_errorcb, data->base); + out = bufferevent_get_output(bev); + + evbuffer_add_printf(out, + "%s\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n", request_line); + + test_ok = 0; + event_base_dispatch(data->base); + tt_int_op(test_ok, ==, 2); + + /* Connect to the second port again. This time, send an absolute uri. */ + bufferevent_free(bev); + evutil_closesocket(fd); + + fd = http_connect("127.0.0.1", port2); + + /* Stupid thing to send a request */ + bev = create_bev(data->base, fd, ssl); + bufferevent_setcb(bev, http_readcb, http_writecb, + http_errorcb, data->base); + + http_request = + "GET http://somehost.net/test HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + test_ok = 0; + event_base_dispatch(data->base); + tt_int_op(test_ok, ==, 2); + + evhttp_free(http); +end: + if (bev) + bufferevent_free(bev); +} +static void http_basic_test(void *arg)\ +{ http_basic_test_impl(arg, 0, "GET /test HTTP/1.1"); } +static void http_basic_trailing_space_test(void *arg) +{ http_basic_test_impl(arg, 0, "GET /test HTTP/1.1 "); } + + +static void +http_delay_reply(evutil_socket_t fd, short what, void *arg) +{ + struct evhttp_request *req = arg; + + evhttp_send_reply(req, HTTP_OK, "Everything is fine", NULL); + + ++test_ok; +} + +static void +http_delay_cb(struct evhttp_request *req, void *arg) +{ + struct timeval tv; + evutil_timerclear(&tv); + tv.tv_sec = 0; + tv.tv_usec = 200 * 1000; + + event_base_once(arg, -1, EV_TIMEOUT, http_delay_reply, req, &tv); +} + +static void +http_badreq_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *buf = evbuffer_new(); + + evhttp_add_header(evhttp_request_get_output_headers(req), "Content-Type", "text/xml; charset=UTF-8"); + evbuffer_add_printf(buf, "Hello, %s!", "127.0.0.1"); + + evhttp_send_reply(req, HTTP_OK, "OK", buf); + evbuffer_free(buf); +} + +static void +http_badreq_errorcb(struct bufferevent *bev, short what, void *arg) +{ + TT_BLATHER(("%s: called (what=%04x, arg=%p)", __func__, what, arg)); + /* ignore */ +} + +static void +http_badreq_readcb(struct bufferevent *bev, void *arg) +{ + const char *what = "Hello, 
127.0.0.1"; + const char *bad_request = "400 Bad Request"; + + if (evbuffer_contains(bufferevent_get_input(bev), bad_request)) { + TT_FAIL(("%s:bad request detected", __func__)); + bufferevent_disable(bev, EV_READ); + event_base_loopexit(arg, NULL); + return; + } + + if (evbuffer_contains(bufferevent_get_input(bev), what)) { + struct evhttp_request *req = evhttp_request_new(NULL, NULL); + enum message_read_status done; + + /* req->kind = EVHTTP_RESPONSE; */ + done = evhttp_parse_firstline_(req, bufferevent_get_input(bev)); + if (done != ALL_DATA_READ) + goto out; + + done = evhttp_parse_headers_(req, bufferevent_get_input(bev)); + if (done != ALL_DATA_READ) + goto out; + + if (done == 1 && + evhttp_find_header(evhttp_request_get_input_headers(req), + "Content-Type") != NULL) + test_ok++; + + out: + evhttp_request_free(req); + evbuffer_drain(bufferevent_get_input(bev), evbuffer_get_length(bufferevent_get_input(bev))); + } + + shutdown(bufferevent_getfd(bev), EVUTIL_SHUT_WR); +} + +static void +http_badreq_successcb(evutil_socket_t fd, short what, void *arg) +{ + TT_BLATHER(("%s: called (what=%04x, arg=%p)", __func__, what, arg)); + event_base_loopexit(exit_base, NULL); +} + +static void +http_bad_request_test(void *arg) +{ + struct basic_test_data *data = arg; + struct timeval tv; + struct bufferevent *bev = NULL; + evutil_socket_t fd = EVUTIL_INVALID_SOCKET; + const char *http_request; + ev_uint16_t port=0, port2=0; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + exit_base = data->base; + + /* bind to a second socket */ + if (http_bind(http, &port2, 0) == -1) + TT_DIE(("Bind socket failed")); + + /* NULL request test */ + fd = http_connect("127.0.0.1", port); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_badreq_readcb, http_writecb, + http_badreq_errorcb, data->base); + bufferevent_enable(bev, EV_READ); + + /* real NULL request */ + http_request = ""; + + bufferevent_write(bev, http_request, strlen(http_request)); + + shutdown(fd, EVUTIL_SHUT_WR); + timerclear(&tv); + tv.tv_usec = 10000; + event_base_once(data->base, -1, EV_TIMEOUT, http_badreq_successcb, bev, &tv); + + event_base_dispatch(data->base); + + bufferevent_free(bev); + evutil_closesocket(fd); + + if (test_ok != 0) { + fprintf(stdout, "FAILED\n"); + exit(1); + } + + /* Second answer (BAD REQUEST) on connection close */ + + /* connect to the second port */ + fd = http_connect("127.0.0.1", port2); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_badreq_readcb, http_writecb, + http_badreq_errorcb, data->base); + bufferevent_enable(bev, EV_READ); + + /* first half of the http request */ + http_request = + "GET /badrequest HTTP/1.0\r\n" \ + "Connection: Keep-Alive\r\n" \ + "\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + timerclear(&tv); + tv.tv_usec = 10000; + event_base_once(data->base, -1, EV_TIMEOUT, http_badreq_successcb, bev, &tv); + + event_base_dispatch(data->base); + + tt_int_op(test_ok, ==, 2); + +end: + evhttp_free(http); + if (bev) + bufferevent_free(bev); + if (fd >= 0) + evutil_closesocket(fd); +} + +static struct evhttp_connection *delayed_client; + +static void +http_large_delay_cb(struct evhttp_request *req, void *arg) +{ + struct timeval tv; + evutil_timerclear(&tv); + tv.tv_usec = 500000; + + event_base_once(arg, -1, EV_TIMEOUT, 
http_delay_reply, req, &tv); + evhttp_connection_fail_(delayed_client, EVREQ_HTTP_EOF); +} + +/* + * HTTP DELETE test, just piggyback on the basic test + */ + +static void +http_delete_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + int empty = evhttp_find_header(evhttp_request_get_input_headers(req), "Empty") != NULL; + + /* Expecting a DELETE request */ + if (evhttp_request_get_command(req) != EVHTTP_REQ_DELETE) { + fprintf(stdout, "FAILED (delete type)\n"); + exit(1); + } + + TT_BLATHER(("%s: called\n", __func__)); + evbuffer_add_printf(evb, BASIC_REQUEST_BODY); + + /* allow sending of an empty reply */ + evhttp_send_reply(req, HTTP_OK, "Everything is fine", + !empty ? evb : NULL); + + evbuffer_free(evb); +} + +static void +http_delete_test(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev; + evutil_socket_t fd = EVUTIL_INVALID_SOCKET; + const char *http_request; + ev_uint16_t port = 0; + struct evhttp *http = http_setup(&port, data->base, 0); + + exit_base = data->base; + test_ok = 0; + + tt_assert(http); + fd = http_connect("127.0.0.1", port); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_readcb, http_writecb, + http_errorcb, data->base); + + http_request = + "DELETE /deleteit HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + bufferevent_free(bev); + evutil_closesocket(fd); + fd = EVUTIL_INVALID_SOCKET; + + evhttp_free(http); + + tt_int_op(test_ok, ==, 2); + end: + if (fd >= 0) + evutil_closesocket(fd); +} + +static void +http_sent_cb(struct evhttp_request *req, void *arg) +{ + ev_uintptr_t val = (ev_uintptr_t)arg; + struct evbuffer *b; + + if (val != 0xDEADBEEF) { + fprintf(stdout, "FAILED on_complete_cb argument\n"); + exit(1); + } + + b = evhttp_request_get_output_buffer(req); + if (evbuffer_get_length(b) != 0) { + fprintf(stdout, "FAILED on_complete_cb output buffer not written\n"); + exit(1); + } + + TT_BLATHER(("%s: called\n", __func__)); + + ++test_ok; +} + +static void +http_on_complete_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb = evbuffer_new(); + + evhttp_request_set_on_complete_cb(req, http_sent_cb, (void *)0xDEADBEEF); + + TT_BLATHER(("%s: called\n", __func__)); + evbuffer_add_printf(evb, BASIC_REQUEST_BODY); + + /* allow sending of an empty reply */ + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + + evbuffer_free(evb); + + ++test_ok; +} + +static void +http_on_complete_test(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev; + evutil_socket_t fd = EVUTIL_INVALID_SOCKET; + const char *http_request; + ev_uint16_t port = 0; + struct evhttp *http = http_setup(&port, data->base, 0); + + exit_base = data->base; + test_ok = 0; + + fd = http_connect("127.0.0.1", port); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_readcb, http_writecb, + http_errorcb, data->base); + + http_request = + "GET /oncomplete HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + bufferevent_free(bev); + + evhttp_free(http); + + tt_int_op(test_ok, ==, 4); + end: + if (fd >= 0) + 
evutil_closesocket(fd); +} + +static void +http_allowed_methods_eventcb(struct bufferevent *bev, short what, void *arg) +{ + char **output = arg; + if ((what & (BEV_EVENT_ERROR|BEV_EVENT_EOF))) { + char buf[4096]; + int n; + n = evbuffer_remove(bufferevent_get_input(bev), buf, + sizeof(buf)-1); + if (n >= 0) { + buf[n]='\0'; + if (*output) + free(*output); + *output = strdup(buf); + } + event_base_loopexit(exit_base, NULL); + } +} + +static void +http_allowed_methods_test(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev1, *bev2, *bev3; + evutil_socket_t fd1=-1, fd2=-1, fd3=-1; + const char *http_request; + char *result1=NULL, *result2=NULL, *result3=NULL; + ev_uint16_t port = 0; + struct evhttp *http = http_setup(&port, data->base, 0); + + exit_base = data->base; + test_ok = 0; + + fd1 = http_connect("127.0.0.1", port); + tt_assert(fd1 != EVUTIL_INVALID_SOCKET); + + /* GET is out; PATCH is in. */ + evhttp_set_allowed_methods(http, EVHTTP_REQ_PATCH); + + /* Stupid thing to send a request */ + bev1 = bufferevent_socket_new(data->base, fd1, 0); + bufferevent_enable(bev1, EV_READ|EV_WRITE); + bufferevent_setcb(bev1, NULL, NULL, + http_allowed_methods_eventcb, &result1); + + http_request = + "GET /index.html HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev1, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + fd2 = http_connect("127.0.0.1", port); + tt_assert(fd2 != EVUTIL_INVALID_SOCKET); + + bev2 = bufferevent_socket_new(data->base, fd2, 0); + bufferevent_enable(bev2, EV_READ|EV_WRITE); + bufferevent_setcb(bev2, NULL, NULL, + http_allowed_methods_eventcb, &result2); + + http_request = + "PATCH /test HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev2, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + fd3 = http_connect("127.0.0.1", port); + tt_assert(fd3 != EVUTIL_INVALID_SOCKET); + + bev3 = bufferevent_socket_new(data->base, fd3, 0); + bufferevent_enable(bev3, EV_READ|EV_WRITE); + bufferevent_setcb(bev3, NULL, NULL, + http_allowed_methods_eventcb, &result3); + + http_request = + "FLOOP /test HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev3, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + bufferevent_free(bev1); + bufferevent_free(bev2); + bufferevent_free(bev3); + + evhttp_free(http); + + /* Method known but disallowed */ + tt_assert(result1); + tt_assert(!strncmp(result1, "HTTP/1.1 501 ", strlen("HTTP/1.1 501 "))); + + /* Method known and allowed */ + tt_assert(result2); + tt_assert(!strncmp(result2, "HTTP/1.1 200 ", strlen("HTTP/1.1 200 "))); + + /* Method unknown */ + tt_assert(result3); + tt_assert(!strncmp(result3, "HTTP/1.1 501 ", strlen("HTTP/1.1 501 "))); + + end: + if (result1) + free(result1); + if (result2) + free(result2); + if (result3) + free(result3); + if (fd1 >= 0) + evutil_closesocket(fd1); + if (fd2 >= 0) + evutil_closesocket(fd2); + if (fd3 >= 0) + evutil_closesocket(fd3); +} + +static void http_request_no_action_done(struct evhttp_request *, void *); +static void http_request_done(struct evhttp_request *, void *); +static void http_request_empty_done(struct evhttp_request *, void *); + +static void +http_connection_test_(struct basic_test_data *data, int persistent, + const char *address, struct evdns_base *dnsbase, int ipv6, int family, + int ssl) +{ + ev_uint16_t port = 0; + struct evhttp_connection *evcon = 
NULL; + struct evhttp_request *req = NULL; + struct evhttp *http; + + int mask = 0; + if (ipv6) + mask |= HTTP_BIND_IPV6; + if (ssl) + mask |= HTTP_BIND_SSL; + + http = http_setup(&port, data->base, mask); + + test_ok = 0; + if (!http && ipv6) { + tt_skip(); + } + tt_assert(http); + + if (ssl) { +#ifdef EVENT__HAVE_OPENSSL + SSL *ssl = SSL_new(get_ssl_ctx()); + struct bufferevent *bev = bufferevent_openssl_socket_new( + data->base, -1, ssl, + BUFFEREVENT_SSL_CONNECTING, BEV_OPT_DEFER_CALLBACKS); + bufferevent_openssl_set_allow_dirty_shutdown(bev, 1); + + evcon = evhttp_connection_base_bufferevent_new(data->base, dnsbase, bev, address, port); +#else + tt_skip(); +#endif + } else { + evcon = evhttp_connection_base_new(data->base, dnsbase, address, port); + } + tt_assert(evcon); + evhttp_connection_set_family(evcon, family); + + tt_assert(evhttp_connection_get_base(evcon) == data->base); + + exit_base = data->base; + + tt_assert(evhttp_connection_get_server(evcon) == NULL); + + /* + * At this point, we want to schedule a request to the HTTP + * server using our make request method. + */ + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + fprintf(stdout, "FAILED\n"); + exit(1); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok); + + /* try to make another request over the same connection */ + test_ok = 0; + + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* + * if our connections are not supposed to be persistent; request + * a close from the server. 
+ */ + if (!persistent) + evhttp_add_header(evhttp_request_get_output_headers(req), "Connection", "close"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("couldn't make request"); + } + + event_base_dispatch(data->base); + + /* make another request: request empty reply */ + test_ok = 0; + + req = evhttp_request_new(http_request_empty_done, data->base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); +} + +static void +http_connection_test(void *arg) +{ + http_connection_test_(arg, 0, "127.0.0.1", NULL, 0, AF_UNSPEC, 0); +} +static void +http_persist_connection_test(void *arg) +{ + http_connection_test_(arg, 1, "127.0.0.1", NULL, 0, AF_UNSPEC, 0); +} + +static struct regress_dns_server_table search_table[] = { + { "localhost", "A", "127.0.0.1", 0, 0 }, + { NULL, NULL, NULL, 0, 0 } +}; + +static void +http_connection_async_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct evdns_base *dns_base = NULL; + ev_uint16_t portnum = 0; + char address[64]; + struct evhttp *http = http_setup(&port, data->base, 0); + + exit_base = data->base; + tt_assert(regress_dnsserver(data->base, &portnum, search_table)); + + dns_base = evdns_base_new(data->base, 0/* init name servers */); + tt_assert(dns_base); + + /* Add ourself as the only nameserver, and make sure we really are + * the only nameserver. */ + evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum); + evdns_base_nameserver_ip_add(dns_base, address); + + test_ok = 0; + + evcon = evhttp_connection_base_new(data->base, dns_base, "127.0.0.1", port); + tt_assert(evcon); + + /* + * At this point, we want to schedule a request to the HTTP + * server using our make request method. + */ + + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + fprintf(stdout, "FAILED\n"); + exit(1); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok); + + /* try to make another request over the same connection */ + test_ok = 0; + + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* + * if our connections are not supposed to be persistent; request + * a close from the server. 
+ */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Connection", "close"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("couldn't make request"); + } + + event_base_dispatch(data->base); + + /* make another request: request empty reply */ + test_ok = 0; + + req = evhttp_request_new(http_request_empty_done, data->base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); + if (dns_base) + evdns_base_free(dns_base, 0); + regress_clean_dnsserver(); +} + +static void +http_autofree_connection_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req[2] = { NULL }; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port); + tt_assert(evcon); + + /* + * At this point, we want to schedule two request to the HTTP + * server using our make request method. + */ + req[0] = evhttp_request_new(http_request_empty_done, data->base); + req[1] = evhttp_request_new(http_request_empty_done, data->base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Host", "somehost"); + evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Connection", "close"); + evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Empty", "itis"); + evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Host", "somehost"); + evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Connection", "close"); + evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Empty", "itis"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req[0], EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("couldn't make request"); + } + if (evhttp_make_request(evcon, req[1], EVHTTP_REQ_GET, "/test") == -1) { + tt_abort_msg("couldn't make request"); + } + + /* + * Tell libevent to free the connection when the request completes + * We then set the evcon pointer to NULL since we don't want to free it + * when this function ends. 
+ */ + evhttp_connection_free_on_completion(evcon); + evcon = NULL; + + event_base_dispatch(data->base); + + /* at this point, the http server should have no connection */ + tt_assert(TAILQ_FIRST(&http->connections) == NULL); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); +} + +static void +http_request_never_call(struct evhttp_request *req, void *arg) +{ + fprintf(stdout, "FAILED\n"); + exit(1); +} +static void +http_failed_request_done(struct evhttp_request *req, void *arg) +{ + tt_assert(!req); +end: + event_base_loopexit(arg, NULL); +} +#ifndef _WIN32 +static void +http_timed_out_request_done(struct evhttp_request *req, void *arg) +{ + tt_assert(req); + tt_int_op(evhttp_request_get_response_code(req), !=, HTTP_OK); +end: + event_base_loopexit(arg, NULL); +} +#endif + +static void +http_request_error_cb_with_cancel(enum evhttp_request_error error, void *arg) +{ + if (error != EVREQ_HTTP_REQUEST_CANCEL) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + test_ok = 1; + + { + struct timeval tv; + evutil_timerclear(&tv); + tv.tv_sec = 0; + tv.tv_usec = 500 * 1000; + event_base_loopexit(exit_base, &tv); + } +} +static void +http_do_cancel(evutil_socket_t fd, short what, void *arg) +{ + struct evhttp_request *req = arg; + evhttp_cancel_request(req); + ++test_ok; +} +static void +http_no_write(struct evbuffer *buffer, const struct evbuffer_cb_info *info, void *arg) +{ + fprintf(stdout, "FAILED\n"); + exit(1); +} +static void +http_free_evcons(struct evhttp_connection **evcons) +{ + struct evhttp_connection *evcon, **orig = evcons; + + if (!evcons) + return; + + while ((evcon = *evcons++)) { + evhttp_connection_free(evcon); + } + free(orig); +} +/** fill the backlog to force server drop packages for timeouts */ +static struct evhttp_connection ** +http_fill_backlog(struct event_base *base, int port) +{ +#define BACKLOG_SIZE 256 + struct evhttp_connection **evcon = malloc(sizeof(*evcon) * (BACKLOG_SIZE + 1)); + int i; + + for (i = 0; i < BACKLOG_SIZE; ++i) { + struct evhttp_request *req; + + evcon[i] = evhttp_connection_base_new(base, NULL, "127.0.0.1", port); + tt_assert(evcon[i]); + evhttp_connection_set_timeout(evcon[i], 5); + + req = evhttp_request_new(http_request_never_call, NULL); + tt_assert(req); + tt_int_op(evhttp_make_request(evcon[i], req, EVHTTP_REQ_GET, "/delay"), !=, -1); + } + evcon[i] = NULL; + + return evcon; + end: + fprintf(stderr, "Couldn't fill the backlog"); + return NULL; +} + +enum http_cancel_test_type { + BASIC = 1, + BY_HOST = 2, + NO_NS = 4, + INACTIVE_SERVER = 8, + SERVER_TIMEOUT = 16, + NS_TIMEOUT = 32, +}; +static struct evhttp_request * +http_cancel_test_bad_request_new(enum http_cancel_test_type type, + struct event_base *base) +{ +#ifndef _WIN32 + if (!(type & NO_NS) && (type & SERVER_TIMEOUT)) + return evhttp_request_new(http_timed_out_request_done, base); + else +#endif + if ((type & INACTIVE_SERVER) || (type & NO_NS)) + return evhttp_request_new(http_failed_request_done, base); + else + return NULL; +} +static void +http_cancel_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct bufferevent *bufev = NULL; + struct timeval tv; + struct evdns_base *dns_base = NULL; + ev_uint16_t portnum = 0; + char address[64]; + struct evhttp *inactive_http = NULL; + struct event_base *inactive_base = NULL; + struct evhttp_connection **evcons = NULL; + struct event_base *base_to_fill = data->base; + + enum 
http_cancel_test_type type = + (enum http_cancel_test_type)data->setup_data; + struct evhttp *http = http_setup(&port, data->base, 0); + + if (type & BY_HOST) { + const char *timeout = (type & NS_TIMEOUT) ? "6" : "3"; + + tt_assert(regress_dnsserver(data->base, &portnum, search_table)); + + dns_base = evdns_base_new(data->base, 0/* init name servers */); + tt_assert(dns_base); + + /** XXX: Hack the port to make timeout after resolving */ + if (type & NO_NS) + ++portnum; + + evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum); + evdns_base_nameserver_ip_add(dns_base, address); + + evdns_base_set_option(dns_base, "timeout:", timeout); + evdns_base_set_option(dns_base, "initial-probe-timeout:", timeout); + evdns_base_set_option(dns_base, "attempts:", "1"); + } + + exit_base = data->base; + + test_ok = 0; + + if (type & INACTIVE_SERVER) { + port = 0; + inactive_base = event_base_new(); + inactive_http = http_setup(&port, inactive_base, 0); + + base_to_fill = inactive_base; + } + + if (type & SERVER_TIMEOUT) + evcons = http_fill_backlog(base_to_fill, port); + + evcon = evhttp_connection_base_new( + data->base, dns_base, + type & BY_HOST ? "localhost" : "127.0.0.1", + port); + if (type & INACTIVE_SERVER) + evhttp_connection_set_timeout(evcon, 5); + tt_assert(evcon); + + bufev = evhttp_connection_get_bufferevent(evcon); + /* Guarantee that we stack in connect() not after waiting EV_READ after + * write() */ + if (type & SERVER_TIMEOUT) + evbuffer_add_cb(bufferevent_get_output(bufev), http_no_write, NULL); + + /* + * At this point, we want to schedule a request to the HTTP + * server using our make request method. + */ + + req = evhttp_request_new(http_request_never_call, NULL); + evhttp_request_set_error_cb(req, http_request_error_cb_with_cancel); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/delay"), + !=, -1); + + evutil_timerclear(&tv); + tv.tv_sec = 0; + tv.tv_usec = 100 * 1000; + + event_base_once(data->base, -1, EV_TIMEOUT, http_do_cancel, req, &tv); + + event_base_dispatch(data->base); + + if (type & NO_NS || type & INACTIVE_SERVER) + tt_int_op(test_ok, ==, 2); /** no servers responses */ + else + tt_int_op(test_ok, ==, 3); + + /* try to make another request over the same connection */ + test_ok = 0; + + http_free_evcons(evcons); + if (type & SERVER_TIMEOUT) + evcons = http_fill_backlog(base_to_fill, port); + + req = http_cancel_test_bad_request_new(type, data->base); + if (!req) + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test"), + !=, -1); + + event_base_dispatch(data->base); + + /* make another request: request empty reply */ + test_ok = 0; + + http_free_evcons(evcons); + if (type & SERVER_TIMEOUT) + evcons = http_fill_backlog(base_to_fill, port); + + req = http_cancel_test_bad_request_new(type, data->base); + if (!req) + req = evhttp_request_new(http_request_empty_done, data->base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis"); + + /* We give ownership of the request to the connection */ + 
tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test"), + !=, -1); + + event_base_dispatch(data->base); + + end: + http_free_evcons(evcons); + if (bufev) + evbuffer_remove_cb(bufferevent_get_output(bufev), http_no_write, NULL); + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); + if (dns_base) + evdns_base_free(dns_base, 0); + regress_clean_dnsserver(); + if (inactive_http) + evhttp_free(inactive_http); + if (inactive_base) + event_base_free(inactive_base); +} + +static void +http_request_no_action_done(struct evhttp_request *req, void *arg) +{ + EVUTIL_ASSERT(exit_base); + event_base_loopexit(exit_base, NULL); +} + +static void +http_request_done(struct evhttp_request *req, void *arg) +{ + const char *what = arg; + + if (!req) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_request_get_response_code(req) != HTTP_OK) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + test_ok = 1; + EVUTIL_ASSERT(exit_base); + event_base_loopexit(exit_base, NULL); +} + +static void +http_request_expect_error(struct evhttp_request *req, void *arg) +{ + if (evhttp_request_get_response_code(req) == HTTP_OK) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + test_ok = 1; + EVUTIL_ASSERT(arg); + event_base_loopexit(arg, NULL); +} + +/* test virtual hosts */ +static void +http_virtual_host_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct evhttp *second = NULL, *third = NULL; + evutil_socket_t fd; + struct bufferevent *bev; + const char *http_request; + struct evhttp *http = http_setup(&port, data->base, 0); + + exit_base = data->base; + + /* virtual host */ + second = evhttp_new(NULL); + evhttp_set_cb(second, "/funnybunny", http_basic_cb, http); + third = evhttp_new(NULL); + evhttp_set_cb(third, "/blackcoffee", http_basic_cb, http); + + if (evhttp_add_virtual_host(http, "foo.com", second) == -1) { + tt_abort_msg("Couldn't add vhost"); + } + + if (evhttp_add_virtual_host(http, "bar.*.foo.com", third) == -1) { + tt_abort_msg("Couldn't add wildcarded vhost"); + } + + /* add some aliases to the vhosts */ + tt_assert(evhttp_add_server_alias(second, "manolito.info") == 0); + tt_assert(evhttp_add_server_alias(third, "bonkers.org") == 0); + + evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port); + tt_assert(evcon); + + /* make a request with a different host and expect an error */ + req = evhttp_request_new(http_request_expect_error, data->base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + "/funnybunny") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok == 1); + + test_ok = 0; + + /* make a request with the right host and expect a response */ + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + 
evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "foo.com"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + "/funnybunny") == -1) { + fprintf(stdout, "FAILED\n"); + exit(1); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok == 1); + + test_ok = 0; + + /* make a request with the right host and expect a response */ + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "bar.magic.foo.com"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + "/blackcoffee") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok == 1); + + test_ok = 0; + + /* make a request with the right host and expect a response */ + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "manolito.info"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + "/funnybunny") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok == 1); + + test_ok = 0; + + /* make a request with the right host and expect a response */ + req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY); + + /* Add the Host header. This time with the optional port. */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "bonkers.org:8000"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, + "/blackcoffee") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_assert(test_ok == 1); + + test_ok = 0; + + /* Now make a raw request with an absolute URI. 
*/ + fd = http_connect("127.0.0.1", port); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_readcb, http_writecb, + http_errorcb, NULL); + + /* The host in the URI should override the Host: header */ + http_request = + "GET http://manolito.info/funnybunny HTTP/1.1\r\n" + "Host: somehost\r\n" + "Connection: close\r\n" + "\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + tt_int_op(test_ok, ==, 2); + + bufferevent_free(bev); + evutil_closesocket(fd); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); +} + + +/* test date header and content length */ + +static void +http_request_empty_done(struct evhttp_request *req, void *arg) +{ + if (!req) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_request_get_response_code(req) != HTTP_OK) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Date") == NULL) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Length") == NULL) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (strcmp(evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Length"), + "0")) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 0) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + test_ok = 1; + EVUTIL_ASSERT(arg); + event_base_loopexit(arg, NULL); +} + +/* + * HTTP DISPATCHER test + */ + +void +http_dispatcher_cb(struct evhttp_request *req, void *arg) +{ + + struct evbuffer *evb = evbuffer_new(); + TT_BLATHER(("%s: called\n", __func__)); + evbuffer_add_printf(evb, "DISPATCHER_TEST"); + + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + + evbuffer_free(evb); +} + +static void +http_dispatcher_test_done(struct evhttp_request *req, void *arg) +{ + struct event_base *base = arg; + const char *what = "DISPATCHER_TEST"; + + if (!req) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_request_get_response_code(req) != HTTP_OK) { + fprintf(stderr, "FAILED\n"); + exit(1); + } + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) { + fprintf(stderr, "FAILED (content type)\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) { + fprintf(stderr, "FAILED (length %lu vs %lu)\n", + (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what)); + exit(1); + } + + if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) { + fprintf(stderr, "FAILED (data)\n"); + exit(1); + } + + test_ok = 1; + event_base_loopexit(base, NULL); +} + +static void +http_dispatcher_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port); + tt_assert(evcon); + + /* also bind to local host */ + evhttp_connection_set_local_address(evcon, "127.0.0.1"); + + /* + * At this point, we want to schedule an HTTP GET request + * server using our make request method. 
+ */ + + req = evhttp_request_new(http_dispatcher_test_done, data->base); + tt_assert(req); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/?arg=val") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); +} + +/* + * HTTP POST test. + */ + +void http_postrequest_done(struct evhttp_request *, void *); + +#define POST_DATA "Okay. Not really printf" + +static void +http_post_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port); + tt_assert(evcon); + + /* + * At this point, we want to schedule an HTTP POST request + * server using our make request method. + */ + + req = evhttp_request_new(http_postrequest_done, data->base); + tt_assert(req); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + evbuffer_add_printf(evhttp_request_get_output_buffer(req), POST_DATA); + + if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_int_op(test_ok, ==, 1); + + test_ok = 0; + + req = evhttp_request_new(http_postrequest_done, data->base); + tt_assert(req); + + /* Now try with 100-continue. */ + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + evhttp_add_header(evhttp_request_get_output_headers(req), "Expect", "100-continue"); + evbuffer_add_printf(evhttp_request_get_output_buffer(req), POST_DATA); + + if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + tt_int_op(test_ok, ==, 1); + + evhttp_connection_free(evcon); + evhttp_free(http); + + end: + ; +} + +void +http_post_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb; + TT_BLATHER(("%s: called\n", __func__)); + + /* Yes, we are expecting a post request */ + if (evhttp_request_get_command(req) != EVHTTP_REQ_POST) { + fprintf(stdout, "FAILED (post type)\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(POST_DATA)) { + fprintf(stdout, "FAILED (length: %lu vs %lu)\n", + (unsigned long) evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long) strlen(POST_DATA)); + exit(1); + } + + if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), POST_DATA) != 0) { + fprintf(stdout, "FAILED (data)\n"); + fprintf(stdout, "Got :%s\n", evbuffer_pullup(evhttp_request_get_input_buffer(req),-1)); + fprintf(stdout, "Want:%s\n", POST_DATA); + exit(1); + } + + evb = evbuffer_new(); + evbuffer_add_printf(evb, BASIC_REQUEST_BODY); + + evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb); + + evbuffer_free(evb); +} + +void +http_postrequest_done(struct evhttp_request *req, void *arg) +{ + const char *what = BASIC_REQUEST_BODY; + struct event_base *base = arg; + + if (req == NULL) { + fprintf(stderr, "FAILED (timeout)\n"); + exit(1); + } + + if (evhttp_request_get_response_code(req) != HTTP_OK) { + + fprintf(stderr, 
"FAILED (response code)\n"); + exit(1); + } + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) { + fprintf(stderr, "FAILED (content type)\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) { + fprintf(stderr, "FAILED (length %lu vs %lu)\n", + (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what)); + exit(1); + } + + if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) { + fprintf(stderr, "FAILED (data)\n"); + exit(1); + } + + test_ok = 1; + event_base_loopexit(base, NULL); +} + +/* + * HTTP PUT test, basically just like POST, but ... + */ + +void http_putrequest_done(struct evhttp_request *, void *); + +#define PUT_DATA "Hi, I'm some PUT data" + +static void +http_put_test(void *arg) +{ + struct basic_test_data *data = arg; + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port); + tt_assert(evcon); + + /* + * Schedule the HTTP PUT request + */ + + req = evhttp_request_new(http_putrequest_done, data->base); + tt_assert(req); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "someotherhost"); + evbuffer_add_printf(evhttp_request_get_output_buffer(req), PUT_DATA); + + if (evhttp_make_request(evcon, req, EVHTTP_REQ_PUT, "/putit") == -1) { + tt_abort_msg("Couldn't make request"); + } + + event_base_dispatch(data->base); + + evhttp_connection_free(evcon); + evhttp_free(http); + + tt_int_op(test_ok, ==, 1); + end: + ; +} + +void +http_put_cb(struct evhttp_request *req, void *arg) +{ + struct evbuffer *evb; + TT_BLATHER(("%s: called\n", __func__)); + + /* Expecting a PUT request */ + if (evhttp_request_get_command(req) != EVHTTP_REQ_PUT) { + fprintf(stdout, "FAILED (put type)\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(PUT_DATA)) { + fprintf(stdout, "FAILED (length: %lu vs %lu)\n", + (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(PUT_DATA)); + exit(1); + } + + if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), PUT_DATA) != 0) { + fprintf(stdout, "FAILED (data)\n"); + fprintf(stdout, "Got :%s\n", evbuffer_pullup(evhttp_request_get_input_buffer(req),-1)); + fprintf(stdout, "Want:%s\n", PUT_DATA); + exit(1); + } + + evb = evbuffer_new(); + evbuffer_add_printf(evb, "That ain't funny"); + + evhttp_send_reply(req, HTTP_OK, "Everything is great", evb); + + evbuffer_free(evb); +} + +void +http_putrequest_done(struct evhttp_request *req, void *arg) +{ + struct event_base *base = arg; + const char *what = "That ain't funny"; + + if (req == NULL) { + fprintf(stderr, "FAILED (timeout)\n"); + exit(1); + } + + if (evhttp_request_get_response_code(req) != HTTP_OK) { + + fprintf(stderr, "FAILED (response code)\n"); + exit(1); + } + + if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) { + fprintf(stderr, "FAILED (content type)\n"); + exit(1); + } + + if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) { + fprintf(stderr, "FAILED (length %lu vs %lu)\n", + (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what)); + exit(1); + } + + + if 
(evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) { + fprintf(stderr, "FAILED (data)\n"); + exit(1); + } + + test_ok = 1; + event_base_loopexit(base, NULL); +} + +static void +http_failure_readcb(struct bufferevent *bev, void *arg) +{ + const char *what = "400 Bad Request"; + if (evbuffer_contains(bufferevent_get_input(bev), what)) { + test_ok = 2; + bufferevent_disable(bev, EV_READ); + event_base_loopexit(arg, NULL); + } +} + +/* + * Testing that the HTTP server can deal with a malformed request. + */ +static void +http_failure_test(void *arg) +{ + struct basic_test_data *data = arg; + struct bufferevent *bev; + evutil_socket_t fd = EVUTIL_INVALID_SOCKET; + const char *http_request; + ev_uint16_t port = 0; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + fd = http_connect("127.0.0.1", port); + tt_assert(fd != EVUTIL_INVALID_SOCKET); + + /* Stupid thing to send a request */ + bev = bufferevent_socket_new(data->base, fd, 0); + bufferevent_setcb(bev, http_failure_readcb, http_writecb, + http_errorcb, data->base); + + http_request = "illegal request\r\n"; + + bufferevent_write(bev, http_request, strlen(http_request)); + + event_base_dispatch(data->base); + + bufferevent_free(bev); + + evhttp_free(http); + + tt_int_op(test_ok, ==, 2); + end: + if (fd >= 0) + evutil_closesocket(fd); +} + +static void +close_detect_done(struct evhttp_request *req, void *arg) +{ + struct timeval tv; + tt_assert(req); + tt_assert(evhttp_request_get_response_code(req) == HTTP_OK); + + test_ok = 1; + + end: + evutil_timerclear(&tv); + tv.tv_usec = 150000; + event_base_loopexit(arg, &tv); +} + +static void +close_detect_launch(evutil_socket_t fd, short what, void *arg) +{ + struct evhttp_connection *evcon = arg; + struct event_base *base = evhttp_connection_get_base(evcon); + struct evhttp_request *req; + + req = evhttp_request_new(close_detect_done, base); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) { + tt_fail_msg("Couldn't make request"); + } +} + +static void +close_detect_cb(struct evhttp_request *req, void *arg) +{ + struct evhttp_connection *evcon = arg; + struct event_base *base = evhttp_connection_get_base(evcon); + struct timeval tv; + + if (req != NULL && evhttp_request_get_response_code(req) != HTTP_OK) { + tt_abort_msg("Failed"); + } + + evutil_timerclear(&tv); + tv.tv_sec = 0; /* longer than the http time out */ + tv.tv_usec = 600000; /* longer than the http time out */ + + /* launch a new request on the persistent connection in .3 seconds */ + event_base_once(base, -1, EV_TIMEOUT, close_detect_launch, evcon, &tv); + end: + ; +} + + +static void +http_close_detection_(struct basic_test_data *data, int with_delay) +{ + ev_uint16_t port = 0; + struct evhttp_connection *evcon = NULL; + struct evhttp_request *req = NULL; + const struct timeval sec_tenth = { 0, 100000 }; + struct evhttp *http = http_setup(&port, data->base, 0); + + test_ok = 0; + + /* .1 second timeout */ + evhttp_set_timeout_tv(http, &sec_tenth); + + evcon = evhttp_connection_base_new(data->base, NULL, + "127.0.0.1", port); + tt_assert(evcon); + evhttp_connection_set_timeout_tv(evcon, &sec_tenth); + + + tt_assert(evcon); + delayed_client = evcon; + + /* + * At this point, we want to schedule a request to the HTTP + * server using our make request method. 
+ */ + + req = evhttp_request_new(close_detect_cb, evcon); + + /* Add the information that we care about */ + evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost"); + + /* We give ownership of the request to the connection */ + if (evhttp_make_request(evcon, + req, EVHTTP_REQ_GET, with_delay ? "/largedelay" : "/test") == -1) { + tt_abort_msg("couldn't make request"); + } + + event_base_dispatch(data->base); + + /* at this point, the http server should have no connection */ + tt_assert(TAILQ_FIRST(&http->connections) == NULL); + + end: + if (evcon) + evhttp_connection_free(evcon); + if (http) + evhttp_free(http); +} +static void +http_close_detection_test(void *arg) +{ + http_close_detection_(arg, 0); +} +static void +http_close_detection_delay_test(void *arg) +{ + http_close_detection_(arg, 1); +} + +static void +http_highport_test(void *arg) +{ + struct basic_test_data *data = arg; + int i = -1; + struct evhttp *myhttp = NULL; + + /* Try a few different ports */ + for (i = 0; i < 50; ++i) { + myhttp = evhttp_new(data->base); + if (evhttp_bind_socket(myhttp, "127.0.0.1", 65535 - i) == 0) { + test_ok = 1; + evhttp_free(myhttp); + return; + } + evhttp_free(myhttp); + } + + tt_fail_msg("Couldn't get a high port"); +} + +static void +http_bad_header_test(void *ptr) +{ + struct evkeyvalq headers; + + TAILQ_INIT(&headers); + + tt_want(evhttp_add_header(&headers, "One", "Two") == 0); + tt_want(evhttp_add_header(&headers, "One", "Two\r\n Three") == 0); + tt_want(evhttp_add_header(&headers, "One\r", "Two") == -1); + tt_want(evhttp_add_header(&headers, "One\n", "Two") == -1); + tt_want(evhttp_add_header(&headers, "One", "Two\r") == -1); + tt_want(evhttp_add_header(&headers, "One", "Two\n") == -1); + + evhttp_clear_headers(&headers); +} + +static int validate_header( + const struct evkeyvalq* headers, + const char *key, const char *value) +{ + const char *real_val = evhttp_find_header(headers, key); + tt_assert(real_val != NULL); + tt_want(strcmp(real_val, value) == 0); +end: + return (0); +} + +static void +http_parse_query_test(void *ptr) +{ + struct evkeyvalq headers; + int r; + + TAILQ_INIT(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test", &headers); + tt_want(validate_header(&headers, "q", "test") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test&foo=bar", &headers); + tt_want(validate_header(&headers, "q", "test") == 0); + tt_want(validate_header(&headers, "foo", "bar") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test+foo", &headers); + tt_want(validate_header(&headers, "q", "test foo") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test%0Afoo", &headers); + tt_want(validate_header(&headers, "q", "test\nfoo") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test%0Dfoo", &headers); + tt_want(validate_header(&headers, "q", "test\rfoo") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test&&q2", &headers); + tt_int_op(r, ==, -1); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test+this", &headers); + tt_want(validate_header(&headers, "q", "test this") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=test&q2=foo", 
&headers); + tt_int_op(r, ==, 0); + tt_want(validate_header(&headers, "q", "test") == 0); + tt_want(validate_header(&headers, "q2", "foo") == 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q&q2=foo", &headers); + tt_int_op(r, ==, -1); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=foo&q2", &headers); + tt_int_op(r, ==, -1); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=foo&q2&q3=x", &headers); + tt_int_op(r, ==, -1); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query("http://www.test.com/?q=&q2=&q3=", &headers); + tt_int_op(r, ==, 0); + tt_want(validate_header(&headers, "q", "") == 0); + tt_want(validate_header(&headers, "q2", "") == 0); + tt_want(validate_header(&headers, "q3", "") == 0); + evhttp_clear_headers(&headers); + +end: + evhttp_clear_headers(&headers); +} +static void +http_parse_query_str_test(void *ptr) +{ + struct evkeyvalq headers; + int r; + + TAILQ_INIT(&headers); + + r = evhttp_parse_query_str("http://www.test.com/?q=test", &headers); + tt_assert(evhttp_find_header(&headers, "q") == NULL); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + + r = evhttp_parse_query_str("q=test", &headers); + tt_want(validate_header(&headers, "q", "test") == 0); + tt_int_op(r, ==, 0); + evhttp_clear_headers(&headers); + +end: + evhttp_clear_headers(&headers); +} + +static void +http_parse_uri_test(void *ptr) +{ + const int nonconform = (ptr != NULL); + const unsigned parse_flags = + nonconform ? EVHTTP_URI_NONCONFORMANT : 0; + struct evhttp_uri *uri = NULL; + char url_tmp[4096]; +#define URI_PARSE(uri) \ + evhttp_uri_parse_with_flags((uri), parse_flags) + +#define TT_URI(want) do { \ + char *ret = evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)); \ + tt_want(ret != NULL); \ + tt_want(ret == url_tmp); \ + if (strcmp(ret,want) != 0) \ + TT_FAIL(("\"%s\" != \"%s\"",ret,want)); \ + } while(0) + + tt_want(evhttp_uri_join(NULL, 0, 0) == NULL); + tt_want(evhttp_uri_join(NULL, url_tmp, 0) == NULL); + tt_want(evhttp_uri_join(NULL, url_tmp, sizeof(url_tmp)) == NULL); + + /* bad URIs: parsing */ +#define BAD(s) do { \ + if (URI_PARSE(s) != NULL) \ + TT_FAIL(("Expected error parsing \"%s\"",s)); \ + } while(0) + /* Nonconformant URIs we can parse: parsing */ +#define NCF(s) do { \ + uri = URI_PARSE(s); \ + if (uri != NULL && !nonconform) { \ + TT_FAIL(("Expected error parsing \"%s\"",s)); \ + } else if (uri == NULL && nonconform) { \ + TT_FAIL(("Couldn't parse nonconformant URI \"%s\"", \ + s)); \ + } \ + if (uri) { \ + tt_want(evhttp_uri_join(uri, url_tmp, \ + sizeof(url_tmp))); \ + evhttp_uri_free(uri); \ + } \ + } while(0) + + NCF("http://www.test.com/ why hello"); + NCF("http://www.test.com/why-hello\x01"); + NCF("http://www.test.com/why-hello?\x01"); + NCF("http://www.test.com/why-hello#\x01"); + BAD("http://www.\x01.test.com/why-hello"); + BAD("http://www.%7test.com/why-hello"); + NCF("http://www.test.com/why-hell%7o"); + BAD("h%3ttp://www.test.com/why-hello"); + NCF("http://www.test.com/why-hello%7"); + NCF("http://www.test.com/why-hell%7o"); + NCF("http://www.test.com/foo?ba%r"); + NCF("http://www.test.com/foo#ba%r"); + BAD("99:99/foo"); + BAD("http://www.test.com:999x/"); + BAD("http://www.test.com:x/"); + BAD("http://[hello-there]/"); + BAD("http://[::1]]/"); + BAD("http://[::1/"); + BAD("http://[foob/"); + BAD("http://[/"); + BAD("http://[ffff:ffff:ffff:ffff:Ffff:ffff:ffff:" + "ffff:ffff:ffff:ffff:ffff:ffff:ffff]/"); + BAD("http://[vX.foo]/"); + 
BAD("http://[vX.foo]/"); + BAD("http://[v.foo]/"); + BAD("http://[v5.fo%o]/"); + BAD("http://[v5X]/"); + BAD("http://[v5]/"); + BAD("http://[]/"); + BAD("http://f\x01red@www.example.com/"); + BAD("http://f%0red@www.example.com/"); + BAD("http://www.example.com:9999999999999999999999999999999999999/"); + BAD("http://www.example.com:hihi/"); + BAD("://www.example.com/"); + + /* bad URIs: joining */ + uri = evhttp_uri_new(); + tt_want(0==evhttp_uri_set_host(uri, "www.example.com")); + tt_want(evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)) != NULL); + /* not enough space: */ + tt_want(evhttp_uri_join(uri, url_tmp, 3) == NULL); + /* host is set, but path doesn't start with "/": */ + tt_want(0==evhttp_uri_set_path(uri, "hi_mom")); + tt_want(evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)) == NULL); + tt_want(evhttp_uri_join(uri, NULL, sizeof(url_tmp))==NULL); + tt_want(evhttp_uri_join(uri, url_tmp, 0)==NULL); + evhttp_uri_free(uri); + uri = URI_PARSE("mailto:foo@bar"); + tt_want(uri != NULL); + tt_want(evhttp_uri_get_host(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(!strcmp(evhttp_uri_get_scheme(uri), "mailto")); + tt_want(!strcmp(evhttp_uri_get_path(uri), "foo@bar")); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("mailto:foo@bar"); + evhttp_uri_free(uri); + + uri = evhttp_uri_new(); + /* Bad URI usage: setting invalid values */ + tt_want(-1 == evhttp_uri_set_scheme(uri,"")); + tt_want(-1 == evhttp_uri_set_scheme(uri,"33")); + tt_want(-1 == evhttp_uri_set_scheme(uri,"hi!")); + tt_want(-1 == evhttp_uri_set_userinfo(uri,"hello@")); + tt_want(-1 == evhttp_uri_set_host(uri,"[1.2.3.4]")); + tt_want(-1 == evhttp_uri_set_host(uri,"[")); + tt_want(-1 == evhttp_uri_set_host(uri,"www.[foo].com")); + tt_want(-1 == evhttp_uri_set_port(uri,-3)); + tt_want(-1 == evhttp_uri_set_path(uri,"hello?world")); + tt_want(-1 == evhttp_uri_set_query(uri,"hello#world")); + tt_want(-1 == evhttp_uri_set_fragment(uri,"hello#world")); + /* Valid URI usage: setting valid values */ + tt_want(0 == evhttp_uri_set_scheme(uri,"http")); + tt_want(0 == evhttp_uri_set_scheme(uri,NULL)); + tt_want(0 == evhttp_uri_set_userinfo(uri,"username:pass")); + tt_want(0 == evhttp_uri_set_userinfo(uri,NULL)); + tt_want(0 == evhttp_uri_set_host(uri,"www.example.com")); + tt_want(0 == evhttp_uri_set_host(uri,"1.2.3.4")); + tt_want(0 == evhttp_uri_set_host(uri,"[1:2:3:4::]")); + tt_want(0 == evhttp_uri_set_host(uri,"[v7.wobblewobble]")); + tt_want(0 == evhttp_uri_set_host(uri,NULL)); + tt_want(0 == evhttp_uri_set_host(uri,"")); + tt_want(0 == evhttp_uri_set_port(uri, -1)); + tt_want(0 == evhttp_uri_set_port(uri, 80)); + tt_want(0 == evhttp_uri_set_port(uri, 65535)); + tt_want(0 == evhttp_uri_set_path(uri, "")); + tt_want(0 == evhttp_uri_set_path(uri, "/documents/public/index.html")); + tt_want(0 == evhttp_uri_set_path(uri, NULL)); + tt_want(0 == evhttp_uri_set_query(uri, "key=val&key2=val2")); + tt_want(0 == evhttp_uri_set_query(uri, "keyvalblarg")); + tt_want(0 == evhttp_uri_set_query(uri, "")); + tt_want(0 == evhttp_uri_set_query(uri, NULL)); + tt_want(0 == evhttp_uri_set_fragment(uri, "")); + tt_want(0 == evhttp_uri_set_fragment(uri, "here?i?am")); + tt_want(0 == evhttp_uri_set_fragment(uri, NULL)); + evhttp_uri_free(uri); + + /* Valid parsing */ + uri = URI_PARSE("http://www.test.com/?q=t%33est"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 
0); + tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0); + tt_want(strcmp(evhttp_uri_get_query(uri), "q=t%33est") == 0); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://www.test.com/?q=t%33est"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://%77ww.test.com"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "%77ww.test.com") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://%77ww.test.com"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://www.test.com?q=test"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0); + tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://www.test.com?q=test"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://www.test.com#fragment"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want_str_op(evhttp_uri_get_fragment(uri), ==, "fragment"); + TT_URI("http://www.test.com#fragment"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://8000/"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "8000") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://8000/"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://:8000/"); + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == 8000); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://:8000/"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://www.test.com:/"); /* empty port */ + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0); + tt_want_str_op(evhttp_uri_get_path(uri), ==, "/"); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + tt_want(evhttp_uri_get_port(uri) == -1); + tt_want(evhttp_uri_get_fragment(uri) == NULL); + TT_URI("http://www.test.com/"); + evhttp_uri_free(uri); + + uri = URI_PARSE("http://www.test.com:"); /* empty port 2 */ + tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0); + tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0); + tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0); + tt_want(evhttp_uri_get_query(uri) == NULL); + tt_want(evhttp_uri_get_userinfo(uri) == NULL); + 
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("http://www.test.com");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("ftp://www.test.com/?q=test");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("ftp://www.test.com/?q=test");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("ftp://[::1]:999/?q=test");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "[::1]") == 0);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == 999);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("ftp://[::1]:999/?q=test");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("ftp://[ff00::127.0.0.1]/?q=test");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "[ff00::127.0.0.1]") == 0);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("ftp://[ff00::127.0.0.1]/?q=test");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("ftp://[v99.not_(any:time)_soon]/?q=test");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "[v99.not_(any:time)_soon]") == 0);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("ftp://[v99.not_(any:time)_soon]/?q=test");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("scheme://user:pass@foo.com:42/?q=test&s=some+thing#fragment");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+	tt_want(strcmp(evhttp_uri_get_userinfo(uri), "user:pass") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+	tt_want(evhttp_uri_get_port(uri) == 42);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=test&s=some+thing") == 0);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "fragment") == 0);
+	TT_URI("scheme://user:pass@foo.com:42/?q=test&s=some+thing#fragment");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("scheme://user@foo.com/#fragment");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+	tt_want(strcmp(evhttp_uri_get_userinfo(uri), "user") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "fragment") == 0);
+	TT_URI("scheme://user@foo.com/#fragment");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("scheme://%75ser@foo.com/#frag@ment");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+	tt_want(strcmp(evhttp_uri_get_userinfo(uri), "%75ser") == 0);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "frag@ment") == 0);
+	TT_URI("scheme://%75ser@foo.com/#frag@ment");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("file:///some/path/to/the/file");
+	tt_want(strcmp(evhttp_uri_get_scheme(uri), "file") == 0);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/some/path/to/the/file") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("file:///some/path/to/the/file");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("///some/path/to/the-file");
+	tt_want(uri != NULL);
+	tt_want(evhttp_uri_get_scheme(uri) == NULL);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/some/path/to/the-file") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("///some/path/to/the-file");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("/s:ome/path/to/the-file?q=99#fred");
+	tt_want(uri != NULL);
+	tt_want(evhttp_uri_get_scheme(uri) == NULL);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_host(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "/s:ome/path/to/the-file") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=99") == 0);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "fred") == 0);
+	TT_URI("/s:ome/path/to/the-file?q=99#fred");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("relative/path/with/co:lon");
+	tt_want(uri != NULL);
+	tt_want(evhttp_uri_get_scheme(uri) == NULL);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_host(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "relative/path/with/co:lon") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(evhttp_uri_get_fragment(uri) == NULL);
+	TT_URI("relative/path/with/co:lon");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("bob?q=99&q2=q?33#fr?ed");
+	tt_want(uri != NULL);
+	tt_want(evhttp_uri_get_scheme(uri) == NULL);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_host(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "bob") == 0);
+	tt_want(strcmp(evhttp_uri_get_query(uri), "q=99&q2=q?33") == 0);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "fr?ed") == 0);
+	TT_URI("bob?q=99&q2=q?33#fr?ed");
+	evhttp_uri_free(uri);
+
+	uri = URI_PARSE("#fr?ed");
+	tt_want(uri != NULL);
+	tt_want(evhttp_uri_get_scheme(uri) == NULL);
+	tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+	tt_want(evhttp_uri_get_host(uri) == NULL);
+	tt_want(evhttp_uri_get_port(uri) == -1);
+	tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+	tt_want(evhttp_uri_get_query(uri) == NULL);
+	tt_want(strcmp(evhttp_uri_get_fragment(uri), "fr?ed") == 0);
+	TT_URI("#fr?ed");
+	evhttp_uri_free(uri);
+#undef URI_PARSE
+#undef TT_URI
+#undef BAD
+}
+
+static void
+http_uriencode_test(void *ptr)
+{
+	char *s=NULL, *s2=NULL;
+	size_t sz;
+	int bytes_decoded;
+
+#define ENC(from,want,plus) do { \
+	s = evhttp_uriencode((from), -1, (plus)); \
+	tt_assert(s); \
+	tt_str_op(s,==,(want)); \
+	sz = -1; \
+	s2 = evhttp_uridecode((s), (plus), &sz); \
+	tt_assert(s2); \
+	tt_str_op(s2,==,(from)); \
+	tt_int_op(sz,==,strlen(from)); \
+	free(s); \
+	free(s2); \
+	s = s2 = NULL; \
+	} while (0)
+
+#define DEC(from,want,dp) do { \
+	s = evhttp_uridecode((from),(dp),&sz); \
+	tt_assert(s); \
+	tt_str_op(s,==,(want)); \
+	tt_int_op(sz,==,strlen(want)); \
+	free(s); \
+	s = NULL; \
+	} while (0)
+
+#define OLD_DEC(from,want) do { \
+	s = evhttp_decode_uri((from)); \
+	tt_assert(s); \
+	tt_str_op(s,==,(want)); \
+	free(s); \
+	s = NULL; \
+	} while (0)
+
+
+	ENC("Hello", "Hello",0);
+	ENC("99", "99",0);
+	ENC("", "",0);
+	ENC(
+	    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789-.~_",
+	    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789-.~_",0);
+	ENC(" ", "%20",0);
+	ENC(" ", "+",1);
+	ENC("\xff\xf0\xe0", "%FF%F0%E0",0);
+	ENC("\x01\x19", "%01%19",1);
+	ENC("http://www.ietf.org/rfc/rfc3986.txt",
+	    "http%3A%2F%2Fwww.ietf.org%2Frfc%2Frfc3986.txt",1);
+
+	ENC("1+2=3", "1%2B2%3D3",1);
+	ENC("1+2=3", "1%2B2%3D3",0);
+
+	/* Now try encoding with internal NULs. */
+	s = evhttp_uriencode("hello\0world", 11, 0);
+	tt_assert(s);
+	tt_str_op(s,==,"hello%00world");
+	free(s);
+	s = NULL;
+
+	/* Now try decoding just part of string. */
+	s = malloc(6 + 1 /* NUL byte */);
+	bytes_decoded = evhttp_decode_uri_internal("hello%20%20", 6, s, 0);
+	tt_assert(s);
+	tt_int_op(bytes_decoded,==,6);
+	tt_str_op(s,==,"hello%");
+	free(s);
+	s = NULL;
+
+	/* Now try out some decoding cases that we don't generate with
+	 * encode_uri: Make sure that malformed stuff doesn't crash... */
+	DEC("%%xhello th+ere \xff",
+	    "%%xhello th+ere \xff", 0);
+	/* Make sure plus decoding works */
+	DEC("plus+should%20work+", "plus should work ",1);
+	/* Try some lowercase hex */
+	DEC("%f0%a0%b0", "\xf0\xa0\xb0",1);
+
+	/* Try an internal NUL. */
+	sz = 0;
+	s = evhttp_uridecode("%00%00x%00%00", 1, &sz);
+	tt_int_op(sz,==,5);
+	tt_assert(!memcmp(s, "\0\0x\0\0", 5));
+	free(s);
+	s = NULL;
+
+	/* Try with size == NULL */
+	sz = 0;
+	s = evhttp_uridecode("%00%00x%00%00", 1, NULL);
+	tt_assert(!memcmp(s, "\0\0x\0\0", 5));
+	free(s);
+	s = NULL;
+
+	/* Test out the crazy old behavior of the deprecated
+	 * evhttp_decode_uri */
+	OLD_DEC("http://example.com/normal+path/?key=val+with+spaces",
+	    "http://example.com/normal+path/?key=val with spaces");
+
+end:
+	if (s)
+		free(s);
+	if (s2)
+		free(s2);
+#undef ENC
+#undef DEC
+#undef OLD_DEC
+}
+
+static void
+http_base_test(void *ptr)
+{
+	struct event_base *base = NULL;
+	struct bufferevent *bev;
+	evutil_socket_t fd;
+	const char *http_request;
+	ev_uint16_t port = 0;
+	struct evhttp *http;
+
+	test_ok = 0;
+	base = event_base_new();
+	tt_assert(base);
+	http = http_setup(&port, base, 0);
+
+	fd = http_connect("127.0.0.1", port);
+	tt_assert(fd != EVUTIL_INVALID_SOCKET);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_socket_new(base, fd, 0);
+	bufferevent_setcb(bev, http_readcb, http_writecb,
+	    http_errorcb, base);
+	bufferevent_base_set(base, bev);
+
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	event_base_dispatch(base);
+
+	bufferevent_free(bev);
+	evutil_closesocket(fd);
+
+	evhttp_free(http);
+
+	tt_int_op(test_ok, ==, 2);
+
+end:
+	if (base)
+		event_base_free(base);
+}
+
+/*
+ * the server is just going to close the connection if it times out during
+ * reading the headers.
+ */
+
+static void
+http_incomplete_readcb(struct bufferevent *bev, void *arg)
+{
+	test_ok = -1;
+	event_base_loopexit(exit_base,NULL);
+}
+
+static void
+http_incomplete_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	/** For ssl */
+	if (what & BEV_EVENT_CONNECTED)
+		return;
+
+	if (what == (BEV_EVENT_READING|BEV_EVENT_EOF))
+		test_ok++;
+	else
+		test_ok = -2;
+	event_base_loopexit(exit_base,NULL);
+}
+
+static void
+http_incomplete_writecb(struct bufferevent *bev, void *arg)
+{
+	if (arg != NULL) {
+		evutil_socket_t fd = *(evutil_socket_t *)arg;
+		/* terminate the write side to simulate EOF */
+		shutdown(fd, EVUTIL_SHUT_WR);
+	}
+	if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+		/* enable reading of the reply */
+		bufferevent_enable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+http_incomplete_test_(struct basic_test_data *data, int use_timeout, int ssl)
+{
+	struct bufferevent *bev;
+	evutil_socket_t fd;
+	const char *http_request;
+	ev_uint16_t port = 0;
+	struct timeval tv_start, tv_end;
+	struct evhttp *http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	exit_base = data->base;
+	test_ok = 0;
+
+	evhttp_set_timeout(http, 1);
+
+	fd = http_connect("127.0.0.1", port);
+	tt_assert(fd != EVUTIL_INVALID_SOCKET);
+
+	/* Stupid thing to send a request */
+	bev = create_bev(data->base, fd, ssl);
+	bufferevent_setcb(bev,
+	    http_incomplete_readcb, http_incomplete_writecb,
+	    http_incomplete_errorcb, use_timeout ? NULL : &fd);
+
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	evutil_gettimeofday(&tv_start, NULL);
+
+	event_base_dispatch(data->base);
+
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+	bufferevent_free(bev);
+	if (use_timeout) {
+		evutil_closesocket(fd);
+		fd = EVUTIL_INVALID_SOCKET;
+	}
+
+	evhttp_free(http);
+
+	if (use_timeout && tv_end.tv_sec >= 3) {
+		tt_abort_msg("time");
+	} else if (!use_timeout && tv_end.tv_sec >= 1) {
+		/* we should be done immediately */
+		tt_abort_msg("time");
+	}
+
+	tt_int_op(test_ok, ==, 2);
+ end:
+	if (fd >= 0)
+		evutil_closesocket(fd);
+}
+static void http_incomplete_test(void *arg)
+{ http_incomplete_test_(arg, 0, 0); }
+static void http_incomplete_timeout_test(void *arg)
+{ http_incomplete_test_(arg, 1, 0); }
+
+
+/*
+ * the server is going to reply with chunked data.
+ */
+
+static void
+http_chunked_readcb(struct bufferevent *bev, void *arg)
+{
+	/* nothing here */
+}
+
+static void
+http_chunked_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	struct evhttp_request *req = NULL;
+
+	/** SSL */
+	if (what & BEV_EVENT_CONNECTED)
+		return;
+
+	if (!test_ok)
+		goto out;
+
+	test_ok = -1;
+
+	if ((what & BEV_EVENT_EOF) != 0) {
+		const char *header;
+		enum message_read_status done;
+		req = evhttp_request_new(NULL, NULL);
+
+		/* req->kind = EVHTTP_RESPONSE; */
+		done = evhttp_parse_firstline_(req, bufferevent_get_input(bev));
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		done = evhttp_parse_headers_(req, bufferevent_get_input(bev));
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		header = evhttp_find_header(evhttp_request_get_input_headers(req), "Transfer-Encoding");
+		if (header == NULL || strcmp(header, "chunked"))
+			goto out;
+
+		header = evhttp_find_header(evhttp_request_get_input_headers(req), "Connection");
+		if (header == NULL || strcmp(header, "close"))
+			goto out;
+
+		header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+		if (header == NULL)
+			goto out;
+		/* 13 chars */
+		if (strcmp(header, "d")) {
+			free((void*)header);
+			goto out;
+		}
+		free((void*)header);
+
+		if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 13),
+		    "This is funny", 13))
+			goto out;
+
+		evbuffer_drain(bufferevent_get_input(bev), 13 + 2);
+
+		header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+		if (header == NULL)
+			goto out;
+		/* 18 chars */
+		if (strcmp(header, "12"))
+			goto out;
+		free((char *)header);
+
+		if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 18),
+		    "but not hilarious.", 18))
+			goto out;
+
+		evbuffer_drain(bufferevent_get_input(bev), 18 + 2);
+
+		header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+		if (header == NULL)
+			goto out;
+		/* 8 chars */
+		if (strcmp(header, "8")) {
+			free((void*)header);
+			goto out;
+		}
+		free((char *)header);
+
+		if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 8),
+		    "bwv 1052.", 8))
+			goto out;
+
+		evbuffer_drain(bufferevent_get_input(bev), 8 + 2);
+
+		header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+		if (header == NULL)
+			goto out;
+		/* 0 chars */
+		if (strcmp(header, "0")) {
+			free((void*)header);
+			goto out;
+		}
+		free((void *)header);
+
+		test_ok = 2;
+	}
+
+out:
+	if (req)
+		evhttp_request_free(req);
+
+	event_base_loopexit(arg, NULL);
+}
+
+static void
+http_chunked_writecb(struct bufferevent *bev, void *arg)
+{
+	if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+		/* enable reading of the reply */
+		bufferevent_enable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+http_chunked_request_done(struct evhttp_request *req, void *arg)
+{
+	if (evhttp_request_get_response_code(req) != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(evhttp_request_get_input_headers(req),
+	    "Transfer-Encoding") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 13 + 18 + 8) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (strncmp((char *)evbuffer_pullup(evhttp_request_get_input_buffer(req), 13 + 18 + 8),
+	    "This is funnybut not hilarious.bwv 1052",
+	    13 + 18 + 8)) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_base_loopexit(arg, NULL);
+}
+
+static void
+http_chunk_out_test_impl(void *arg, int ssl)
+{
+	struct basic_test_data *data = arg;
+	struct bufferevent *bev;
+	evutil_socket_t fd;
+	const char *http_request;
+	ev_uint16_t port = 0;
+	struct timeval tv_start, tv_end;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	int i;
+	struct evhttp *http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	exit_base = data->base;
+	test_ok = 0;
+
+	fd = http_connect("127.0.0.1", port);
+	tt_assert(fd != EVUTIL_INVALID_SOCKET);
+
+	/* Stupid thing to send a request */
+	bev = create_bev(data->base, fd, ssl);
+	bufferevent_setcb(bev,
+	    http_chunked_readcb, http_chunked_writecb,
+	    http_chunked_errorcb, data->base);
+
+	http_request =
+	    "GET /chunked HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	evutil_gettimeofday(&tv_start, NULL);
+
+	event_base_dispatch(data->base);
+
+	bufferevent_free(bev);
+
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+	tt_int_op(tv_end.tv_sec, <, 1);
+
+	tt_int_op(test_ok, ==, 2);
+
+	/* now try again with the regular connection object */
+	bev = create_bev(data->base, -1, ssl);
+	evcon = evhttp_connection_base_bufferevent_new(
+	    data->base, NULL, bev, "127.0.0.1", port);
+	tt_assert(evcon);
+
+	/* make two requests to check the keepalive behavior */
+	for (i = 0; i < 2; i++) {
+		test_ok = 0;
+		req = evhttp_request_new(http_chunked_request_done,data->base);
+
+		/* Add the information that we care about */
+		evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+		/* We give ownership of the request to the connection */
+		if (evhttp_make_request(evcon, req,
+		    EVHTTP_REQ_GET, "/chunked") == -1) {
+			tt_abort_msg("Couldn't make request");
+		}
+
+		event_base_dispatch(data->base);
+
+		tt_assert(test_ok == 1);
+	}
+
+ end:
+	if (evcon)
+		evhttp_connection_free(evcon);
+	if (http)
+		evhttp_free(http);
+}
+static void http_chunk_out_test(void *arg)
+{ http_chunk_out_test_impl(arg, 0); }
+
+static void
+http_stream_out_test_impl(void *arg, int ssl)
+{
+	struct basic_test_data *data = arg;
+	ev_uint16_t port = 0;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	struct bufferevent *bev;
+	struct evhttp *http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	test_ok = 0;
+	exit_base = data->base;
+
+	bev = create_bev(data->base, -1, ssl);
+	evcon = evhttp_connection_base_bufferevent_new(
+	    data->base, NULL, bev, "127.0.0.1", port);
+	tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule a request to the HTTP
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_request_done,
+	    (void *)"This is funnybut not hilarious.bwv 1052");
+
+	/* Add the information that we care about */
+	evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/streamed")
+	    == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	event_base_dispatch(data->base);
+
+ end:
+	if (evcon)
+		evhttp_connection_free(evcon);
+	if (http)
+		evhttp_free(http);
+}
+static void http_stream_out_test(void *arg)
+{ http_stream_out_test_impl(arg, 0); }
+
+static void
+http_stream_in_chunk(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *reply = arg;
+
+	if (evhttp_request_get_response_code(req) != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	evbuffer_add_buffer(reply, evhttp_request_get_input_buffer(req));
+}
+
+static void
+http_stream_in_done(struct evhttp_request *req, void *arg)
+{
+	if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 0) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	event_base_loopexit(exit_base, NULL);
+}
+
+/**
+ * Makes a request and reads the response in chunks.
+ */
+static void
+http_stream_in_test_(struct basic_test_data *data, char const *url,
+    size_t expected_len, char const *expected)
+{
+	struct evhttp_connection *evcon;
+	struct evbuffer *reply = evbuffer_new();
+	struct evhttp_request *req = NULL;
+	ev_uint16_t port = 0;
+	struct evhttp *http = http_setup(&port, data->base, 0);
+
+	exit_base = data->base;
+
+	evcon = evhttp_connection_base_new(data->base, NULL,"127.0.0.1", port);
+	tt_assert(evcon);
+
+	req = evhttp_request_new(http_stream_in_done, reply);
+	evhttp_request_set_chunked_cb(req, http_stream_in_chunk);
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, url) == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	event_base_dispatch(data->base);
+
+	if (evbuffer_get_length(reply) != expected_len) {
+		TT_DIE(("reply length %lu; expected %lu; FAILED (%s)\n",
+		    (unsigned long)evbuffer_get_length(reply),
+		    (unsigned long)expected_len,
+		    (char*)evbuffer_pullup(reply, -1)));
+	}
+
+	if (memcmp(evbuffer_pullup(reply, -1), expected, expected_len) != 0) {
+		tt_abort_msg("Memory mismatch");
+	}
+
+	test_ok = 1;
+ end:
+	if (reply)
+		evbuffer_free(reply);
+	if (evcon)
+		evhttp_connection_free(evcon);
+	if (http)
+		evhttp_free(http);
+}
+
+static void
+http_stream_in_test(void *arg)
+{
+	http_stream_in_test_(arg, "/chunked", 13 + 18 + 8,
+	    "This is funnybut not hilarious.bwv 1052");
+
+	http_stream_in_test_(arg, "/test", strlen(BASIC_REQUEST_BODY),
+	    BASIC_REQUEST_BODY);
+}
+
+static void
+http_stream_in_cancel_chunk(struct evhttp_request *req, void *arg)
+{
+	tt_int_op(evhttp_request_get_response_code(req), ==, HTTP_OK);
+
+ end:
+	evhttp_cancel_request(req);
+	event_base_loopexit(arg, NULL);
+}
+
+static void
+http_stream_in_cancel_done(struct evhttp_request *req, void *arg)
+{
+	/* should never be called */
+	tt_fail_msg("In cancel done");
+}
+
+static void
+http_stream_in_cancel_test(void *arg)
+{
+	struct basic_test_data *data = arg;
+	struct evhttp_connection *evcon;
+	struct evhttp_request *req = NULL;
+	ev_uint16_t port = 0;
+	struct evhttp *http = http_setup(&port, data->base, 0);
+
+	evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+	tt_assert(evcon);
+
+	req = evhttp_request_new(http_stream_in_cancel_done, data->base);
+	evhttp_request_set_chunked_cb(req, http_stream_in_cancel_chunk);
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/chunked") == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	event_base_dispatch(data->base);
+
+	test_ok = 1;
+ end:
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+
+}
+
+static void
+http_connection_fail_done(struct evhttp_request *req, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	struct event_base *base = evhttp_connection_get_base(evcon);
+
+	/* An ENETUNREACH error results in an unrecoverable
+	 * evhttp_connection error (see evhttp_connection_fail_()). The
+	 * connection will be reset, and the user will be notified with a NULL
+	 * req parameter. */
+	tt_assert(!req);
+
+	evhttp_connection_free(evcon);
+
+	test_ok = 1;
+
+ end:
+	event_base_loopexit(base, NULL);
+}
+
+/* Test unrecoverable evhttp_connection errors by generating an ENETUNREACH
+ * error on connection. */
+static void
+http_connection_fail_test_impl(void *arg, int ssl)
+{
+	struct basic_test_data *data = arg;
+	ev_uint16_t port = 0;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	struct bufferevent *bev;
+	struct evhttp *http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	exit_base = data->base;
+	test_ok = 0;
+
+	/* auto detect a port */
+	evhttp_free(http);
+
+	bev = create_bev(data->base, -1, ssl);
+	/* Pick an unroutable address. This administratively scoped multicast
+	 * address should do when working with TCP. */
+	evcon = evhttp_connection_base_bufferevent_new(
+	    data->base, NULL, bev, "239.10.20.30", 80);
+	tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule an HTTP GET request
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_connection_fail_done, evcon);
+	tt_assert(req);
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/") == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	event_base_dispatch(data->base);
+
+	tt_int_op(test_ok, ==, 1);
+
+ end:
+	;
+}
+static void http_connection_fail_test(void *arg)
+{ http_connection_fail_test_impl(arg, 0); }
+
+static void
+http_connection_retry_done(struct evhttp_request *req, void *arg)
+{
+	tt_assert(req);
+	tt_int_op(evhttp_request_get_response_code(req), !=, HTTP_OK);
+	if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") != NULL) {
+		tt_abort_msg("(content type)\n");
+	}
+
+	tt_uint_op(evbuffer_get_length(evhttp_request_get_input_buffer(req)), ==, 0);
+
+	test_ok = 1;
+ end:
+	event_base_loopexit(arg,NULL);
+}
+
+struct http_server
+{
+	ev_uint16_t port;
+	int ssl;
+	struct evhttp *http;
+};
+static struct event_base *http_make_web_server_base=NULL;
+static void
+http_make_web_server(evutil_socket_t fd, short what, void *arg)
+{
+	struct http_server *hs = (struct http_server *)arg;
+	hs->http = http_setup(&hs->port, http_make_web_server_base, hs->ssl ? HTTP_BIND_SSL : 0);
+}
+
+static void
+http_simple_test_impl(void *arg, int ssl, int dirty, const char *uri)
+{
+	struct basic_test_data *data = arg;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	struct bufferevent *bev;
+	struct http_server hs = { 0, ssl, NULL, };
+	struct evhttp *http = http_setup(&hs.port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	exit_base = data->base;
+	test_ok = 0;
+
+	bev = create_bev(data->base, -1, ssl);
+#ifdef EVENT__HAVE_OPENSSL
+	bufferevent_openssl_set_allow_dirty_shutdown(bev, dirty);
+#endif
+
+	evcon = evhttp_connection_base_bufferevent_new(
+	    data->base, NULL, bev, "127.0.0.1", hs.port);
+	tt_assert(evcon);
+	evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+	req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+	tt_assert(req);
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, uri) == -1)
+		tt_abort_msg("Couldn't make request");
+
+	event_base_dispatch(data->base);
+	tt_int_op(test_ok, ==, 1);
+
+ end:
+	if (evcon)
+		evhttp_connection_free(evcon);
+	if (http)
+		evhttp_free(http);
+}
+static void http_simple_test(void *arg)
+{ http_simple_test_impl(arg, 0, 0, "/test"); }
+static void http_simple_nonconformant_test(void *arg)
+{ http_simple_test_impl(arg, 0, 0, "/test nonconformant"); }
+
+static void
+http_connection_retry_test_basic(void *arg, const char *addr, struct evdns_base *dns_base, int ssl)
+{
+	struct basic_test_data *data = arg;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	struct timeval tv, tv_start, tv_end;
+	struct bufferevent *bev;
+	struct http_server hs = { 0, ssl, NULL, };
+	struct evhttp *http = http_setup(&hs.port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+	exit_base = data->base;
+	test_ok = 0;
+
+	/* auto detect a port */
+	evhttp_free(http);
+
+	bev = create_bev(data->base, -1, ssl);
+	evcon = evhttp_connection_base_bufferevent_new(data->base, dns_base, bev, addr, hs.port);
+	tt_assert(evcon);
+	if (dns_base)
+		tt_assert(!evhttp_connection_set_flags(evcon, EVHTTP_CON_REUSE_CONNECTED_ADDR));
+
+	evhttp_connection_set_timeout(evcon, 1);
+	/* also bind to local host */
+	evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+	/*
+	 * At this point, we want to schedule an HTTP GET request
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_connection_retry_done, data->base);
+	tt_assert(req);
+
+	/* Add the information that we care about */
+	evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+	    "/?arg=val") == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	evutil_gettimeofday(&tv_start, NULL);
+	event_base_dispatch(data->base);
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+	tt_int_op(tv_end.tv_sec, <, 1);
+
+	tt_int_op(test_ok, ==, 1);
+
+	/*
+	 * now test the same but with retries
+	 */
+	test_ok = 0;
+	/** Shutdown dns server, to test conn_address reusing */
+	if (dns_base)
+		regress_clean_dnsserver();
+
+	{
+		const struct timeval tv_timeout = { 0, 500000 };
+		const struct timeval tv_retry = { 0, 500000 };
+		evhttp_connection_set_timeout_tv(evcon, &tv_timeout);
+		evhttp_connection_set_initial_retry_tv(evcon, &tv_retry);
+	}
+	evhttp_connection_set_retries(evcon, 1);
+
+	req = evhttp_request_new(http_connection_retry_done, data->base);
+	tt_assert(req);
+
+	/* Add the information that we care about */
+	evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+	    "/?arg=val") == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	evutil_gettimeofday(&tv_start, NULL);
+	event_base_dispatch(data->base);
+	evutil_gettimeofday(&tv_end, NULL);
+
+	/* fails fast, .5 sec to wait to retry, fails fast again. */
+	test_timeval_diff_leq(&tv_start, &tv_end, 500, 200);
+
+	tt_assert(test_ok == 1);
+
+	/*
+	 * now test the same but with retries and give it a web server
+	 * at the end
+	 */
+	test_ok = 0;
+
+	evhttp_connection_set_timeout(evcon, 1);
+	evhttp_connection_set_retries(evcon, 3);
+
+	req = evhttp_request_new(http_dispatcher_test_done, data->base);
+	tt_assert(req);
+
+	/* Add the information that we care about */
+	evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+	    "/?arg=val") == -1) {
+		tt_abort_msg("Couldn't make request");
+	}
+
+	/* start up a web server .2 seconds after the connection tried
+	 * to send a request
+	 */
+	evutil_timerclear(&tv);
+	tv.tv_usec = 200000;
+	http_make_web_server_base = data->base;
+	event_base_once(data->base, -1, EV_TIMEOUT, http_make_web_server, &hs, &tv);
+
+	evutil_gettimeofday(&tv_start, NULL);
+	event_base_dispatch(data->base);
+	evutil_gettimeofday(&tv_end, NULL);
+	/* We'll wait twice as long as we did last time. */
+	test_timeval_diff_leq(&tv_start, &tv_end, 1000, 400);
+
+	tt_int_op(test_ok, ==, 1);
+
+ end:
+	if (evcon)
+		evhttp_connection_free(evcon);
+	if (http)
+		evhttp_free(hs.http);
+}
+
+static void
+http_connection_retry_conn_address_test_impl(void *arg, int ssl)
+{
+	struct basic_test_data *data = arg;
+	ev_uint16_t portnum = 0;
+	struct evdns_base *dns_base = NULL;
+	char address[64];
+
+	tt_assert(regress_dnsserver(data->base, &portnum, search_table));
+	dns_base = evdns_base_new(data->base, 0/* init name servers */);
+	tt_assert(dns_base);
+
+	/* Add ourself as the only nameserver, and make sure we really are
+	 * the only nameserver. */
+	evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum);
+	evdns_base_nameserver_ip_add(dns_base, address);
+
+	http_connection_retry_test_basic(arg, "localhost", dns_base, ssl);
+
+ end:
+	if (dns_base)
+		evdns_base_free(dns_base, 0);
+	/** dnsserver will be cleaned in http_connection_retry_test_basic() */
+}
+static void http_connection_retry_conn_address_test(void *arg)
+{ http_connection_retry_conn_address_test_impl(arg, 0); }
+
+static void
+http_connection_retry_test_impl(void *arg, int ssl)
+{
+	http_connection_retry_test_basic(arg, "127.0.0.1", NULL, ssl);
+}
+static void
+http_connection_retry_test(void *arg)
+{ http_connection_retry_test_impl(arg, 0); }
+
+static void
+http_primitives(void *ptr)
+{
+	char *escaped = NULL;
+	struct evhttp *http = NULL;
+
+	escaped = evhttp_htmlescape("